repo_name stringlengths 9 55 | path stringlengths 7 120 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 169k | license stringclasses 12 values |
|---|---|---|---|---|---|
Jwuthri/Mozinor | mozinor/preprocess/transformer/fill.py | 1 | 6094 | # -*- coding: utf-8 -*-
"""
Created on July 2017
@author: JulienWuthrich
"""
import logging
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from mozinor.preprocess.settings import logger
class FillNaN(object):
    """Fill NaN values of numeric columns using a KMeans clustering approach.

    Rows are clustered on the numeric columns (NaN temporarily replaced by
    column means for the fit), then each missing value is replaced by the
    mean of its column within the row's cluster.
    """

    def __init__(self, dataframe):
        """Fill NaN with a clustering.

        Args:
        -----
            dataframe (pandas.DataFrame): data
        """
        self.dataframe = dataframe
        self.cols = self.noCategoricCols()

    def noCategoricCols(self):
        """Select only non categoric columns.

        Return:
        -------
            List of columns no categoric
        """
        return list(self.dataframe.select_dtypes(
            include=["float", "float64", "int", "int64"]
        ).columns)

    def wcss(self, dataframe, max_cluster=20, plot=True):
        """Determine the best number of cluster.

        Args:
        -----
            dataframe (pandas.DataFrame): data
            max_cluster (int): the max number of cluster possible
            plot (bool): print the plot

        Return:
        -------
            list of wcss values
        """
        # KMeans cannot handle NaN: impute with column means for the fit only.
        dataframe = dataframe.fillna(dataframe.mean())
        wcss = list()
        for i in range(1, max_cluster):
            kmeans = KMeans(n_clusters=i, init='k-means++')
            kmeans.fit(dataframe[self.cols])
            wcss.append(kmeans.inertia_)
        if plot:
            plt.plot(range(1, max_cluster), wcss)
            plt.title('Elbow')
            plt.xlabel('Number of clusters')
            plt.ylabel('WCSS')
            plt.show()
        return wcss

    def computeLcurve(self, wcss):
        """Compute the lcurve (discrete second derivative of the wcss).

        Args:
        -----
            wcss (list): the elbow values

        Return:
        -------
            dict k/v: number of clusters / lcurve value
        """
        d_derivate = dict()
        for i in range(1, len(wcss) - 1):
            d_derivate[i] = wcss[i + 1] + wcss[i - 1] - 2 * wcss[i]
        return d_derivate

    def bestLcurveValue(self, d_derivate):
        """Select best lcurve value, the one that doesn't decrease anymore.

        Args:
        -----
            d_derivate (dict): dict of nb_cluster / lcurve

        Return:
        -------
            int, value of the optimal nb cluster
        """
        nb_cluster = len(d_derivate)
        # Keys were inserted in increasing order, so this finds the first
        # cluster count whose curvature turns negative (dict order is
        # guaranteed on Python 3.7+).
        for k, v in d_derivate.items():
            if v < 0:
                return k
        return nb_cluster

    def computeOptimalCluster(self, wcss):
        """Select the optimal number of clusters.

        Args:
        -----
            wcss (list): list of wcs values

        Return:
        -------
            int, of the optimal number of clusters
        """
        d_derivate = self.computeLcurve(wcss)
        return self.bestLcurveValue(d_derivate)

    def nbCluster(self):
        """Ask the user for the number of clusters (manual alternative
        to the Elbow method).

        Return:
        -------
            Number of cluster
        """
        user_input = input('How many clusters do you want ? ')
        try:
            return int(user_input)
        except ValueError:
            # Fixed typo in the original message ("requiered").
            raise ValueError("An int is required")

    def clustering(self, dataframe, nb_cluster=2):
        """Make a kmeans based on all numeric columns.

        Args:
        -----
            dataframe (pandas.DataFrame): data
            nb_cluster (int): number of cluster, determined by elbow

        Return:
        -------
            pandas.Serie contains the cluster for each rows
        """
        dataframe = dataframe.fillna(dataframe.mean())
        kmeans = KMeans(n_clusters=nb_cluster, init='k-means++')
        return kmeans.fit_predict(dataframe[self.cols])

    def meanCluster(self, dataframe, col):
        """Take the mean for each cluster/col.

        Args:
        -----
            dataframe (pandas.DataFrame): data
            col (str): the column to work on

        Return:
        -------
            dict contains the k/v for each cluster and mean value
        """
        d_cluster = dict()
        for cluster in dataframe["Cluster"].unique():
            d_cluster[cluster] = dataframe[
                dataframe["Cluster"] == cluster
            ][col].mean()
        return d_cluster

    def fillCol(self, dataframe, col):
        """Fill NaN of a column thanks to dict of cluster values.

        Args:
        -----
            dataframe (pandas.DataFrame): data
            col (str): column to work on

        Return:
        -------
            pandas.Serie with NaN filled
        """
        d_cluster = self.meanCluster(dataframe, col)
        nan_rows = dataframe[dataframe[col].isnull()]
        for idx, row in nan_rows.iterrows():
            value = d_cluster.get(row["Cluster"])
            # DataFrame.set_value was deprecated in pandas 0.21 and removed
            # in 1.0; .at is the supported fast scalar setter.
            dataframe.at[idx, col] = value
        return dataframe[col]

    def fillCols(self, dataframe):
        """Fill NaN for all columns.

        Args:
        -----
            dataframe (pandas.DataFrame): data

        Return:
        -------
            pandas.DataFrame with new value instead of NaN
        """
        for col in self.cols:
            logger.log("Filling NaN, column: {}".format(col), logging.DEBUG)
            dataframe[col] = self.fillCol(dataframe, col)
        return dataframe.drop("Cluster", axis=1)

    def fill(self):
        """Fill the dataframe.

        Return:
        -------
            pandas.DataFrame filled
        """
        dataframe = self.dataframe.copy()
        wcss = self.wcss(dataframe)
        nb_cluster = self.computeOptimalCluster(wcss)
        logger.log("Optimal nb of cluster is: {}".format(nb_cluster), logging.DEBUG)
        dataframe["Cluster"] = self.clustering(dataframe, nb_cluster)
        return self.fillCols(dataframe)
| mit |
cyberphox/MissionPlanner | Lib/site-packages/numpy/lib/function_base.py | 53 | 108301 | __docformat__ = "restructuredtext en"
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp']
import warnings
import types
import sys
import numpy.core.numeric as _nx
from numpy.core import linspace
from numpy.core.numeric import ones, zeros, arange, concatenate, array, \
asarray, asanyarray, empty, empty_like, ndarray, around
from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
# _compiled_base holds the C implementations of several routines.  On
# IronPython ('cli') only part of it is available, so pure-Python stubs
# fill the gaps for the missing entry points.
if sys.platform != 'cli':
    from _compiled_base import _insert, add_docstring
    from _compiled_base import digitize, bincount, interp as compiled_interp
else:
    from _compiled_base import _insert, bincount
    # TODO: Implement these
    def add_docstring(*args, **kw):
        # Docstrings are optional metadata; a silent no-op is acceptable.
        pass
    def digitize(*args, **kw):
        raise NotImplementedError()
    def compiled_interp(*args, **kw):
        raise NotImplementedError()
from arraysetops import setdiff1d
from utils import deprecate
import numpy as np
def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
        Input object.

    Returns
    -------
    b : {0, 1}
        1 if `y` has an iterator method or is a sequence, 0 otherwise.

    Examples
    --------
    >>> np.iterable([1, 2, 3])
    1
    >>> np.iterable(2)
    0

    """
    try:
        iter(y)
    except:  # any failure at all means "not iterable" (historical behavior)
        return 0
    else:
        return 1
def histogram(a, bins=10, range=None, normed=False, weights=None):
    """
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a sequence,
        it defines the bin edges, including the rightmost edge, allowing
        for non-uniform bin widths.
    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored.
    normed : bool, optional
        If False, the result will contain the number of samples
        in each bin. If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.
    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in `a`
        only contributes its associated weight towards the bin count
        (instead of 1). If `normed` is True, the weights are normalized,
        so that the integral of the density over the range remains 1

    Returns
    -------
    hist : array
        The values of the histogram. See `normed` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    See Also
    --------
    histogramdd, bincount, searchsorted

    Notes
    -----
    All but the last (righthand-most) bin is half-open. In other words, if
    `bins` is::

        [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
    second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
    4.

    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), normed=True)
    (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))
    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, normed=True)
    >>> hist
    array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist*np.diff(bin_edges))
    1.0

    """
    a = asarray(a)
    if weights is not None:
        weights = asarray(weights)
        # NOTE(review): `weights.shape != a.shape` compares tuples and is a
        # single bool; np.any() merely passes it through.
        if np.any(weights.shape != a.shape):
            raise ValueError(
                'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()
    if (range is not None):
        mn, mx = range
        if (mn > mx):
            # NOTE(review): AttributeError is a historical oddity here
            # (ValueError would be conventional); kept since callers may
            # catch this exact type.
            raise AttributeError(
                'max must be larger than min in range parameter.')
    if not iterable(bins):
        # Scalar `bins`: build uniform edges spanning the (given or data) range.
        if range is None:
            range = (a.min(), a.max())
        mn, mx = [mi+0.0 for mi in range]
        if mn == mx:
            # Degenerate range: widen it so linspace yields distinct edges.
            mn -= 0.5
            mx += 0.5
        bins = linspace(mn, mx, bins+1, endpoint=True)
        uniform = True
    else:
        bins = asarray(bins)
        uniform = False
        if (np.diff(bins) < 0).any():
            raise AttributeError(
                'bins must increase monotonically.')
    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = int
    else:
        ntype = weights.dtype
    n = np.zeros(bins.shape, ntype)
    # Process the data in 64k-element blocks to bound the memory cost of the
    # per-block sort below.
    block = 65536
    if weights is None:
        for i in arange(0, len(a), block):
            sa = sort(a[i:i+block])
            # Cumulative counts at every edge: 'left' for interior edges,
            # 'right' for the last edge so the final bin is closed.
            n += np.r_[sa.searchsorted(bins[:-1], 'left'), \
                sa.searchsorted(bins[-1], 'right')]
    else:
        zero = array(0, dtype=ntype)
        for i in arange(0, len(a), block):
            tmp_a = a[i:i+block]
            tmp_w = weights[i:i+block]
            sorting_index = np.argsort(tmp_a)
            sa = tmp_a[sorting_index]
            sw = tmp_w[sorting_index]
            # Cumulative weights; indexing at edge positions is the weighted
            # analogue of the unweighted cumulative counts above.
            cw = np.concatenate(([zero,], sw.cumsum()))
            bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \
                sa.searchsorted(bins[-1], 'right')]
            n += cw[bin_index]
    # Differencing the cumulative sums yields per-bin counts/weights.
    n = np.diff(n)
    if normed:
        db = array(np.diff(bins), float)
        if not uniform:
            warnings.warn("""
            This release of NumPy fixes a normalization bug in histogram
            function occuring with non-uniform bin widths. The returned
            value is now a density: n / (N * bin width), where n is the
            bin count and N the total number of points.
            """)
        return n/db/n.sum(), bins
    else:
        return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : array_like
        The data to be histogrammed. It must be an (N,D) array or data
        that can be converted to such. The rows of the resulting array
        are the coordinates of points in a D dimensional polytope.
    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the bin edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).

    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitely in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    normed : boolean, optional
        If False, returns the number of samples in each bin. If True, returns
        the bin density, ie, the bin count divided by the bin hypervolume.
    weights : array_like (N,), optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if normed is True. If normed is False, the
        values of the returned histogram are equal to the sum of the weights
        belonging to the samples falling into each bin.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See normed and weights for
        the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.

    See Also
    --------
    histogram: 1D histogram
    histogram2d: 2D histogram

    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)

    """
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = atleast_2d(sample).T
        N, D = sample.shape

    nbin = empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = asarray(weights)

    try:
        M = len(bins)
        if M != D:
            raise AttributeError(
                'The dimension of bins must be equal'\
                ' to the dimension of the sample x.')
    except TypeError:
        # A scalar bin count applies to every dimension.
        bins = D*[bins]

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        smin = atleast_1d(array(sample.min(0), float))
        smax = atleast_1d(array(sample.max(0), float))
    else:
        smin = zeros(D)
        smax = zeros(D)
        for i in arange(D):
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in arange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # Create edge arrays
    for i in arange(D):
        if isscalar(bins[i]):
            nbin[i] = bins[i] + 2 # +2 for outlier bins
            edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
        else:
            edges[i] = asarray(bins[i], float)
            nbin[i] = len(edges[i])+1 # +1 for outlier bins
        dedges[i] = diff(edges[i])

    nbin = asarray(nbin)

    # Compute the bin number each sample falls into.
    Ncount = {}
    for i in arange(D):
        Ncount[i] = digitize(sample[:,i], edges[i])

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.
    for i in arange(D):
        # Rounding precision
        decimal = int(-log10(dedges[i].min())) +6
        # Find which points are on the rightmost edge.
        on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1],
            decimal))[0]
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1

    # Flattened histogram matrix (1D)
    # Reshape is used so that overlarge arrays
    # will raise an error.
    hist = zeros(nbin, float).reshape(-1)

    # Compute the sample indices in the flattened histogram matrix.
    ni = nbin.argsort()
    xy = zeros(N, int)
    for i in arange(0, D-1):
        xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
    xy += Ncount[ni[-1]]

    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    if len(xy) == 0:
        return zeros(nbin-2, int), edges
    flatcount = bincount(xy, weights)
    a = arange(len(flatcount))
    hist[a] = flatcount

    # Shape into a proper matrix
    hist = hist.reshape(sort(nbin))
    for i in arange(nbin.size):
        j = ni.argsort()[i]
        hist = hist.swapaxes(i,j)
        ni[i],ni[j] = ni[j],ni[i]

    # Remove outliers (indices 0 and -1 for each dimension).
    # FIX: index with a tuple of slices -- indexing with a *list* of slices
    # was deprecated in NumPy 1.15 and removed in later releases.
    core = D*[slice(1,-1)]
    hist = hist[tuple(core)]

    # Normalize if normed is True
    if normed:
        s = hist.sum()
        for i in arange(D):
            shape = ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s

    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
def average(a, axis=None, weights=None, returned=False):
    """
    Compute the weighted average along the specified axis.

    Parameters
    ----------
    a : array_like
        Array containing data to be averaged. If `a` is not an array, a
        conversion is attempted.
    axis : int, optional
        Axis along which to average `a`. If `None`, averaging is done over
        the flattened array.
    weights : array_like, optional
        An array of weights associated with the values in `a`. Each value in
        `a` contributes to the average according to its associated weight.
        The weights array can either be 1-D (in which case its length must be
        the size of `a` along the given axis) or of the same shape as `a`.
        If `weights=None`, then all data in `a` are assumed to have a
        weight equal to one.
    returned : bool, optional
        Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
        is returned, otherwise only the average is returned.
        If `weights=None`, `sum_of_weights` is equivalent to the number of
        elements over which the average is taken.

    Returns
    -------
    average, [sum_of_weights] : {array_type, double}
        Return the average along the specified axis. When returned is `True`,
        return a tuple with the average as the first element and the sum
        of the weights as the second element. The return type is `Float`
        if `a` is of integer type, otherwise it is of the same type as `a`.
        `sum_of_weights` is of the same type as `average`.

    Raises
    ------
    ZeroDivisionError
        When all weights along axis are zero. See `numpy.ma.average` for a
        version robust to this type of error.
    TypeError
        When the length of 1D `weights` is not the same as the shape of `a`
        along axis.

    See Also
    --------
    mean
    ma.average : average for masked arrays

    Examples
    --------
    >>> data = range(1,5)
    >>> np.average(data)
    2.5
    >>> np.average(range(1,11), weights=range(10,0,-1))
    4.0

    """
    if not isinstance(a, np.matrix):
        a = np.asarray(a)

    if weights is None:
        avg = a.mean(axis)
        # Number of elements averaged over; cast to the result dtype.
        scl = avg.dtype.type(a.size/avg.size)
    else:
        # Promote to float so integer inputs divide correctly.
        a = a + 0.0
        # FIX: was np.array(weights, dtype=a.dtype, copy=0).  The meaning of
        # copy=False changed in NumPy 2.0 (it now raises when a copy is
        # required); asarray keeps the original copy-only-if-needed behavior.
        wgt = np.asarray(weights, dtype=a.dtype)
        # Sanity checks
        if a.shape != wgt.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a "\
                    "and weights differ.")
            if wgt.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and "\
                    "weights differ.")
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with "\
                    "specified axis.")
            # setup wgt to broadcast along axis (ndmin pads leading axes,
            # then the weights axis is swapped into position `axis`).
            wgt = np.array(wgt, ndmin=a.ndim).swapaxes(-1, axis)
        scl = wgt.sum(axis=axis)
        if (scl == 0.0).any():
            raise ZeroDivisionError(
                "Weights sum to zero, can't be normalized")
        avg = np.multiply(a, wgt).sum(axis)/scl

    if returned:
        # Broadcast scl to the shape of avg.
        scl = np.multiply(avg, 0) + scl
        return avg, scl
    else:
        return avg
def asarray_chkfinite(a):
    """
    Convert the input to an array, checking for NaNs or Infs.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array.
        Success requires no NaNs or Infs.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`. No copy is performed if the input
        is already an ndarray.

    Raises
    ------
    ValueError
        If `a` contains NaN (Not a Number) or Inf (Infinity).

    See Also
    --------
    asarray : Similar function without the finiteness check.

    Examples
    --------
    >>> np.asarray_chkfinite([1, 2])
    array([1, 2])

    """
    arr = asarray(a)
    # Only floating (and complex) dtypes can hold NaN/Inf, so the check is
    # skipped for everything else.
    if arr.dtype.char in typecodes['AllFloat']:
        if not _nx.isfinite(arr).all():
            raise ValueError(
                "array must not contain infs or NaNs")
    return arr
def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluate a piecewise-defined function.

    Given a set of conditions and corresponding functions, evaluate each
    function on the input data wherever its condition is true.

    Parameters
    ----------
    x : ndarray
        The input domain.
    condlist : list of bool arrays
        One boolean mask per function, each of the same shape as `x`.
        If ``len(funclist) - len(condlist) == 1``, the extra function is
        the default, used wherever no condition holds.
    funclist : list of callables, f(x,*args,**kw), or scalars
        Functions (or constant values) evaluated over the selected pieces.
    args : tuple, optional
        Extra positional arguments forwarded to every function call.
    kw : dict, optional
        Extra keyword arguments forwarded to every function call.

    Returns
    -------
    out : ndarray
        Same shape and type as `x`; elements covered by no condition are
        left at zero.
    """
    x = asanyarray(x)
    num_funcs = len(funclist)
    # Accept a single condition (scalar or flat sequence) by wrapping it.
    if isscalar(condlist) or not isinstance(condlist[0], (list, ndarray)):
        condlist = [condlist]
    condlist = [asarray(cond, dtype=bool) for cond in condlist]
    num_conds = len(condlist)
    if num_conds == num_funcs - 1:
        # One extra function: it becomes the "otherwise" piece, active
        # wherever no explicit condition matched.
        covered = condlist[0]
        for k in range(1, num_conds):
            covered |= condlist[k]
        condlist.append(~covered)
        num_conds += 1
    if num_conds != num_funcs:
        raise ValueError(
            "function list and condition list must be the same")
    zerod = False
    # This is a hack to work around problems with NumPy's
    # handling of 0-d arrays and boolean indexing with
    # numpy.bool_ scalars
    if x.ndim == 0:
        x = x[None]
        zerod = True
        condlist = [cond[None] if cond.ndim == 0 else cond
                    for cond in condlist]
    y = zeros(x.shape, x.dtype)
    for func, cond in zip(funclist, condlist):
        if not callable(func):
            # Constant piece.
            y[cond] = func
        else:
            selected = x[cond]
            if selected.size > 0:
                y[cond] = func(selected, *args, **kw)
    if zerod:
        y = y.squeeze()
    return y
def select(condlist, choicelist, default=0):
    """
    Return an array drawn from elements in choicelist, depending on conditions.

    Parameters
    ----------
    condlist : list of bool ndarrays
        The list of conditions which determine from which array in `choicelist`
        the output elements are taken. When multiple conditions are satisfied,
        the first one encountered in `condlist` is used.
    choicelist : list of ndarrays
        The list of arrays from which the output elements are taken. It has
        to be of the same length as `condlist`.
    default : scalar, optional
        The element inserted in `output` when all conditions evaluate to False.

    Returns
    -------
    output : ndarray
        The output at position m is the m-th element of the array in
        `choicelist` where the m-th element of the corresponding array in
        `condlist` is True.

    See Also
    --------
    where : Return elements from one of two arrays depending on condition.
    take, choose, compress, diag, diagonal

    Examples
    --------
    >>> x = np.arange(10)
    >>> condlist = [x<3, x>5]
    >>> choicelist = [x, x**2]
    >>> np.select(condlist, choicelist)
    array([ 0,  1,  2,  0,  0,  0, 36, 49, 64, 81])

    """
    n = len(condlist)
    n2 = len(choicelist)
    if n2 != n:
        raise ValueError(
            "list of cases must be same length as list of conditions")
    # Prepend the default so that index 0 fed to `choose` selects it; the
    # k-th condition (1-based below) then selects the k-th original choice.
    choicelist = [default] + choicelist
    # S encodes, per element, the index of the FIRST true condition: `pfac`
    # is the running product of the negated earlier conditions, so a later
    # condition only contributes where nothing earlier matched.
    S = 0
    pfac = 1
    for k in range(1, n+1):
        S += k * pfac * asarray(condlist[k-1])
        if k < n:
            pfac *= (1-asarray(condlist[k-1]))
    # handle special case of a 1-element condition but
    # a multi-element choice
    if type(S) in ScalarType or max(asarray(S).shape)==1:
        # Summing the choices is only a trick to obtain their broadcast
        # shape; S is then expanded to that shape so `choose` can index it.
        pfac = asarray(1)
        for k in range(n2+1):
            pfac = pfac + asarray(choicelist[k])
        if type(S) in ScalarType:
            S = S*ones(asarray(pfac).shape, type(S))
        else:
            S = S*ones(asarray(pfac).shape, S.dtype)
    return choose(S, tuple(choicelist))
def copy(a):
    """
    Return an array copy of the given object.

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    arr : ndarray
        Array interpretation of `a`; always a fresh copy, so later mutation
        of `a` does not affect the result.

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> z = np.copy(x)
    >>> x[0] = 10
    >>> x[0] == z[0]
    False

    """
    return array(a, copy=True)
# Basic operations
def gradient(f, *varargs):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using central differences in the interior
    and first differences at the boundaries. The returned gradient hence has
    the same shape as the input array.

    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.
    `*varargs` : scalars
        0, 1, or N scalars specifying the sample distances in each direction,
        that is: `dx`, `dy`, `dz`, ... The default distance is 1.

    Returns
    -------
    g : ndarray
        N arrays of the same shape as `f` giving the derivative of `f` with
        respect to each dimension.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
    >>> np.gradient(x)
    array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])
    >>> np.gradient(x, 2)
    array([ 0.5 ,  0.75,  1.25,  1.75,  2.25,  2.5 ])

    """
    N = len(f.shape)  # number of dimensions
    n = len(varargs)
    if n == 0:
        dx = [1.0]*N
    elif n == 1:
        dx = [varargs[0]]*N
    elif n == N:
        dx = list(varargs)
    else:
        # NOTE(review): SyntaxError is an odd choice (TypeError would be
        # conventional) but is kept, since callers may catch this type.
        raise SyntaxError(
            "invalid number of arguments")

    # use central differences on interior and first differences on endpoints
    outvals = []

    # create slice objects --- initially all are [:, :, ..., :]
    slice1 = [slice(None)]*N
    slice2 = [slice(None)]*N
    slice3 = [slice(None)]*N

    otype = f.dtype.char
    if otype not in ['f', 'd', 'F', 'D']:
        otype = 'd'

    for axis in range(N):
        # select out appropriate parts for this dimension
        out = np.zeros_like(f).astype(otype)
        slice1[axis] = slice(1, -1)
        slice2[axis] = slice(2, None)
        slice3[axis] = slice(None, -2)
        # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
        # FIX: index with tuples -- indexing an ndarray with a *list* of
        # slices was deprecated in NumPy 1.15 and removed later.
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])/2.0
        slice1[axis] = 0
        slice2[axis] = 1
        slice3[axis] = 0
        # 1D equivalent -- out[0] = (f[1] - f[0])
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])
        slice1[axis] = -1
        slice2[axis] = -1
        slice3[axis] = -2
        # 1D equivalent -- out[-1] = (f[-1] - f[-2])
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])

        # divide by step size
        outvals.append(out / dx[axis])

        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)

    if N == 1:
        return outvals[0]
    else:
        return outvals
def diff(a, n=1, axis=-1):
    """
    Calculate the n-th order discrete difference along given axis.

    The first order difference is ``out[i] = a[i+1] - a[i]`` along the given
    axis; higher orders are obtained by applying `diff` recursively.

    Parameters
    ----------
    a : array_like
        Input array
    n : int, optional
        The number of times values are differenced.
    axis : int, optional
        The axis along which the difference is taken, default is the last axis.

    Returns
    -------
    out : ndarray
        The `n` order differences; same shape as `a` except along `axis`,
        where the dimension is smaller by `n`.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])

    """
    if n == 0:
        return a
    if n < 0:
        raise ValueError(
            "order must be non-negative but got " + repr(n))
    a = asanyarray(a)
    ndim = len(a.shape)
    # Build index tuples selecting a[..., 1:, ...] and a[..., :-1, ...]
    # along `axis`.
    upper = [slice(None)]*ndim
    lower = [slice(None)]*ndim
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    first_diff = a[tuple(upper)] - a[tuple(lower)]
    # Recurse for higher orders.
    return diff(first_diff, n-1, axis=axis) if n > 1 else first_diff
def interp(x, xp, fp, left=None, right=None):
    """
    One-dimensional linear interpolation.

    Returns the one-dimensional piecewise linear interpolant to a function
    with given values at discrete data-points.

    Parameters
    ----------
    x : array_like
        The x-coordinates of the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points, must be increasing.
    fp : 1-D sequence of floats
        The y-coordinates of the data points, same length as `xp`.
    left : float, optional
        Value to return for `x < xp[0]`, default is `fp[0]`.
    right : float, optional
        Value to return for `x > xp[-1]`, defaults is `fp[-1]`.

    Returns
    -------
    y : {float, ndarray}
        The interpolated values, same shape as `x`.

    Raises
    ------
    ValueError
        If `xp` and `fp` have different length

    Notes
    -----
    Does not check that `xp` is increasing; if it is not, the results
    are nonsense.
    """
    # Python scalars, NumPy scalars and 0-d arrays all yield a plain
    # Python scalar back.
    scalar_like = isinstance(x, (float, int, number)) or (
        isinstance(x, np.ndarray) and x.ndim == 0)
    if scalar_like:
        return compiled_interp([x], xp, fp, left, right).item()
    return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
    """
    Return the angle of the complex argument.

    Parameters
    ----------
    z : array_like
        A complex number or sequence of complex numbers.
    deg : bool, optional
        Return angle in degrees if True, radians if False (default).

    Returns
    -------
    angle : {ndarray, scalar}
        The counterclockwise angle from the positive real axis on
        the complex plane.

    Examples
    --------
    >>> np.angle(1+1j, deg=True)
    45.0

    """
    scale = 180/pi if deg else 1.0
    z = asarray(z)
    if issubclass(z.dtype.type, _nx.complexfloating):
        return arctan2(z.imag, z.real) * scale
    # Real input: treat it as the real axis (angle is 0 or pi).
    return arctan2(0, z) * scale
def unwrap(p, discont=pi, axis=-1):
    """
    Unwrap by changing deltas between values to 2*pi complement.

    Unwrap radian phase `p` by changing absolute jumps greater than
    `discont` to their 2*pi complement along the given axis.

    Parameters
    ----------
    p : array_like
        Input array.
    discont : float, optional
        Maximum discontinuity between values, default is ``pi``.
    axis : int, optional
        Axis along which unwrap will operate, default is the last axis.

    Returns
    -------
    out : ndarray
        Output array.

    See Also
    --------
    rad2deg, deg2rad

    Notes
    -----
    If the discontinuity in `p` is smaller than ``pi``, but larger than
    `discont`, no unwrapping is done because taking the 2*pi complement
    would only make the discontinuity larger.

    Examples
    --------
    >>> phase = np.linspace(0, np.pi, num=5)
    >>> phase[3:] += np.pi
    >>> np.unwrap(phase)
    array([ 0.        ,  0.78539816,  1.57079633, -0.78539816,  0.        ])

    """
    p = asarray(p)
    nd = len(p.shape)
    dd = diff(p, axis=axis)
    slice1 = [slice(None, None)]*nd     # full slices
    slice1[axis] = slice(1, None)
    # Index with a tuple: multidimensional indexing with a *list* of
    # slices is rejected by modern numpy versions.
    slice1 = tuple(slice1)
    # Map each delta into (-pi, pi]; a delta of exactly -pi coming from a
    # positive jump is mapped back to +pi so the correction has the right sign.
    ddmod = mod(dd+pi, 2*pi)-pi
    _nx.putmask(ddmod, (ddmod==-pi) & (dd > 0), pi)
    ph_correct = ddmod - dd
    # Jumps smaller than `discont` need no correction.
    _nx.putmask(ph_correct, abs(dd)<discont, 0)
    up = array(p, copy=True, dtype='d')
    # Accumulate corrections so later samples inherit earlier shifts.
    up[slice1] = p[slice1] + ph_correct.cumsum(axis)
    return up
def sort_complex(a):
    """
    Sort a complex array using the real part first, then the imaginary part.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    out : complex ndarray
        Always returns a sorted complex array.

    Examples
    --------
    >>> np.sort_complex([5, 3, 6, 2, 1])
    array([ 1.+0.j,  2.+0.j,  3.+0.j,  5.+0.j,  6.+0.j])

    """
    result = array(a, copy=True)
    result.sort()
    if issubclass(result.dtype.type, _nx.complexfloating):
        return result
    # Real input: promote to the complex type matching its precision.
    kind = result.dtype.char
    if kind in 'bhBH':
        return result.astype('F')
    if kind == 'g':
        return result.astype('G')
    return result.astype('D')
def trim_zeros(filt, trim='fb'):
    """
    Trim the leading and/or trailing zeros from a 1-D array or sequence.

    Parameters
    ----------
    filt : 1-D array or sequence
        Input array.
    trim : str, optional
        A string with 'f' representing trim from front and 'b' to trim
        from back. Default is 'fb', trim zeros from both ends.

    Returns
    -------
    trimmed : 1-D array or sequence
        The result of trimming the input. The input data type is preserved.

    Examples
    --------
    >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
    >>> np.trim_zeros(a)
    array([1, 2, 3, 0, 2, 1])
    >>> np.trim_zeros([0, 1, 2, 0])
    [1, 2]

    """
    trim = trim.upper()
    start = 0
    if 'F' in trim:
        # Count leading zeros until the first non-zero element.
        for element in filt:
            if element != 0.:
                break
            start += 1
    stop = len(filt)
    if 'B' in trim:
        # Count trailing zeros by walking the sequence backwards.
        for element in filt[::-1]:
            if element != 0.:
                break
            stop -= 1
    return filt[start:stop]
import sys
if sys.hexversion < 0x2040000:
from sets import Set as set
@deprecate
def unique(x):
    """
    This function is deprecated. Use numpy.lib.arraysetops.unique()
    instead.
    """
    try:
        flat = x.flatten()
    except AttributeError:
        # Not an ndarray: fall back to Python's set for uniqueness.
        members = list(set(x))
        members.sort()
        return asarray(members)
    if flat.size == 0:
        return flat
    flat.sort()
    # Keep the first element and every element that differs from its
    # predecessor in the sorted copy.
    keep = concatenate(([True], flat[1:] != flat[:-1]))
    return flat[keep]
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
See Also
--------
take, put, putmask, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.

    Similar to ``np.putmask(arr, mask, vals)``, the difference is that
    `place` uses the first N elements of `vals`, where N is the number of
    True values in `mask`, while `putmask` uses the elements where `mask`
    is True. Note that `extract` does the exact opposite of `place`.

    Parameters
    ----------
    arr : array_like
        Array to put data into.
    mask : array_like
        Boolean mask array. Must have the same size as `a`.
    vals : 1-D sequence
        Values to put into `a`. Only the first N elements are used,
        where N is the number of True values in `mask`. If `vals` is
        smaller than N it will be repeated.

    See Also
    --------
    putmask, put, take, extract

    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr>2, [44, 55])
    >>> arr
    array([[ 0,  1,  2],
           [44, 55, 44]])

    """
    # Delegate to the compiled helper, which cycles `vals` over the
    # True positions of `mask`.
    return _insert(arr, mask, vals)
def _nanop(op, fill, a, axis=None):
"""
General operation on arrays with not-a-number values.
Parameters
----------
op : callable
Operation to perform.
fill : float
NaN values are set to fill before doing the operation.
a : array-like
Input array.
axis : {int, None}, optional
Axis along which the operation is computed.
By default the input is flattened.
Returns
-------
y : {ndarray, scalar}
Processed data.
"""
y = array(a, subok=True)
# We only need to take care of NaN's in floating point arrays
if np.issubdtype(y.dtype, np.integer):
return op(y, axis=axis)
mask = isnan(a)
# y[mask] = fill
# We can't use fancy indexing here as it'll mess w/ MaskedArrays
# Instead, let's fill the array directly...
np.putmask(y, mask, fill)
res = op(y, axis=axis)
mask_all_along_axis = mask.all(axis=axis)
# Along some axes, only nan's were encountered. As such, any values
# calculated along that axis should be set to nan.
if mask_all_along_axis.any():
if np.isscalar(res):
res = np.nan
else:
res[mask_all_along_axis] = np.nan
return res
def nansum(a, axis=None):
    """
    Return the sum of array elements over a given axis treating
    Not a Numbers (NaNs) as zero.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose sum is desired. If `a` is not an
        array, a conversion is attempted.
    axis : int, optional
        Axis along which the sum is computed. The default is to compute
        the sum of the flattened array.

    Returns
    -------
    y : ndarray
        An array with the same shape as `a`, with the specified axis
        removed. If `a` is a 0-d array, or if axis is None, a scalar is
        returned with the same dtype as `a`.

    See Also
    --------
    numpy.sum : Sum across array including Not a Numbers.
    isnan : Shows which elements are Not a Number (NaN).
    isfinite: Shows which elements are not: Not a Number, positive and
             negative infinity

    Notes
    -----
    If positive or negative infinity is present the result is the
    corresponding infinity; if both are present the result is Not A
    Number (NaN). Arithmetic is modular when using integer types, and
    no error is raised on overflow.

    Examples
    --------
    >>> np.nansum([1, np.nan])
    1.0
    >>> a = np.array([[1, 1], [1, np.nan]])
    >>> np.nansum(a)
    3.0
    >>> np.nansum(a, axis=0)
    array([ 2.,  1.])

    """
    # Zero is the additive identity: NaNs contribute nothing to the sum.
    return _nanop(np.sum, 0, a, axis)
def nanmin(a, axis=None):
    """
    Return the minimum of an array or minimum along an axis ignoring any NaNs.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose minimum is desired.
    axis : int, optional
        Axis along which the minimum is computed. The default is to
        compute the minimum of the flattened array.

    Returns
    -------
    nanmin : ndarray
        A new array or a scalar array with the result.

    See Also
    --------
    numpy.amin : Minimum across array including any Not a Numbers.
    numpy.nanmax : Maximum across array ignoring any Not a Numbers.
    isnan : Shows which elements are Not a Number (NaN).
    isfinite: Shows which elements are not: Not a Number, positive and
             negative infinity

    Notes
    -----
    Positive infinity is treated as a very large number and negative
    infinity as a very small (i.e. negative) number. If the input has an
    integer type, an integer type is returned unless the input contains
    NaNs and infinity.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, np.nan]])
    >>> np.nanmin(a)
    1.0
    >>> np.nanmin(a, axis=0)
    array([ 1.,  2.])
    >>> np.nanmin([1, 2, np.nan, np.NINF])
    -inf

    """
    # +inf is the identity for min: NaNs replaced by it can never win.
    return _nanop(np.min, np.inf, a, axis)
def nanargmin(a, axis=None):
    """
    Return indices of the minimum values over an axis, ignoring NaNs.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate. By default flattened input is used.

    Returns
    -------
    index_array : ndarray
        An array of indices or a single index value.

    See Also
    --------
    argmin, nanargmax

    Examples
    --------
    >>> a = np.array([[np.nan, 4], [2, 3]])
    >>> np.argmin(a)
    0
    >>> np.nanargmin(a)
    2
    >>> np.nanargmin(a, axis=0)
    array([1, 1])
    >>> np.nanargmin(a, axis=1)
    array([1, 0])

    """
    # NaNs become +inf so argmin never selects their positions.
    return _nanop(np.argmin, np.inf, a, axis)
def nanmax(a, axis=None):
    """
    Return the maximum of an array or maximum along an axis ignoring any NaNs.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose maximum is desired. If `a` is not
        an array, a conversion is attempted.
    axis : int, optional
        Axis along which the maximum is computed. The default is to
        compute the maximum of the flattened array.

    Returns
    -------
    nanmax : ndarray
        An array with the same shape as `a`, with the specified axis
        removed. If `a` is a 0-d array, or if axis is None, a ndarray
        scalar is returned with the same dtype as `a`.

    See Also
    --------
    numpy.amax : Maximum across array including any Not a Numbers.
    numpy.nanmin : Minimum across array ignoring any Not a Numbers.
    isnan : Shows which elements are Not a Number (NaN).
    isfinite: Shows which elements are not: Not a Number, positive and
             negative infinity

    Notes
    -----
    Positive infinity is treated as a very large number and negative
    infinity as a very small (i.e. negative) number. If the input has an
    integer type, an integer type is returned unless the input contains
    NaNs and infinity.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, np.nan]])
    >>> np.nanmax(a)
    3.0
    >>> np.nanmax(a, axis=0)
    array([ 3.,  2.])
    >>> np.nanmax([1, 2, np.nan, np.inf])
    inf

    """
    # -inf is the identity for max: NaNs replaced by it can never win.
    return _nanop(np.max, -np.inf, a, axis)
def nanargmax(a, axis=None):
    """
    Return indices of the maximum values over an axis, ignoring NaNs.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate. By default flattened input is used.

    Returns
    -------
    index_array : ndarray
        An array of indices or a single index value.

    See Also
    --------
    argmax, nanargmin

    Examples
    --------
    >>> a = np.array([[np.nan, 4], [2, 3]])
    >>> np.argmax(a)
    0
    >>> np.nanargmax(a)
    1
    >>> np.nanargmax(a, axis=0)
    array([1, 0])
    >>> np.nanargmax(a, axis=1)
    array([1, 1])

    """
    # NaNs become -inf so argmax never selects their positions.
    return _nanop(np.argmax, -np.inf, a, axis)
def disp(mesg, device=None, linefeed=True):
    """
    Display a message on a device.

    Parameters
    ----------
    mesg : str
        Message to display.
    device : object
        Device to write message. If None, defaults to ``sys.stdout``
        which is very similar to ``print``. `device` needs to have
        ``write()`` and ``flush()`` methods.
    linefeed : bool, optional
        Option whether to print a line feed or not. Defaults to True.

    Raises
    ------
    AttributeError
        If `device` does not have a ``write()`` or ``flush()`` method.

    Examples
    --------
    Besides ``sys.stdout``, a file-like object can also be used as it
    has both required methods:

    >>> from StringIO import StringIO
    >>> buf = StringIO()
    >>> np.disp('"Display" in a file', device=buf)
    >>> buf.getvalue()
    '"Display" in a file\\n'

    """
    if device is None:
        import sys
        device = sys.stdout
    text = '%s' % mesg
    if linefeed:
        text = text + '\n'
    device.write(text)
    device.flush()
    return
# return number of input arguments and
# number of default arguments
def _get_nargs(obj):
    """Return ``(nargs, ndefaults)`` for the callable *obj*.

    Tries, in order: ``inspect`` introspection (Python 3 branch only),
    reading the Python 2 ``func_code`` object directly, and finally
    calling ``obj()`` with no arguments and parsing the resulting
    ``TypeError`` message.
    """
    import re
    # Matches CPython's "f() takes exactly N argument(s) (M given)"
    # TypeError text; the counts may be digits or spelled-out words.
    terr = re.compile(r'.*? takes (exactly|at least) (?P<exargs>(\d+)|(\w+))' +
                      r' argument(s|) \((?P<gargs>(\d+)|(\w+)) given\)')
    def _convert_to_int(strval):
        """Convert a count captured from the message ('2' or 'two') to int."""
        try:
            result = int(strval)
        except ValueError:
            if strval=='zero':
                result = 0
            elif strval=='one':
                result = 1
            elif strval=='two':
                result = 2
            # How high to go? English only?
            else:
                raise
        return result
    if not callable(obj):
        raise TypeError(
            "Object is not callable.")
    if sys.version_info[0] >= 3:
        # inspect currently fails for binary extensions
        # like math.cos. So fall back to other methods if
        # it fails.
        import inspect
        try:
            spec = inspect.getargspec(obj)
            nargs = len(spec.args)
            if spec.defaults:
                ndefaults = len(spec.defaults)
            else:
                ndefaults = 0
            if inspect.ismethod(obj):
                # Bound methods receive `self` implicitly; don't count it.
                nargs -= 1
            return nargs, ndefaults
        except:
            pass
    if hasattr(obj,'func_code'):
        # Plain Python 2 function or method: read the counts straight
        # off the code object.
        fcode = obj.func_code
        nargs = fcode.co_argcount
        if obj.func_defaults is not None:
            ndefaults = len(obj.func_defaults)
        else:
            ndefaults = 0
        if isinstance(obj, types.MethodType):
            nargs -= 1
        return nargs, ndefaults
    # Last resort (e.g. C extensions without introspection): provoke a
    # TypeError and parse its message.
    try:
        obj()
        return 0, 0
    except TypeError, msg:
        m = terr.match(str(msg))
        if m:
            nargs = _convert_to_int(m.group('exargs'))
            # NOTE(review): 'gargs' is the "(M given)" count from the
            # message, not a count of defaults -- verify this is intended.
            ndefaults = _convert_to_int(m.group('gargs'))
            if isinstance(obj, types.MethodType):
                nargs -= 1
            return nargs, ndefaults
    raise ValueError(
        "failed to determine the number of arguments for %s" % (obj))
class vectorize(object):
    """
    vectorize(pyfunc, otypes='', doc=None)
    Generalized function class.
    Define a vectorized function which takes a nested sequence
    of objects or numpy arrays as inputs and returns a
    numpy array as output. The vectorized function evaluates `pyfunc` over
    successive tuples of the input arrays like the python map function,
    except it uses the broadcasting rules of numpy.
    The data type of the output of `vectorized` is determined by calling
    the function with the first element of the input. This can be avoided
    by specifying the `otypes` argument.
    Parameters
    ----------
    pyfunc : callable
        A python function or method.
    otypes : str or list of dtypes, optional
        The output data type. It must be specified as either a string of
        typecode characters or a list of data type specifiers. There should
        be one data type specifier for each output.
    doc : str, optional
        The docstring for the function. If None, the docstring will be the
        `pyfunc` one.
    Examples
    --------
    >>> def myfunc(a, b):
    ...     \"\"\"Return a-b if a>b, otherwise return a+b\"\"\"
    ...     if a > b:
    ...         return a - b
    ...     else:
    ...         return a + b
    >>> vfunc = np.vectorize(myfunc)
    >>> vfunc([1, 2, 3, 4], 2)
    array([3, 4, 1, 2])
    The docstring is taken from the input function to `vectorize` unless it
    is specified
    >>> vfunc.__doc__
    'Return a-b if a>b, otherwise return a+b'
    >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
    >>> vfunc.__doc__
    'Vectorized `myfunc`'
    The output type is determined by evaluating the first element of the input,
    unless it is specified
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.int32'>
    >>> vfunc = np.vectorize(myfunc, otypes=[np.float])
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.float64'>
    """
    def __init__(self, pyfunc, otypes='', doc=None):
        # The wrapped Python callable; the underlying ufunc is built
        # lazily on first call (see __call__).
        self.thefunc = pyfunc
        self.ufunc = None
        nin, ndefault = _get_nargs(pyfunc)
        # A (0, 0) answer disables argument-count checking below.
        # NOTE(review): _get_nargs also returns (0, 0) when obj() succeeds
        # with no arguments, so "no args" and "unknown" are conflated here.
        if nin == 0 and ndefault == 0:
            self.nin = None
            self.nin_wo_defaults = None
        else:
            self.nin = nin
            # Minimum number of arguments a call must supply.
            self.nin_wo_defaults = nin - ndefault
        self.nout = None
        if doc is None:
            self.__doc__ = pyfunc.__doc__
        else:
            self.__doc__ = doc
        # Validate/normalize otypes into a string of dtype characters.
        if isinstance(otypes, str):
            self.otypes = otypes
            for char in self.otypes:
                if char not in typecodes['All']:
                    raise ValueError(
                        "invalid otype specified")
        elif iterable(otypes):
            self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
        else:
            raise ValueError(
                "Invalid otype specification")
        # Number of arguments used on the previous call; a change forces
        # the ufunc to be rebuilt (see __call__).
        self.lastcallargs = 0
    def __call__(self, *args):
        # get number of outputs and output types by calling
        # the function on the first entries of args
        nargs = len(args)
        if self.nin:
            if (nargs > self.nin) or (nargs < self.nin_wo_defaults):
                raise ValueError(
                    "Invalid number of arguments")
        # we need a new ufunc if this is being called with more arguments.
        if (self.lastcallargs != nargs):
            self.lastcallargs = nargs
            self.ufunc = None
            self.nout = None
        if self.nout is None or self.otypes == '':
            # Probe the function with the first element of each argument
            # to discover the number of outputs (and dtypes if needed).
            newargs = []
            for arg in args:
                newargs.append(asarray(arg).flat[0])
            theout = self.thefunc(*newargs)
            if isinstance(theout, tuple):
                self.nout = len(theout)
            else:
                self.nout = 1
                theout = (theout,)
            if self.otypes == '':
                otypes = []
                for k in range(self.nout):
                    otypes.append(asarray(theout[k]).dtype.char)
                self.otypes = ''.join(otypes)
        # Create ufunc if not already created
        if (self.ufunc is None):
            self.ufunc = frompyfunc(self.thefunc, nargs, self.nout)
        # Convert to object arrays first
        newargs = [array(arg,copy=False,subok=True,dtype=object) for arg in args]
        if self.nout == 1:
            # Cast the object-array result to the declared output dtype.
            _res = array(self.ufunc(*newargs),copy=False,
                         subok=True,dtype=self.otypes[0])
        else:
            _res = tuple([array(x,copy=False,subok=True,dtype=c) \
                          for x, c in zip(self.ufunc(*newargs), self.otypes)])
        return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
    """
    Estimate a covariance matrix, given data.

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element :math:`C_{ij}` is the covariance of
    :math:`x_i` and :math:`x_j`, and :math:`C_{ii}` is the variance of
    :math:`x_i`.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `m` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        form as that of `m`.
    rowvar : int, optional
        If `rowvar` is non-zero (default), then each row represents a
        variable, with observations in the columns. Otherwise, each
        column represents a variable, while the rows contain observations.
    bias : int, optional
        Default normalization is by ``(N - 1)``, where ``N`` is the number
        of observations given (unbiased estimate). If `bias` is 1, then
        normalization is by ``N``. These values can be overridden by using
        the keyword ``ddof`` in numpy versions >= 1.5.
    ddof : int, optional
        .. versionadded:: 1.5
        If not ``None`` normalization is by ``(N - ddof)``, where ``N``
        is the number of observations; this overrides the value implied
        by ``bias``. The default value is ``None``.

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables.

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Examples
    --------
    >>> x = [-2.1, -1, 4.3]
    >>> y = [3, 1.1, 0.12]
    >>> print np.cov(x, y)
    [[ 11.71        -4.286     ]
     [ -4.286        2.14413333]]
    >>> print np.cov(x)
    11.71

    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError("ddof must be integer")
    data = array(m, ndmin=2, dtype=float)
    # A single row is always treated as one variable of many observations.
    if data.shape[0] == 1:
        rowvar = 1
    if rowvar:
        axis = 0
        tup = (slice(None), newaxis)
    else:
        axis = 1
        tup = (newaxis, slice(None))
    if y is not None:
        y = array(y, copy=False, ndmin=2, dtype=float)
        data = concatenate((data, y), axis)
    # Center each variable on its mean.
    data -= data.mean(axis=1-axis)[tup]
    if rowvar:
        n_obs = data.shape[1]
    else:
        n_obs = data.shape[0]
    if ddof is None:
        if bias == 0:
            ddof = 1
        else:
            ddof = 0
    fact = float(n_obs - ddof)
    if rowvar:
        return (dot(data, data.T.conj()) / fact).squeeze()
    return (dot(data.T, data.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
    """
    Return correlation coefficients.

    Please refer to the documentation for `cov` for more detail. The
    relationship between the correlation coefficient matrix, `P`, and
    the covariance matrix, `C`, is

    .. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }

    The values of `P` are between -1 and 1, inclusive.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `m` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `m`.
    rowvar : int, optional
        If `rowvar` is non-zero (default), then each row represents a
        variable, with observations in the columns. Otherwise, each
        column represents a variable, while the rows contain observations.
    bias : int, optional
        Default normalization is by ``(N - 1)`` (unbiased estimate). If
        `bias` is 1, then normalization is by ``N``. These values can be
        overridden by using the keyword ``ddof`` in numpy versions >= 1.5.
    ddof : {None, int}, optional
        .. versionadded:: 1.5
        If not ``None`` normalization is by ``(N - ddof)``, where ``N``
        is the number of observations; this overrides the value implied
        by ``bias``. The default value is ``None``.

    Returns
    -------
    out : ndarray
        The correlation coefficient matrix of the variables.

    See Also
    --------
    cov : Covariance matrix

    """
    c = cov(x, y, rowvar, bias, ddof)
    try:
        variances = diag(c)
    except ValueError:
        # 0-d (scalar) covariance: a variable correlates perfectly
        # with itself.
        return 1
    return c / sqrt(multiply.outer(variances, variances))
def blackman(M):
    """
    Return the Blackman window.

    The Blackman window is a taper formed by using the first three terms
    of a summation of cosines. It was designed to have close to the
    minimal leakage possible. It is close to optimal, only slightly
    worse than a Kaiser window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.

    Returns
    -------
    out : ndarray
        The window, normalized to one (the value one appears only if the
        number of samples is odd).

    See Also
    --------
    bartlett, hamming, hanning, kaiser

    Notes
    -----
    The Blackman window is defined as

    .. math::  w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)

    It is also known as an apodization (which means "removing the foot",
    i.e. smoothing discontinuities at the beginning and end of the
    sampled signal) or tapering function, and is a "near optimal"
    tapering function, almost as good (by some measures) as the Kaiser
    window.

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
    spectra, Dover Publications, New York.
    Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
    Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.

    Examples
    --------
    >>> np.blackman(12)
    array([ -1.38777878e-17,   3.26064346e-02,   1.59903635e-01,
             4.14397981e-01,   7.36045180e-01,   9.67046769e-01,
             9.67046769e-01,   7.36045180e-01,   4.14397981e-01,
             1.59903635e-01,   3.26064346e-02,  -1.38777878e-17])

    """
    # Degenerate sizes first: no points, or a single unit point.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    k = arange(0, M)
    span = M - 1
    return 0.42 - 0.5*cos(2.0*pi*k/span) + 0.08*cos(4.0*pi*k/span)
def bartlett(M):
    """
    Return the Bartlett window.

    The Bartlett window is very similar to a triangular window, except
    that the end points are at zero. It is often used in signal
    processing for tapering a signal, without generating too much ripple
    in the frequency domain.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.

    Returns
    -------
    out : array
        The triangular window, normalized to one (the value one appears
        only if the number of samples is odd), with the first and last
        samples equal to zero.

    See Also
    --------
    blackman, hamming, hanning, kaiser

    Notes
    -----
    The Bartlett window is defined as

    .. math:: w(n) = \\frac{2}{M-1} \\left(
              \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
              \\right)

    Note that convolution with this window produces linear
    interpolation. The Fourier transform of the Bartlett is the product
    of two sinc functions.

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.bartlett(12)
    array([ 0.        ,  0.18181818,  0.36363636,  0.54545455,  0.72727273,
            0.90909091,  0.90909091,  0.72727273,  0.54545455,  0.36363636,
            0.18181818,  0.        ])

    """
    # Degenerate sizes first: no points, or a single unit point.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    k = arange(0, M)
    # Rise linearly on the first half, fall symmetrically on the second.
    midpoint = (M - 1) / 2.0
    return where(less_equal(k, midpoint),
                 2.0*k/(M-1), 2.0 - 2.0*k/(M-1))
def hanning(M):
    """
    Return the Hanning window.

    The Hanning window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.

    Returns
    -------
    out : ndarray, shape(M,)
        The window, normalized to one (the value one appears only if `M`
        is odd).

    See Also
    --------
    bartlett, blackman, hamming, kaiser

    Notes
    -----
    The Hanning window is defined as

    .. math::  w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    The Hanning was named for Julius van Hann, an Austrian meterologist.
    It is also known as the Cosine Bell. Some authors prefer that it be
    called a Hann window, to help avoid confusion with the very similar
    Hamming window.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of
           power spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 106-108.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.hanning(12)
    array([ 0.        ,  0.07937323,  0.29229249,  0.57115742,  0.82743037,
            0.97974649,  0.97974649,  0.82743037,  0.57115742,  0.29229249,
            0.07937323,  0.        ])

    """
    # Degenerate sizes first: no points, or a single unit point.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    k = arange(0, M)
    return 0.5 - 0.5*cos(2.0*pi*k/(M-1))
def hamming(M):
    """
    Return the Hamming window.

    The Hamming window is a taper formed by using a weighted cosine:
    ``w(n) = 0.54 - 0.46*cos(2*pi*n/(M-1))`` for ``0 <= n <= M-1``.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray
        The window, normalized to one (the value one appears only if
        the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hanning, kaiser

    Notes
    -----
    Named for R. W. Hamming; commonly used in signal processing as a
    smoothing (apodization/tapering) function.

    Examples
    --------
    >>> np.hamming(12)
    array([ 0.08      ,  0.15302337,  0.34890909,  0.60546483,  0.84123594,
            0.98136677,  0.98136677,  0.84123594,  0.60546483,  0.34890909,
            0.15302337,  0.08      ])
    """
    # Degenerate sizes first: empty window, then the single-point window.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    return 0.54 - 0.46 * cos(2.0 * pi * samples / (M - 1))
## Code from cephes for i0
# Chebyshev series coefficients for the modified Bessel function I0,
# taken from the cephes library, in the order consumed by _chbevl
# (first element enters the recurrence first).
# _i0A covers the small-argument interval (used by _i0_1, for x <= 8,
# with the argument mapped via x/2 - 2).
_i0A = [
    -4.41534164647933937950E-18,
    3.33079451882223809783E-17,
    -2.43127984654795469359E-16,
    1.71539128555513303061E-15,
    -1.16853328779934516808E-14,
    7.67618549860493561688E-14,
    -4.85644678311192946090E-13,
    2.95505266312963983461E-12,
    -1.72682629144155570723E-11,
    9.67580903537323691224E-11,
    -5.18979560163526290666E-10,
    2.65982372468238665035E-9,
    -1.30002500998624804212E-8,
    6.04699502254191894932E-8,
    -2.67079385394061173391E-7,
    1.11738753912010371815E-6,
    -4.41673835845875056359E-6,
    1.64484480707288970893E-5,
    -5.75419501008210370398E-5,
    1.88502885095841655729E-4,
    -5.76375574538582365885E-4,
    1.63947561694133579842E-3,
    -4.32430999505057594430E-3,
    1.05464603945949983183E-2,
    -2.37374148058994688156E-2,
    4.93052842396707084878E-2,
    -9.49010970480476444210E-2,
    1.71620901522208775349E-1,
    -3.04682672343198398683E-1,
    6.76795274409476084995E-1]
# _i0B covers the large-argument interval (used by _i0_2, for x > 8,
# with the argument mapped via 32/x - 2).
_i0B = [
    -7.23318048787475395456E-18,
    -4.83050448594418207126E-18,
    4.46562142029675999901E-17,
    3.46122286769746109310E-17,
    -2.82762398051658348494E-16,
    -3.42548561967721913462E-16,
    1.77256013305652638360E-15,
    3.81168066935262242075E-15,
    -9.55484669882830764870E-15,
    -4.15056934728722208663E-14,
    1.54008621752140982691E-14,
    3.85277838274214270114E-13,
    7.18012445138366623367E-13,
    -1.79417853150680611778E-12,
    -1.32158118404477131188E-11,
    -3.14991652796324136454E-11,
    1.18891471078464383424E-11,
    4.94060238822496958910E-10,
    3.39623202570838634515E-9,
    2.26666899049817806459E-8,
    2.04891858946906374183E-7,
    2.89137052083475648297E-6,
    6.88975834691682398426E-5,
    3.36911647825569408990E-3,
    8.04490411014108831608E-1]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in xrange(1,len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
    # I0 on the small-argument branch (x <= 8, per the split in i0):
    # exp(x) times the Chebyshev expansion of exp(-x)*I0(x), with the
    # argument rescaled by x/2 - 2.
    return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
    # I0 on the large-argument branch (x > 8, per the split in i0):
    # argument rescaled by 32/x - 2, with an extra 1/sqrt(x) factor.
    return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
    """
    Modified Bessel function of the first kind, order 0.

    Usually denoted :math:`I_0`.  This function broadcasts, but will *not*
    "up-cast" int dtype arguments unless accompanied by at least one float
    or complex dtype argument.

    Parameters
    ----------
    x : array_like, dtype float or complex
        Argument of the Bessel function.

    Returns
    -------
    out : ndarray, shape = x.shape, dtype = x.dtype
        The modified Bessel function evaluated at each element of `x`.

    Raises
    ------
    TypeError: array cannot be safely cast to required type
        If the argument consists exclusively of int dtypes.

    See Also
    --------
    scipy.special.iv, scipy.special.ive

    Notes
    -----
    Uses Clenshaw's algorithm as referenced by Abramowitz and Stegun:
    the domain is split at 8, and a Chebyshev polynomial expansion is
    employed on each interval.

    Examples
    --------
    >>> np.i0([0.])
    array(1.0)
    """
    x = atleast_1d(x).copy()
    out = empty_like(x)
    # I0 is even, so fold negative arguments onto the positive axis.
    negative = (x < 0)
    x[negative] = -x[negative]
    # Small and large arguments use different Chebyshev expansions.
    small = (x <= 8.0)
    out[small] = _i0_1(x[small])
    large = ~small
    out[large] = _i0_2(x[large])
    return out.squeeze()
## End of cephes code for i0
def kaiser(M,beta):
    """
    Return the Kaiser window.

    The Kaiser window is a taper formed by using a Bessel function:
    ``w(n) = I0(beta * sqrt(1 - ((n-alpha)/alpha)**2)) / I0(beta)``
    with ``alpha = (M-1)/2`` and :math:`I_0` the modified zeroth-order
    Bessel function.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    beta : float
        Shape parameter for the window (0 gives a rectangular window,
        ~5 resembles a Hamming, ~6 a Hanning, 8.6 a Blackman).

    Returns
    -------
    out : array
        The window, normalized to one (the value one appears only if
        the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hamming, hanning

    Notes
    -----
    As beta gets large the window narrows; the number of samples must be
    large enough to sample the increasingly narrow spike, otherwise NaNs
    are returned.

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
           John Wiley and Sons, New York, (1966).

    Examples
    --------
    >>> np.kaiser(12, 14)
    array([  7.72686684e-06,   3.46009194e-03,   4.65200189e-02,
             2.29737120e-01,   5.99885316e-01,   9.45674898e-01,
             9.45674898e-01,   5.99885316e-01,   2.29737120e-01,
             4.65200189e-02,   3.46009194e-03,   7.72686684e-06])
    """
    # NOTE(review): numpy.dual dispatches to scipy's i0 when scipy is
    # installed; numpy.dual was removed in recent NumPy versions -- confirm
    # the targeted NumPy release before relying on this import.
    from numpy.dual import i0
    # Consistency fix: like hanning/hamming/blackman, return an empty
    # array for non-positive M instead of falling through.
    if M < 1:
        return np.array([])
    if M == 1:
        return np.array([1.])
    n = arange(0, M)
    alpha = (M - 1) / 2.0
    return i0(beta * sqrt(1 - ((n - alpha) / alpha) ** 2.0)) / i0(float(beta))
def sinc(x):
    """
    Return the sinc function, :math:`\\sin(\\pi x)/(\\pi x)`.

    Parameters
    ----------
    x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.

    Returns
    -------
    out : ndarray
        ``sinc(x)``, with the same shape as the input.

    Notes
    -----
    ``sinc(0)`` is the limit value 1 (implemented by substituting a tiny
    nonzero value before dividing).  The sinc function is used in signal
    processing: anti-aliasing, Lanczos resampling and bandlimited
    interpolation.

    References
    ----------
    .. [1] Wikipedia, "Sinc function",
           http://en.wikipedia.org/wiki/Sinc_function

    Examples
    --------
    >>> np.sinc(np.array([0.0, 1.0]))
    array([ 1.,  0.])
    """
    x = np.asanyarray(x)
    # Replace exact zeros with a tiny value so the division below is
    # well-defined; the quotient is then 1 to machine precision.
    argument = pi * where(x == 0, 1.0e-20, x)
    return sin(argument) / argument
def msort(a):
    """
    Return a copy of an array sorted along the first axis.

    Parameters
    ----------
    a : array_like
        Array to be sorted.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    sort

    Notes
    -----
    ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
    """
    result = array(a, subok=True, copy=True)
    result.sort(axis=0)
    return result
def median(a, axis=None, out=None, overwrite_input=False):
    """
    Compute the median along the specified axis.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {None, int}, optional
        Axis along which the medians are computed. The default
        (axis=None) computes the median of the flattened array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : {False, True}, optional
        If True, allow use of the memory of the input array for the
        computation; the input will be (partially) sorted in place.

    Returns
    -------
    median : ndarray
        A new array holding the result (unless `out` is specified, in
        which case that array is returned instead). If the input
        contains integers, or floats of smaller precision than 64, the
        output data-type is float64; otherwise it matches the input.

    See Also
    --------
    mean, percentile

    Notes
    -----
    Given a vector V of length N, the median of V is the middle value of
    a sorted copy of V, ``V_sorted[(N-1)/2]``, when N is odd; when N is
    even it is the average of the two middle values.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.median(a)
    3.5
    >>> np.median(a, axis=0)
    array([ 6.5,  4.5,  2.5])
    """
    # `asorted` avoids shadowing the builtin `sorted` (the original did).
    if overwrite_input:
        if axis is None:
            asorted = a.ravel()
            asorted.sort()
        else:
            a.sort(axis=axis)
            asorted = a
    else:
        asorted = sort(a, axis=axis)
    if axis is None:
        # sort(a, axis=None) returned a flat array; take its only axis.
        axis = 0
    indexer = [slice(None)] * asorted.ndim
    index = int(asorted.shape[axis] / 2)
    if asorted.shape[axis] % 2 == 1:
        # Odd length: index with a length-1 slice so mean() below works.
        indexer[axis] = slice(index, index + 1)
    else:
        # Even length: slice out the two middle elements.
        indexer[axis] = slice(index - 1, index + 1)
    # Use mean in both cases to coerce the data type and honor `out`.
    # Fix: index with a tuple -- indexing with a list of slices is an
    # error in modern NumPy.
    return mean(asorted[tuple(indexer)], axis=axis, out=out)
def percentile(a, q, axis=None, out=None, overwrite_input=False):
    """
    Compute the qth percentile of the data along the specified axis.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : float in range of [0,100] (or sequence of floats)
        Percentile to compute, which must be between 0 and 100 inclusive.
    axis : {None, int}, optional
        Axis along which the percentiles are computed. The default
        (axis=None) computes the percentile of the flattened array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : {False, True}, optional
        If True, allow use of the memory of the input array for the
        computation; the input will be (partially) sorted in place.

    Returns
    -------
    pcntile : ndarray
        A new array holding the result (unless `out` is specified, in
        which case that array is returned instead). If the input
        contains integers, or floats of smaller precision than 64, the
        output data-type is float64; otherwise it matches the input.

    See Also
    --------
    mean, median

    Notes
    -----
    Given a vector V of length N, the qth percentile of V is the qth
    ranked value in a sorted copy of V. A weighted average of the two
    nearest neighbors is used if the normalized ranking does not match q
    exactly. The result is the same as the median if q=50, the same as
    the minimum if q=0, and the same as the maximum if q=100.
    (Doc fix: the original examples and notes used the 0-1 scale, e.g.
    q=0.5 for the median, which contradicts the documented [0, 100]
    range.)

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([ 6.5,  4.5,  2.5])
    """
    a = np.asarray(a)
    # Fast paths: the 0th/100th percentiles are just the min/max.
    if q == 0:
        return a.min(axis=axis, out=out)
    elif q == 100:
        return a.max(axis=axis, out=out)
    # `asorted` avoids shadowing the builtin `sorted` (the original did).
    if overwrite_input:
        if axis is None:
            asorted = a.ravel()
            asorted.sort()
        else:
            a.sort(axis=axis)
            asorted = a
    else:
        asorted = sort(a, axis=axis)
    if axis is None:
        axis = 0
    return _compute_qth_percentile(asorted, q, axis, out)
# handle sequence of q's without calling sort multiple times
def _compute_qth_percentile(sorted, q, axis, out):
if not isscalar(q):
p = [_compute_qth_percentile(sorted, qi, axis, None)
for qi in q]
if out is not None:
out.flat = p
return p
q = q / 100.0
if (q < 0) or (q > 1):
raise ValueError, "percentile must be either in the range [0,100]"
indexer = [slice(None)] * sorted.ndim
Nx = sorted.shape[axis]
index = q*(Nx-1)
i = int(index)
if i == index:
indexer[axis] = slice(i, i+1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i+2)
j = i + 1
weights = array([(j - index), (index - i)],float)
wshape = [1]*sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use add.reduce in both cases to coerce data type as well as
# check and use out array.
return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrate along the given axis using the composite trapezoidal rule.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        If `x` is None, spacing between all `y` elements is `dx`.
    dx : scalar, optional
        Spacing to use when `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate.

    Returns
    -------
    out : float
        Definite integral as approximated by the trapezoidal rule.

    See Also
    --------
    sum, cumsum

    References
    ----------
    .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule

    Examples
    --------
    >>> np.trapz([1,2,3])
    4.0
    >>> np.trapz([1,2,3], x=[4,6,8])
    8.0
    >>> np.trapz([1,2,3], dx=2)
    8.0
    """
    y = asanyarray(y)
    if x is None:
        d = dx
    else:
        x = asanyarray(x)
        if x.ndim == 1:
            d = diff(x)
            # Reshape so d broadcasts along the integration axis.
            shape = [1] * y.ndim
            shape[axis] = d.shape[0]
            d = d.reshape(shape)
        else:
            d = diff(x, axis=axis)
    nd = y.ndim
    # slice1 selects y[1:], slice2 selects y[:-1] along `axis`.
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    # Fix: index with tuples -- indexing with a list of slices is an
    # error in modern NumPy.
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
    except ValueError:
        # Operations didn't work -- cast to plain ndarrays and retry.
        d = np.asarray(d)
        y = np.asarray(y)
        ret = add.reduce(d * (y[slice1] + y[slice2]) / 2.0, axis)
    return ret
#always succeed
def add_newdoc(place, obj, doc):
    """Add documentation to `obj`, which lives in module `place`.

    If `doc` is a string, attach it to `obj` as a docstring.
    If `doc` is a tuple ``(attr, docstring)``, attach the docstring to
    that attribute of `obj`.
    If `doc` is a list of such tuples, apply each one in turn.

    This routine never raises an error.
    """
    try:
        new = {}
        # Py3 fix: exec is a function, not a statement ("exec ... in new").
        exec('from %s import %s' % (place, obj), new)
        if isinstance(doc, str):
            add_docstring(new[obj], doc.strip())
        elif isinstance(doc, tuple):
            add_docstring(getattr(new[obj], doc[0]), doc[1].strip())
        elif isinstance(doc, list):
            for val in doc:
                add_docstring(getattr(new[obj], val[0]), val[1].strip())
    except Exception:
        # Deliberately best-effort: docstring injection must never break
        # module import (e.g. builds with docstrings stripped).  Narrowed
        # from a bare except so KeyboardInterrupt/SystemExit still pass.
        pass
# From matplotlib
def meshgrid(x,y):
    """
    Return coordinate matrices from two coordinate vectors.

    Parameters
    ----------
    x, y : ndarray
        Two 1-D arrays representing the x and y coordinates of a grid.

    Returns
    -------
    X, Y : ndarray
        For vectors `x`, `y` with lengths ``Nx=len(x)`` and ``Ny=len(y)``,
        return ``(Ny, Nx)`` shaped arrays with the elements of `x` and `y`
        repeated to fill the matrix along the first dimension for `x`,
        the second for `y`.

    Examples
    --------
    >>> X, Y = np.meshgrid([1,2,3], [4,5,6,7])
    >>> X
    array([[1, 2, 3],
           [1, 2, 3],
           [1, 2, 3],
           [1, 2, 3]])
    """
    x = asarray(x)
    y = asarray(y)
    # The output has one row per y value and one column per x value,
    # hence the (rows, cols) = (len(y), len(x)) reversal.
    n_rows, n_cols = len(y), len(x)
    X = x.reshape(1, n_cols).repeat(n_rows, axis=0)
    Y = y.reshape(n_rows, 1).repeat(n_cols, axis=1)
    return X, Y
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim;
axis = ndim-1;
if ndim == 0:
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if (obj < 0 or obj >=N):
raise ValueError(
"invalid entry")
newshape[axis]-=1;
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1,None)
new[slobj] = arr[slobj2]
elif isinstance(obj, slice):
start, stop, step = obj.indices(N)
numtodel = len(xrange(start, stop, step))
if numtodel <= 0:
if wrap:
return wrap(new)
else:
return arr.copy()
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
# copy end chunck
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
obj = arange(start, stop, step, dtype=intp)
all = arange(start, stop, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = obj
new[slobj] = arr[slobj2]
else: # default behavior
obj = array(obj, dtype=intp, copy=0, ndmin=1)
all = arange(N, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = obj
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
delete : Delete elements from an array.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim-1
if (ndim == 0):
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if obj < 0 or obj > N:
raise ValueError(
"index (%d) out of range (0<=index<=%d) "\
"in dimension %d" % (obj, N, axis))
newshape[axis] += 1;
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = obj
new[slobj] = values
slobj[axis] = slice(obj+1,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj,None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif isinstance(obj, slice):
# turn it into a range object
obj = arange(*obj.indices(N),**{'dtype':intp})
# get two sets of indices
# one is the indices which will hold the new stuff
# two is the indices where arr will be copied over
obj = asarray(obj, dtype=intp)
numnew = len(obj)
index1 = obj + arange(numnew)
index2 = setdiff1d(arange(numnew+N),index1)
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = index1
slobj2[axis] = index2
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
    """
    Append values to the end of an array.

    Parameters
    ----------
    arr : array_like
        Values are appended to a copy of this array.
    values : array_like
        These values are appended to a copy of `arr`. When `axis` is
        given, `values` must have the correct shape (same as `arr`
        excluding `axis`); otherwise it may be any shape and is
        flattened before use.
    axis : int, optional
        The axis along which `values` are appended. If not given, both
        `arr` and `values` are flattened before use.

    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` appended along `axis`. `append`
        does not occur in-place: a new array is allocated and filled.
        If `axis` is None, `out` is a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    delete : Delete elements from an array.

    Examples
    --------
    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    arr = asanyarray(arr)
    if axis is None:
        # With no axis both operands are flattened and joined end to end.
        arr = arr if arr.ndim == 1 else arr.ravel()
        values = ravel(values)
        axis = arr.ndim - 1
    return concatenate((arr, values), axis=axis)
| gpl-3.0 |
macks22/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): sklearn.cross_validation was deprecated in 0.18 and removed
# in 0.20 in favour of sklearn.model_selection -- confirm target version.
from sklearn import cross_validation, datasets, linear_model
# Use only the first 150 samples of the diabetes dataset.
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
# Candidate regularization strengths, log-spaced from 1e-4 to 10**-0.5.
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
# Cross-validate the Lasso at each alpha, recording the mean and the
# standard deviation of the CV scores.
for alpha in alphas:
    lasso.alpha = alpha
    this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
    scores.append(np.mean(this_scores))
    scores_std.append(np.std(this_scores))
# Plot mean CV score against alpha on a log scale.
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
             'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
             'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
      "how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
# Fit LassoCV on each external fold and report the alpha it selected.
for k, (train, test) in enumerate(k_fold):
    lasso_cv.fit(X[train], y[train])
    print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
          format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
mhvk/astropy | astropy/visualization/wcsaxes/axislabels.py | 8 | 4732 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib import rcParams
from matplotlib.text import Text
import matplotlib.transforms as mtransforms
from .frame import RectangularFrame
class AxisLabels(Text):
    """
    Axis labels for a WCSAxes frame.

    Parameters
    ----------
    frame : frame object (e.g. `RectangularFrame`)
        The frame the labels are attached to.
    minpad : float or mapping, optional
        Minimum padding, in units of the axis-label font size, between the
        axis label and the tick labels.  May be a scalar or indexable by
        axis name (see ``get_minpad``).

    Remaining ``*args``/``**kwargs`` are passed to `matplotlib.text.Text`;
    ``weight``, ``size`` and ``color`` default to the corresponding
    ``axes.label*`` rcParams when not given explicitly.
    """

    def __init__(self, frame, minpad=1, *args, **kwargs):
        # Use rcParams if the following parameters were not specified explicitly
        if 'weight' not in kwargs:
            kwargs['weight'] = rcParams['axes.labelweight']
        if 'size' not in kwargs:
            kwargs['size'] = rcParams['axes.labelsize']
        if 'color' not in kwargs:
            kwargs['color'] = rcParams['axes.labelcolor']
        self._frame = frame
        super().__init__(*args, **kwargs)
        self.set_clip_on(True)
        self.set_visible_axes('all')
        self.set_ha('center')
        self.set_va('center')
        self._minpad = minpad
        self._visibility_rule = 'labels'

    def get_minpad(self, axis):
        """Return the minimum padding for *axis* (scalar or per-axis mapping)."""
        try:
            return self._minpad[axis]
        except TypeError:
            # _minpad is a plain scalar shared by all axes.
            return self._minpad

    def set_visible_axes(self, visible_axes):
        """Set which frame axes get a label ('all' or an iterable of axis names)."""
        self._visible_axes = visible_axes

    def get_visible_axes(self):
        """Return the axis names that should be labelled."""
        if self._visible_axes == 'all':
            return self._frame.keys()
        else:
            # Only keep requested axes that actually exist in the frame.
            return [x for x in self._visible_axes if x in self._frame]

    def set_minpad(self, minpad):
        """Set the minimum padding (in axis-label font sizes)."""
        self._minpad = minpad

    def set_visibility_rule(self, value):
        """
        Set when the label is drawn: 'always', 'labels' (only when tick
        labels are present) or 'ticks' (only when ticks are present).
        """
        allowed = ['always', 'labels', 'ticks']
        if value not in allowed:
            # BUGFIX: added the missing space after "one of" so the message
            # reads "... one of always / labels / ticks" rather than
            # "... one ofalways / labels / ticks".
            raise ValueError(f"Axis label visibility rule must be one of {' / '.join(allowed)}")
        self._visibility_rule = value

    def get_visibility_rule(self):
        """Return the current visibility rule ('always', 'labels' or 'ticks')."""
        return self._visibility_rule

    def draw(self, renderer, bboxes, ticklabels_bbox,
             coord_ticklabels_bbox, ticks_locs, visible_ticks):
        """
        Draw the label for each visible axis and append each drawn label's
        window extent to *bboxes* (so callers can avoid overlaps).
        """
        if not self.get_visible():
            return
        text_size = renderer.points_to_pixels(self.get_size())
        # Flatten the bboxes for all coords and all axes
        ticklabels_bbox_list = []
        for bbcoord in ticklabels_bbox.values():
            for bbaxis in bbcoord.values():
                ticklabels_bbox_list += bbaxis
        for axis in self.get_visible_axes():
            # Honour the visibility rule: skip axes with no ticks / no tick
            # labels when the corresponding rule is active.
            if self.get_visibility_rule() == 'ticks':
                if not ticks_locs[axis]:
                    continue
            elif self.get_visibility_rule() == 'labels':
                if not coord_ticklabels_bbox:
                    continue
            padding = text_size * self.get_minpad(axis)
            # Find position of the axis label. For now we pick the mid-point
            # along the path but in future we could allow this to be a
            # parameter.
            x, y, normal_angle = self._frame[axis]._halfway_x_y_angle()
            label_angle = (normal_angle - 90.) % 360.
            # Flip labels that would otherwise be upside-down.
            if 135 < label_angle < 225:
                label_angle += 180
            self.set_rotation(label_angle)
            # Find label position by looking at the bounding box of ticks'
            # labels and the image. It sets the default padding at 1 times the
            # axis label font size which can also be changed by setting
            # the minpad parameter.
            if isinstance(self._frame, RectangularFrame):
                if len(ticklabels_bbox_list) > 0 and ticklabels_bbox_list[0] is not None:
                    coord_ticklabels_bbox[axis] = [mtransforms.Bbox.union(ticklabels_bbox_list)]
                else:
                    coord_ticklabels_bbox[axis] = [None]
                visible = axis in visible_ticks and coord_ticklabels_bbox[axis][0] is not None
                # Offset the label outward from the relevant frame side,
                # starting from the tick-label bbox edge when one is visible.
                if axis == 'l':
                    if visible:
                        x = coord_ticklabels_bbox[axis][0].xmin
                    x = x - padding
                elif axis == 'r':
                    if visible:
                        x = coord_ticklabels_bbox[axis][0].x1
                    x = x + padding
                elif axis == 'b':
                    if visible:
                        y = coord_ticklabels_bbox[axis][0].ymin
                    y = y - padding
                elif axis == 't':
                    if visible:
                        y = coord_ticklabels_bbox[axis][0].y1
                    y = y + padding
            else:  # arbitrary axis
                # Push the label out along the frame normal.
                x = x + np.cos(np.radians(normal_angle)) * (padding + text_size * 1.5)
                y = y + np.sin(np.radians(normal_angle)) * (padding + text_size * 1.5)
            self.set_position((x, y))
            super().draw(renderer)
            bb = super().get_window_extent(renderer)
            bboxes.append(bb)
| bsd-3-clause |
bmazin/ARCONS-pipeline | util/ObsFile.py | 1 | 114785 | #!/bin/python
'''
Author: Matt Strader Date: August 19, 2012
The class ObsFile is an interface to observation files. It provides methods for typical ways of accessing and viewing observation data. It can also load and apply wavelength and flat calibration. With calibrations loaded, it can write the obs file out as a photon list
Looks for observation files in $MKID_RAW_PATH and calibration files organized in $MKID_PROC_PATH (intermediate or scratch path)
Class ObsFile:
__init__(self, fileName,verbose=False)
__del__(self)
__iter__(self)
loadFile(self, fileName,verbose=False)
checkIntegrity(self,firstSec=0,integrationTime=-1)
convertToWvl(self, pulseHeights, iRow, iCol, excludeBad=True)
createEmptyPhotonListFile(self)
displaySec(self, firstSec=0, integrationTime= -1, weighted=False,fluxWeighted=False, plotTitle='', nSdevMax=2,scaleByEffInt=False)
getFromHeader(self, name)
getPixel(self, iRow, iCol, firstSec=0, integrationTime= -1)
getPixelWvlList(self,iRow,iCol,firstSec=0,integrationTime=-1,excludeBad=True,dither=True)
getPixelCount(self, iRow, iCol, firstSec=0, integrationTime= -1,weighted=False, fluxWeighted=False, getRawCount=False)
getPixelLightCurve(self, iRow, iCol, firstSec=0, lastSec=-1, cadence=1, **kwargs)
getPixelPacketList(self, iRow, iCol, firstSec=0, integrationTime= -1)
getTimedPacketList_old(self, iRow, iCol, firstSec=0, integrationTime= -1)
getTimedPacketList(self, iRow, iCol, firstSec=0, integrationTime= -1)
getPixelCountImage(self, firstSec=0, integrationTime= -1, weighted=False,fluxWeighted=False, getRawCount=False,scaleByEffInt=False)
getAperturePixelCountImage(self, firstSec=0, integrationTime= -1, y_values=range(46), x_values=range(44), y_sky=[], x_sky=[], apertureMask=np.ones((46,44)), skyMask=np.zeros((46,44)), weighted=False, fluxWeighted=False, getRawCount=False, scaleByEffInt=False)
getSpectralCube(self,firstSec=0,integrationTime=-1,weighted=True,wvlStart=3000,wvlStop=13000,wvlBinWidth=None,energyBinWidth=None,wvlBinEdges=None)
getPixelSpectrum(self, pixelRow, pixelCol, firstSec=0, integrationTime= -1,weighted=False, fluxWeighted=False, wvlStart=3000, wvlStop=13000, wvlBinWidth=None, energyBinWidth=None, wvlBinEdges=None)
getPixelBadTimes(self, pixelRow, pixelCol)
getDeadPixels(self, showMe=False, weighted=True, getRawCount=False)
getNonAllocPixels(self, showMe=False)
getRoachNum(self,iRow,iCol)
getFrame(self, firstSec=0, integrationTime=-1)
loadCentroidListFile(self, centroidListFileName)
loadFlatCalFile(self, flatCalFileName)
loadFluxCalFile(self, fluxCalFileName)
loadHotPixCalFile(self, hotPixCalFileName, switchOnMask=True)
loadTimeAdjustmentFile(self,timeAdjustFileName,verbose=False)
loadWvlCalFile(self, wvlCalFileName)
loadFilter(self, filterName = 'V', wvlBinEdges = None,switchOnFilter = True):
makeWvlBins(energyBinWidth=.1, wvlStart=3000, wvlStop=13000)
parsePhotonPackets(self, packets, inter=interval(),doParabolaFitPeaks=True, doBaselines=True)
plotPixelSpectra(self, pixelRow, pixelCol, firstSec=0, integrationTime= -1,weighted=False, fluxWeighted=False)
getApertureSpectrum(self, pixelRow, pixelCol, radius1, radius2, weighted=False, fluxWeighted=False, lowCut=3000, highCut=7000,firstSec=0,integrationTime=-1)
plotPixelLightCurve(self,iRow,iCol,firstSec=0,lastSec=-1,cadence=1,**kwargs)
plotApertureSpectrum(self, pixelRow, pixelCol, radius1, radius2, weighted=False, fluxWeighted=False, lowCut=3000, highCut=7000, firstSec=0,integrationTime=-1)
setWvlCutoffs(self, wvlLowerLimit=3000, wvlUpperLimit=8000)
switchOffHotPixTimeMask(self)
switchOnHotPixTimeMask(self, reasons=[])
switchOffFilter(self)
switchOnFilter(self)
writePhotonList(self)
calculateSlices_old(inter, timestamps)
calculateSlices(inter, timestamps)
repackArray(array, slices)
'''
import sys, os
import warnings
import time
import numpy as np
from numpy import vectorize
from numpy import ma
from scipy import pi
import matplotlib.pyplot as plt
from matplotlib.dates import strpdate2num
from interval import interval, inf, imath
import tables
from tables.nodes import filenode
import astropy.constants
from util import utils
from util import MKIDStd
from util.FileName import FileName
from headers import TimeMask
from util.CalLookupFile import CalLookupFile
class ObsFile:
h = astropy.constants.h.to('eV s').value #4.135668e-15 #eV s
c = astropy.constants.c.to('m/s').value #'2.998e8 #m/s
angstromPerMeter = 1e10
nCalCoeffs = 3
def __init__(self, fileName, verbose=False, makeMaskVersion='v2',repeatable=False):
    """
    load the given file with fileName relative to $MKID_RAW_PATH

    Args:
        fileName (str): obs file name, absolute or relative to $MKID_RAW_PATH.
        verbose (bool): if True, print diagnostics while loading.
        makeMaskVersion (str): version tag used when building hot-pixel masks.
        repeatable (bool): if True, seed the wavelength-dither RNG with 0 so
            dithered wavelengths are reproducible between runs.
    """
    self.makeMaskVersion = makeMaskVersion
    self.loadFile(fileName,verbose=verbose)
    # Calibration-related attributes start as None/False so later code can
    # cheaply test whether each calibration has been loaded/applied.
    self.beammapFileName = None #Normally the beammap comes directly from the raw obs file itself, so this is only relevant if a new one is loaded with 'loadBeammapFile'.
    self.wvlCalFile = None #initialize to None for an easy test of whether a cal file has been loaded
    self.wvlCalFileName = None
    self.flatCalFile = None
    self.flatCalFileName = None
    self.fluxCalFile = None
    self.fluxCalFileName = None
    self.filterIsApplied = None
    self.filterTrans = None
    self.timeAdjustFile = None
    self.timeAdjustFileName = None
    self.hotPixFile = None
    self.hotPixFileName = None
    self.hotPixTimeMask = None
    self.hotPixIsApplied = False
    self.cosmicMaskIsApplied = False
    self.cosmicMask = None # interval of times to mask cosmic ray events
    self.cosmicMaskFileName = None
    self.centroidListFile = None
    self.centroidListFileName = None
    self.wvlLowerLimit = None
    self.wvlUpperLimit = None
    # Seed the dithering RNG (fixed seed when repeatability is requested).
    if repeatable:
        self.setWvlDitherSeed(seed=0)
    else:
        self.setWvlDitherSeed()
def __del__(self):
    """
    Closes the obs file and any cal files that are open.

    Every file handle is closed best-effort: failures (handle never opened,
    already closed, or attribute missing because __init__ did not finish)
    are swallowed so interpreter teardown never raises from here.
    """
    # BUGFIX: the original closed self.file twice -- once inside a try block
    # and again unconditionally at the end -- and the second, unguarded call
    # could raise during shutdown. Close each handle exactly once instead.
    for attrName in ('file', 'wvlCalFile', 'flatCalFile', 'fluxCalFile',
                     'timeAdjustFile', 'hotPixFile', 'centroidListFile'):
        try:
            getattr(self, attrName).close()
        except:
            # Bare except mirrors the original behavior: ignore any failure
            # (AttributeError for unset handles, errors from double-close).
            pass
def __iter__(self):
    """
    Allows easy iteration over pixels in obs file
    use with 'for pixel in obsFileObject:'
    yields a single pixel h5 dataset

    MJS 3/28
    Warning: if timeAdjustFile is loaded, the data from this
    function will not be corrected for roach delays as in getPixel().
    Use getPixel() instead.
    """
    # Row-major scan over the beammap; each entry names an h5 node holding
    # that pixel's per-second photon packet arrays.
    for iRow in xrange(self.nRow):
        for iCol in xrange(self.nCol):
            pixelLabel = self.beamImage[iRow][iCol]
            pixelData = self.file.getNode('/' + pixelLabel)
            yield pixelData
def loadFile(self, fileName,verbose=False):
    """
    Opens file and loads obs file attributes and beammap

    Args:
        fileName (str): absolute path, or path relative to $MKID_RAW_PATH.
        verbose (bool): if True, print a message before re-raising on error.

    Raises:
        Exception: if the resolved file path does not exist.
        IndexError: if the header table has no rows.
    """
    # Resolve the full path: absolute paths are used as-is, otherwise the
    # name is joined onto $MKID_RAW_PATH.
    if (os.path.isabs(fileName)):
        self.fileName = os.path.basename(fileName)
        self.fullFileName = fileName
    else:
        self.fileName = fileName
        # make the full file name by joining the input name
        # to the MKID_RAW_PATH (or . if the environment variable
        # is not defined)
        dataDir = os.getenv('MKID_RAW_PATH', '/')
        self.fullFileName = os.path.join(dataDir, self.fileName)
    if (not os.path.exists(self.fullFileName)):
        msg='file does not exist: %s'%self.fullFileName
        if verbose:
            print msg
        raise Exception(msg)
    #open the hdf5 file
    self.file = tables.openFile(self.fullFileName, mode='r')
    #get the header
    self.header = self.file.root.header.header
    self.titles = self.header.colnames
    try:
        self.info = self.header[0] #header is a table with one row
    except IndexError as inst:
        if verbose:
            print 'Can\'t read header for ',self.fullFileName
        raise inst
    # Useful information about data format set here.
    # For now, set all of these as constants.
    # If we get data taken with different parameters, straighten
    # that all out here.
    ## These parameters are for LICK2012 and PAL2012 data
    self.tickDuration = 1e-6 #s
    self.ticksPerSec = int(1.0 / self.tickDuration)
    self.intervalAll = interval[0.0, (1.0 / self.tickDuration) - 1]
    self.nonAllocPixelName = '/r0/p250/'
    # Photon packet bit layout (most to least significant):
    # 8 bits - channel
    # 12 bits - Parabola Fit Peak Height
    # 12 bits - Sampled Peak Height
    # 12 bits - Low pass filter baseline
    # 20 bits - Microsecond timestamp
    self.nBitsAfterParabolaPeak = 44
    self.nBitsAfterBaseline = 20
    self.nBitsInPulseHeight = 12
    self.nBitsInTimestamp = 20
    #bitmask of 12 ones
    self.pulseMask = int(self.nBitsInPulseHeight * '1', 2)
    #bitmask of 20 ones
    self.timestampMask = int(self.nBitsInTimestamp * '1', 2)
    #get the beam image.
    try:
        self.beamImage = self.file.getNode('/beammap/beamimage').read()
    except Exception as inst:
        if verbose:
            print 'Can\'t access beamimage for ',self.fullFileName
        raise inst
    #format for a pixelName in beamImage is /r#/p#/t# where r# is the roach number, p# is the pixel number
    # and t# is the starting timestamp
    self.beamImageRoaches = np.array([[int(s.split('r')[1].split('/')[0]) for s in row] for row in self.beamImage])
    self.beamImagePixelNums = np.array([[int(s.split('p')[1].split('/')[0]) for s in row] for row in self.beamImage])
    beamShape = self.beamImage.shape
    self.nRow = beamShape[0]
    self.nCol = beamShape[1]
def checkIntegrity(self,firstSec=0,integrationTime=-1):
"""
Checks the obs file for corrupted end-of-seconds
Corruption is indicated by timestamps greater than 1/tickDuration=1e6
returns 0 if no corruption found
"""
corruptedPixels = []
for iRow in xrange(self.nRow):
for iCol in xrange(self.nCol):
packetList = self.getPixelPacketList(iRow,iCol,firstSec,integrationTime)
timestamps,parabolaPeaks,baselines = self.parsePhotonPackets(packetList)
if np.any(timestamps > 1./self.tickDuration):
print 'Corruption detected in pixel (',iRow,iCol,')'
corruptedPixels.append((iRow,iCol))
corruptionFound = len(corruptedPixels) != 0
return corruptionFound
# exptime = self.getFromHeader('exptime')
# lastSec = firstSec + integrationTime
# if integrationTime == -1:
# lastSec = exptime-1
#
# corruptedSecs = []
# for pixelCoord in corruptedPixels:
# for sec in xrange(firstSec,lastSec):
# packetList = self.getPixelPacketList(pixelCoord[0],pixelCoord[1],sec,integrationTime=1)
# timestamps,parabolaPeaks,baselines = self.parsePhotonPackets(packetList)
# if np.any(timestamps > 1./self.tickDuration):
# pixelLabel = self.beamImage[iRow][iCol]
# corruptedSecs.append(sec)
# print 'Corruption in pixel',pixelLabel, 'at',sec
def convertWvlToPhase(self, wavelengths, iRow, iCol):
    """
    Inverts the wavelength calibration for one pixel: converts wavelengths
    (Angstroms) back to phase (pulse-height) values.

    Args:
        wavelengths: scalar or array of wavelengths in Angstroms.
        iRow, iCol (int): pixel coordinates into wvlCalTable.

    Returns:
        numpy array of phase values.
    """
    # BUGFIX: removed leftover debug 'print' statements and the unused
    # computation of the second quadratic root.
    const = self.wvlCalTable[iRow, iCol, 0]
    lin_term = self.wvlCalTable[iRow, iCol, 1]
    quad_term = self.wvlCalTable[iRow, iCol, 2]
    wavelengths = np.asarray(wavelengths)
    energies = ObsFile.h * ObsFile.c * ObsFile.angstromPerMeter / wavelengths
    if quad_term == 0:
        # Linear calibration: invert directly.
        phases = (energies - const) / lin_term
    else:
        # Quadratic calibration: the original code computed both roots of the
        # quadratic formula and returned the first one; that choice is
        # preserved here. (Presumably this is the physical phase branch --
        # TODO confirm.)
        discriminant = np.sqrt(-4.*const*quad_term + lin_term**2. + 4.*quad_term*energies)
        phases = (-1.*discriminant - lin_term) / (2.*quad_term)
    return phases
def convertToWvl(self, pulseHeights, iRow, iCol, excludeBad=True):
    """
    applies wavelength calibration to a list of photon pulse heights

    if excludeBad is True, wavelengths calculated as np.inf are excised from
    the array returned, as are wavelengths outside the fit limits of the
    wavecal

    Args:
        pulseHeights: array of pulse heights (ADC units).
        iRow, iCol (int): pixel coordinates into the calibration tables.
        excludeBad (bool): apply wavelength-validity cuts when True.

    Returns:
        numpy array of wavelengths in Angstroms (possibly empty).
    """
    #xOffset = self.wvlCalTable[iRow, iCol, 0]
    #yOffset = self.wvlCalTable[iRow, iCol, 1]
    #amplitude = self.wvlCalTable[iRow, iCol, 2]
    #energies = amplitude * (pulseHeights - xOffset) ** 2 + yOffset
    # Quadratic energy calibration: E = c0 + c1*h + c2*h^2.
    const = self.wvlCalTable[iRow, iCol, 0]
    lin_term = self.wvlCalTable[iRow, iCol, 1]
    quad_term = self.wvlCalTable[iRow, iCol, 2]
    energies = const+lin_term*pulseHeights+quad_term*pulseHeights**2.0
    # NOTE: index 1 holds the lower wavelength limit and index 0 the upper.
    wvlCalLowerLimit = self.wvlRangeTable[iRow, iCol, 1]
    wvlCalUpperLimit = self.wvlRangeTable[iRow, iCol, 0]
    #check if this pixel is completely valid in the wavelength range set for this ObsFile
    #if not, cut out the photons from this pixel
    if excludeBad and ((self.wvlUpperLimit != -1 and not self.wvlUpperLimit is None and wvlCalUpperLimit < self.wvlUpperLimit) or (self.wvlLowerLimit != -1 and not self.wvlLowerLimit is None and wvlCalLowerLimit > self.wvlLowerLimit)):
        wavelengths = np.array([],dtype=np.double)
        return wavelengths
    if excludeBad == True:
        # Zero energy would divide to infinity below; drop those photons.
        energies = energies[energies != 0]
    wavelengths = ObsFile.h * ObsFile.c * ObsFile.angstromPerMeter / energies
    # Lower cut: -1 means "use the per-pixel wavecal limit"; a non-None value
    # means "use the ObsFile-wide limit".
    if excludeBad == True and self.wvlLowerLimit == -1:
        wavelengths = wavelengths[wvlCalLowerLimit < wavelengths]
    elif excludeBad == True and self.wvlLowerLimit != None:
        wavelengths = wavelengths[self.wvlLowerLimit < wavelengths]
    # Upper cut: same convention as the lower cut.
    if excludeBad == True and self.wvlUpperLimit == -1:
        wavelengths = wavelengths[wavelengths < wvlCalUpperLimit]
    elif excludeBad == True and self.wvlUpperLimit != None:
        wavelengths = wavelengths[wavelengths < self.wvlUpperLimit]
    # if len(wavelengths) > 0 and self.flatCalFile != None:
    #     #filter out wavelengths without a valid flat weight
    #     pixelFlags = self.flatFlags[iRow,iCol]
    #     binIndices = np.digitize(wavelengths,self.flatCalWvlBins)-1
    #     wavelengths=wavelengths[np.logical_and(binIndices>=0,binIndices<len(pixelFlags))]
    #     binIndices=binIndices[np.logical_and(binIndices>=0,binIndices<len(pixelFlags))]
    #     flags = pixelFlags[binIndices]
    #     wavelengths = wavelengths[flags==1]
    return wavelengths
def createEmptyPhotonListFile(self,*nkwargs,**kwargs):
    """
    creates a photonList h5 file using header in headers.ArconsHeaders

    Shifted functionality to photonlist/photlist.py, JvE May 10 2013.
    See that function for input parameters and outputs.

    (The superseded in-place implementation that used to follow here as a
    commented-out block has been removed; see photonlist/photlist.py.)
    """
    import photonlist.photlist #Here instead of at top to avoid circular imports
    photonlist.photlist.createEmptyPhotonListFile(self,*nkwargs,**kwargs)
def displaySec(self, firstSec=0, integrationTime= -1, weighted=False,
               fluxWeighted=False, plotTitle='', nSdevMax=2,
               scaleByEffInt=False, getRawCount=False, fignum=None, ds9=False,
               pclip=1.0, **kw):
    """
    plots a time-flattened image of the counts integrated from firstSec to firstSec+integrationTime
    if integrationTime is -1, All time after firstSec is used.
    if weighted is True, flat cal weights are applied
    If fluxWeighted is True, apply flux cal weights.
    if scaleByEffInt is True, then counts are scaled by effective exposure
    time on a per-pixel basis.
    nSdevMax - max end of stretch scale for display, in # sigmas above the mean.
    getRawCount - if True the raw non-wavelength-calibrated image is
    displayed with no wavelength cutoffs applied (in which case no wavecal
    file need be loaded).
    fignum - as for utils.plotArray (None = new window; False/0 = current window; or
    specify target window number).
    ds9 - boolean, if True, display in DS9 instead of regular plot window.
    pclip - set to percentile level (in percent) to set the upper and lower bounds
    of the colour scale.
    **kw - any other keywords passed directly to utils.plotArray()
    """
    secImg = self.getPixelCountImage(firstSec, integrationTime, weighted, fluxWeighted,
                                     getRawCount=getRawCount,scaleByEffInt=scaleByEffInt)['image']
    toPlot = np.copy(secImg)
    # NOTE(review): toPlot, vmin and vmax are computed below but never passed
    # to the plotting calls (secImg is plotted, and the stretch comes from
    # normMax), so the documented pclip behavior appears to have no effect.
    # Confirm whether vmin/vmax were meant to be forwarded to plotArray.
    vmin = np.percentile(toPlot[np.isfinite(toPlot)],pclip)
    vmax = np.percentile(toPlot[np.isfinite(toPlot)],100.-pclip)
    toPlot[np.isnan(toPlot)] = 0 #Just looks nicer when you plot it.
    if ds9 is True:
        utils.ds9Array(secImg)
    else:
        utils.plotArray(secImg, cbar=True, normMax=np.mean(secImg) + nSdevMax * np.std(secImg),
                        plotTitle=plotTitle, fignum=fignum, **kw)
def getFromHeader(self, name):
    """
    Look up one entry from the obs file header by column name.

    Special cases when a time-adjustment file is loaded:
      - 'exptime' is shortened by the largest roach delay, since seconds in
        which some roaches have no data are never retrieved.
      - 'unixtime' is shifted forward by that same maximum delay (getPixel
        relabels second maxDelay as second 0) plus the firmware delay.
    'jd' is always recomputed from the (corrected) unixtime, because the jd
    stored in the raw header is the file-creation time, not the start of
    the observation.
    """
    value = self.info[self.titles.index(name)]
    haveTimeAdjust = self.timeAdjustFile != None
    if name == 'exptime' and haveTimeAdjust:
        # Drop the seconds for which not all roaches have data.
        value -= np.max(self.roachDelays)
    elif name == 'unixtime' and haveTimeAdjust:
        # Re-anchor the start time to match getPixel's relabeled second 0,
        # and fold in the known firmware delay.
        value += np.max(self.roachDelays) + self.firmwareDelay
    elif name == 'jd':
        # Derive jd from the corrected unix start time.
        secsPerDay = 86400
        unixEpochJD = 2440587.5
        correctedUnixtime = self.getFromHeader('unixtime')
        value = correctedUnixtime / float(secsPerDay) + unixEpochJD
    return value
def getPixel(self, iRow, iCol, firstSec=0, integrationTime= -1):
    """
    Retrieves a pixel using the file's attached beammap.

    If firstSec/integrationTime are provided, only data from the time
    interval 'firstSec' to firstSec+integrationTime are returned.
    For now firstSec and integrationTime can only be integers.
    If integrationTime is -1, all data after firstSec are returned.

    MJS 3/28
    Updated so if timeAdjustFile is loaded, data retrieved from roaches
    with a delay will be offset to match other roaches. Also, if some roaches
    have a delay, seconds in which some roaches don't have data are no longer
    retrieved

    Returns:
        The h5 rows (one per second) for this pixel over the requested range.
    """
    pixelLabel = self.beamImage[iRow][iCol]
    pixelNode = self.file.getNode('/' + pixelLabel)
    if self.timeAdjustFile != None:
        iRoach = self.getRoachNum(iRow,iCol)
        maxDelay = np.max(self.roachDelays)
        #skip over any seconds that don't have data from all roaches
        #and offset by roach delay so all roaches will match
        firstSec += maxDelay-self.roachDelays[iRoach]
        if integrationTime == -1:
            # Stop short of this roach's trailing delayed seconds.
            lastSec = pixelNode.nrows-self.roachDelays[iRoach]
        else:
            lastSec = firstSec + integrationTime
    else:
        if integrationTime == -1:
            lastSec = pixelNode.nrows
        else:
            lastSec = firstSec + integrationTime
    pixelData = pixelNode.read(firstSec, lastSec)
    #return {'pixelData':pixelData,'firstSec':firstSec,'lastSec':lastSec}
    return pixelData
def setWvlDitherSeed(self,seed=None):
    """
    Seed the random number generator used by getPixelWvlList to dither
    quantized ADC values.

    seed - pass an integer for reproducible dithering; with None (the
           default), numpy chooses its own entropy source (per its docs,
           /dev/urandom where available, otherwise the clock), so dithers
           differ between runs.
    """
    np.random.seed(seed)
    self.seed = seed
def getPixelWvlList(self,iRow,iCol,firstSec=0,integrationTime=-1,excludeBad=True,dither=True,timeSpacingCut=None): #,getTimes=False):
    """
    returns a numpy array of photon wavelengths for a given pixel, integrated
    from firstSec to firstSec+integrationTime.
    if integrationTime is -1, All time after firstSec is used.

    Now always accounts for any hot-pixel time masking and returns a
    dictionary with keys:
        timestamps
        wavelengths
        effIntTime (effective integration time)
        rawCounts
    JvE 3/5/2013

    if excludeBad is True, relevant wavelength cuts are applied to timestamps
    and wavelengths before returning

    MJS 3/28/2013
    if dither is True, uniform random values in the range (0,1) will be added
    to all quantized ADC values read, to remedy the effects of quantization
    """
    #if getTimes == False:
    #    packetList = self.getPixelPacketList(iRow,iCol,firstSec,integrationTime)
    #    timestamps,parabolaPeaks,baselines = self.parsePhotonPackets(packetList)
    #
    #else:
    x = self.getTimedPacketList(iRow, iCol, firstSec, integrationTime,timeSpacingCut=timeSpacingCut)
    timestamps, parabolaPeaks, baselines, effIntTime, rawCounts = \
        x['timestamps'], x['peakHeights'], x['baselines'], x['effIntTime'], x['rawCounts']
    parabolaPeaks = np.array(parabolaPeaks,dtype=np.double)
    baselines = np.array(baselines,dtype=np.double)
    if dither==True:
        # Smooth out ADC quantization by adding uniform noise in [0,1).
        parabolaPeaks += np.random.random_sample(len(parabolaPeaks))
        baselines += np.random.random_sample(len(baselines))
    pulseHeights = parabolaPeaks - baselines
    #xOffset = self.wvlCalTable[iRow, iCol, 0]
    #yOffset = self.wvlCalTable[iRow, iCol, 1]
    #amplitude = self.wvlCalTable[iRow, iCol, 2]
    #energies = amplitude * (pulseHeights - xOffset) ** 2 + yOffset
    # Quadratic energy calibration: E = c0 + c1*h + c2*h^2.
    const = self.wvlCalTable[iRow, iCol, 0]
    lin_term = self.wvlCalTable[iRow, iCol, 1]
    quad_term = self.wvlCalTable[iRow, iCol, 2]
    energies = const+lin_term*pulseHeights+quad_term*pulseHeights**2.0
    wvlCalLowerLimit = self.wvlRangeTable[iRow, iCol, 1]
    # NOTE(review): the next line clobbers the per-pixel lower limit just
    # read from the table, effectively disabling the lower-limit cut below.
    # This looks like a debugging leftover -- confirm whether it is intended.
    wvlCalLowerLimit = 0
    wvlCalUpperLimit = self.wvlRangeTable[iRow, iCol, 0]
    with np.errstate(divide='ignore'):
        wavelengths = ObsFile.h*ObsFile.c*ObsFile.angstromPerMeter/energies
    if excludeBad == True:
        #check if this pixel is completely valid in the wavelength range set for this ObsFile
        #if not, cut out the photons from this pixel
        if (self.wvlUpperLimit != -1 and not self.wvlUpperLimit is None and wvlCalUpperLimit < self.wvlUpperLimit) or (self.wvlLowerLimit != -1 and not self.wvlLowerLimit is None and wvlCalLowerLimit > self.wvlLowerLimit):
            wavelengths = np.array([],dtype=np.double)
            timestamps = np.array([],dtype=np.double)
            effIntTime=0
        else:
            # Build a mask of photons passing all wavelength-validity cuts;
            # -1 selects the per-pixel wavecal limit, a non-None value the
            # ObsFile-wide limit.
            goodMask = ~np.isnan(wavelengths)
            goodMask = np.logical_and(goodMask,wavelengths!=np.inf)
            if self.wvlLowerLimit == -1:
                goodMask = np.logical_and(goodMask,wvlCalLowerLimit < wavelengths)
            elif self.wvlLowerLimit != None:
                goodMask = np.logical_and(goodMask,self.wvlLowerLimit < wavelengths)
            if self.wvlUpperLimit == -1:
                goodMask = np.logical_and(goodMask,wavelengths < wvlCalUpperLimit)
            elif self.wvlUpperLimit != None:
                goodMask = np.logical_and(goodMask,wavelengths < self.wvlUpperLimit)
            wavelengths = wavelengths[goodMask]
            timestamps = timestamps[goodMask]
    return {'timestamps':timestamps, 'wavelengths':wavelengths,
            'effIntTime':effIntTime, 'rawCounts':rawCounts}
def getPixelCount(self, iRow, iCol, firstSec=0, integrationTime= -1,
                  weighted=False, fluxWeighted=False, getRawCount=False, timeSpacingCut=None):
    """
    returns the number of photons received in a given pixel from firstSec to
    firstSec + integrationTime

    - if integrationTime is -1, all time after firstSec is used.
    - if weighted is True, flat cal weights are applied
    - if fluxWeighted is True, flux weights are applied.
    - if getRawCount is True, the total raw count for all photon event detections
      is returned irrespective of wavelength calibration, and with no wavelength
      cutoffs (in this case, no wavecal file need have been applied, though
      bad pixel time-masks *will* still be applied if present and switched 'on'.)
      Otherwise will now always call getPixelSpectrum (which is also capable
      of handling hot pixel removal) -- JvE 3/1/2013.
    *Note getRawCount overrides weighted and fluxWeighted.

    OUTPUTS:
    Return value is a dictionary with tags:
        'counts':int, number of photon counts
        'effIntTime':float, effective integration time after time-masking is
             accounted for.
        'rawCounts': int, total number of photon triggers (including noise)
    """
    # (Removed commented-out cross-check assertions against the obsolete
    # getTimedPacketList_old implementation.)
    if getRawCount is True:
        # Raw mode: count every retrieved photon event directly.
        x = self.getTimedPacketList(iRow, iCol, firstSec=firstSec, integrationTime=integrationTime, timeSpacingCut=timeSpacingCut)
        timestamps, effIntTime, rawCounts = x['timestamps'], x['effIntTime'], x['rawCounts']
        counts = len(timestamps)
        return {'counts':counts, 'effIntTime':effIntTime, 'rawCounts':rawCounts}
    else:
        # Calibrated mode: integrate the (optionally weighted) spectrum.
        pspec = self.getPixelSpectrum(iRow, iCol, firstSec, integrationTime,weighted=weighted, fluxWeighted=fluxWeighted, timeSpacingCut=timeSpacingCut)
        counts = sum(pspec['spectrum'])
        ### If it's weighted then deadtime should be corrected in getPixelSpectrum(), otherwise not ###
        return {'counts':counts, 'effIntTime':pspec['effIntTime'], 'rawCounts':pspec['rawCounts']}
def getPixelLightCurve(self,iRow,iCol,firstSec=0,lastSec=-1,cadence=1,
                       **kwargs):
    """
    Get a simple light curve for a pixel (a thin wrapper around
    getPixelCount).

    INPUTS:
        iRow,iCol - pixel coordinates.
        firstSec - start time (sec) within the obs file.
        lastSec - end time (sec); -1 runs to the end of the file
                  (header 'exptime').
        cadence - bin width (sec): counts are integrated every 'cadence'
                  seconds.
        **kwargs - forwarded to getPixelCount (weighted, fluxWeighted,
                  getRawCount, ...).

    OUTPUTS:
        1-D numpy array of counts, one entry per cadence bin between
        firstSec and lastSec. With a non-integer cadence the number of bins
        follows numpy.arange's rounding of the final step. Bins fully masked
        by hot-pixel masking come back as 0.
    """
    endSec = self.getFromHeader('exptime') if lastSec == -1 else lastSec
    binStarts = np.arange(firstSec, endSec, cadence)
    counts = [self.getPixelCount(iRow, iCol, firstSec=t,
                                 integrationTime=cadence, **kwargs)['counts']
              for t in binStarts]
    return np.array(counts)
def plotPixelLightCurve(self,iRow,iCol,firstSec=0,lastSec=-1,cadence=1,**kwargs):
    """
    Plot a simple light curve for a given pixel. Just a wrapper for
    getPixelLightCurve. Also marks intervals flagged as bad with gray shaded
    regions if a hot pixel mask is loaded.

    Args mirror getPixelLightCurve; extra **kwargs are forwarded to it.
    """
    lc = self.getPixelLightCurve(iRow=iRow,iCol=iCol,firstSec=firstSec,lastSec=lastSec,
                                 cadence=cadence,**kwargs)
    if lastSec==-1: realLastSec = self.getFromHeader('exptime')
    else: realLastSec = lastSec
    #Plot the lightcurve
    # x values are the centers of the cadence bins.
    x = np.arange(firstSec+cadence/2.,realLastSec)
    # NOTE(review): assert is stripped under 'python -O'; this is a sanity
    # check against arange returning an inconsistent number of bins.
    assert len(x)==len(lc) #In case there are issues with arange being inconsistent on the number of values it returns
    plt.plot(x,lc)
    plt.xlabel('Time since start of file (s)')
    plt.ylabel('Counts')
    plt.title(self.fileName+' - pixel x,y = '+str(iCol)+','+str(iRow))
    #Get bad times in time range of interest (hot pixels etc.)
    badTimes = self.getPixelBadTimes(iRow,iCol) & interval([firstSec,realLastSec]) #Returns an 'interval' instance
    lcRange = np.nanmax(lc)-np.nanmin(lc)
    # Shade each bad-time interval across (slightly more than) the full
    # vertical extent of the light curve.
    for eachInterval in badTimes:
        plt.fill_betweenx([np.nanmin(lc)-0.5*lcRange,np.nanmax(lc)+0.5*lcRange], eachInterval[0],eachInterval[1],
                          alpha=0.5,color='gray')
def getPixelPacketList(self, iRow, iCol, firstSec=0, integrationTime= -1):
    """
    Return one flat numpy array of 64-bit photon packets for a pixel,
    covering firstSec through firstSec+integrationTime (or all remaining
    time when integrationTime is -1).
    """
    # getPixel returns one array per second; flatten them into a single
    # packet stream.
    perSecondData = self.getPixel(iRow, iCol, firstSec, integrationTime)
    return np.concatenate(perSecondData)
    def getTimedPacketList(self, iRow, iCol, firstSec=0, integrationTime= -1, timeSpacingCut=None,expTailTimescale=None):
        """
        Parses an array of uint64 packets with the obs file format,and makes timestamps absolute
        (with zero time at beginning of ObsFile).
        Returns a list of:
        timestamps (seconds from start of file),parabolaFitPeaks,baselines,effectiveIntTime (effective
        integration time after accounting for time-masking.)
        parses packets from firstSec to firstSec+integrationTime.
        if integrationTime is -1, all time after firstSec is used.
        if timeSpacingCut is not None [**units=seconds, presumably?**], photons sooner than timeSpacingCut seconds after the last photon are cut.
        Typically we will set timeSpacingCut=1.e-3 (1 ms) to remove effects of photon pile-up
        if expTailTimescale is not None, photons are assumed to exhibit an exponential decay back to baseline with e-fold time
        expTailTimescale, this is used to subtract the exponential tail of one photon from the peakHeight of the next photon
        This also attempts to counter effects of photon pile-up for short (<100 us) dead times. [**units?**]
        Now updated to take advantage of masking capabilities in parsePhotonPackets
        to allow for correct application of non-integer values in firstSec and
        integrationTime. JvE Feb 27 2013.
        CHANGED RETURN VALUES - now returns a dictionary including effective integration
        time (allowing for bad pixel masking), with keys:
        'timestamps'
        'peakHeights'
        'baselines'
        'effIntTime'
        'rawCounts'
        - JvE 3/5/2013.
        - ABW Oct 7, 2014. Added rawCounts for calculating dead time correction
        **Modified to increase speed for integrations shorter than the full exposure
        length. JvE 3/13/2013**
        MJS 3/28/2012
        **Modified to add known delays to timestamps from roach delays and firmware delay if timeAdjustFile
        is loaded**

        NOTE(review): the expTailTimescale subtraction block appears twice below
        (before and after the timeSpacingCut filter, the second copy with extra
        debug prints), so the tail is subtracted a second time from the already
        corrected peak heights -- confirm this is intentional.
        """
        #pixelData = self.getPixel(iRow, iCol)
        lastSec = firstSec + integrationTime
        #Make sure we include *all* the complete seconds that overlap the requested range
        integerIntTime = int(np.ceil(lastSec)-np.floor(firstSec))
        try:
            pixelData = self.getPixel(iRow, iCol, firstSec=int(np.floor(firstSec)),
                                      integrationTime=integerIntTime)
            # If the request ran past the end of the file, clip lastSec to the
            # data actually returned (one entry per whole second).
            if integrationTime == -1 or integerIntTime > len(pixelData):
                lastSec = int(np.floor(firstSec))+len(pixelData)
            # Build 'inter', the set of times to exclude from the result.
            if self.hotPixIsApplied:
                inter = self.getPixelBadTimes(iRow, iCol)
            else:
                inter = interval()
            if self.cosmicMaskIsApplied:
                inter = inter | self.cosmicMask
            if (type(firstSec) is not int) or (type(integrationTime) is not int):
                #Also exclude times outside firstSec to lastSec. Allows for sub-second
                #(floating point) values in firstSec and integrationTime in the call to parsePhotonPackets.
                inter = inter | interval([-np.inf, firstSec], [lastSec, np.inf]) #Union the exclusion interval with the excluded time range limits
            #Inter now contains a single 'interval' instance, which contains a list of
            #times to exclude, in seconds, including all times outside the requested
            #integration if necessary.
            #Calculate the total effective time for the integration after removing
            #any 'intervals':
            integrationInterval = interval([firstSec, lastSec])
            maskedIntervals = inter & integrationInterval #Intersection of the integration and the bad times for this pixel (for calculating eff. int. time)
            effectiveIntTime = (lastSec - firstSec) - utils.intervalSize(maskedIntervals)
            if (inter == self.intervalAll) or len(pixelData) == 0:
                # Whole range masked out, or no data: return empty arrays.
                timestamps = np.array([])
                peakHeights = np.array([])
                baselines = np.array([])
                rawCounts = 0.
                if inter == self.intervalAll:
                    effectiveIntTime = 0.
            else:
                parsedData = self.parsePhotonPacketLists(pixelData)
                #timestamps = [(np.floor(firstSec)+iSec+(self.tickDuration*times)) for iSec,times in enumerate(parsedData['timestamps'])]
                #timestamps = np.concatenate(timestamps)
                # Convert per-second tick counts to absolute seconds from file start:
                # each second's photons get that second's integer offset added.
                lengths = np.array([len(times) for times in parsedData['timestamps']])
                secOffsets = np.floor(firstSec)+np.concatenate([np.ones(length)*iSec for iSec,length in enumerate(lengths)])
                #secOffsets = np.floor(firstSec)+np.concatenate([[iSec]*len(times) for iSec,times in enumerate(parsedData['timestamps'])])
                times = np.concatenate(parsedData['timestamps'])
                timestamps = secOffsets+(self.tickDuration*times)
                baselines = np.concatenate(parsedData['baselines'])
                peakHeights = np.concatenate(parsedData['parabolaFitPeaks'])
                # Drop photons falling inside the excluded intervals; keep the
                # companion arrays filtered in lock-step.
                maskedDict = self.maskTimestamps(timestamps=timestamps,inter=inter,otherListsToFilter=[baselines,peakHeights])
                timestamps = maskedDict['timestamps']
                baselines,peakHeights = maskedDict['otherLists']
                # rawCounts includes every surviving trigger (signal + noise tail).
                rawCounts = len(timestamps)
                if expTailTimescale != None and len(timestamps) > 0:
                    #find the time between peaks
                    timeSpacing = np.diff(timestamps)
                    timeSpacing[timeSpacing < 0] = 1.
                    timeSpacing = np.append(1.,timeSpacing)#arbitrarily assume the first photon is 1 sec after the one before it
                    relPeakHeights = peakHeights-baselines
                    #assume each peak is riding on the tail of an exponential starting at the peak before it with e-fold time of expTailTimescale
                    print 'dt',timeSpacing[0:10]
                    expTails = (1.*peakHeights-baselines)*np.exp(-1.*timeSpacing/expTailTimescale)
                    print 'expTail',expTails[0:10]
                    print 'peak',peakHeights[0:10]
                    print 'peak-baseline',1.*peakHeights[0:10]-baselines[0:10]
                    print 'expT',np.exp(-1.*timeSpacing[0:10]/expTailTimescale)
                    #subtract off this exponential tail
                    # NOTE(review): np.int is removed in NumPy >= 1.24; this line
                    # needs dtype=int under modern NumPy.
                    peakHeights = np.array(peakHeights-expTails,dtype=np.int)
                    print 'peak',peakHeights[0:10]
                if timeSpacingCut != None and len(timestamps) > 0:
                    timeSpacing = np.diff(timestamps)
                    timeSpacingMask = np.concatenate([[True],timeSpacing >= timeSpacingCut]) #include first photon and photons after who are at least timeSpacingCut after the previous photon
                    timestamps = timestamps[timeSpacingMask]
                    peakHeights = peakHeights[timeSpacingMask]
                    baselines = baselines[timeSpacingMask]
                    #diagnose("getTimed AAA",timestamps,peakHeights,baselines,None)
                # Second copy of the exponential-tail subtraction (see NOTE in
                # the docstring); operates on the already-corrected peakHeights.
                if expTailTimescale != None and len(timestamps) > 0:
                    #find the time between peaks
                    timeSpacing = np.diff(timestamps)
                    timeSpacing[timeSpacing < 0] = 1.
                    timeSpacing = np.append(1.,timeSpacing)#arbitrarily assume the first photon is 1 sec after the one before it
                    relPeakHeights = peakHeights-baselines
                    #assume each peak is riding on the tail of an exponential starting at the peak before it with e-fold time of expTailTimescale
                    print 30*"."," getTimed....."
                    print 'dt',timeSpacing[0:10]
                    expTails = (1.*peakHeights-baselines)*np.exp(-1.*timeSpacing/expTailTimescale)
                    print 'expTail',expTails[0:10]
                    print 'peak',peakHeights[0:10]
                    print 'peak-baseline',1.*peakHeights[0:10]-baselines[0:10]
                    print 'expT',np.exp(-1.*timeSpacing[0:10]/expTailTimescale)
                    #subtract off this exponential tail
                    peakHeights = np.array(peakHeights-expTails,dtype=np.int)
                    print 'peak',peakHeights[0:10]
        except tables.exceptions.NoSuchNodeError: #h5 file is missing a pixel, treat as dead
            timestamps = np.array([])
            peakHeights = np.array([])
            baselines = np.array([])
            effectiveIntTime = 0.
            rawCounts = 0.
        return {'timestamps':timestamps, 'peakHeights':peakHeights,
                'baselines':baselines, 'effIntTime':effectiveIntTime, 'rawCounts':rawCounts}
def getPixelCountImage(self, firstSec=0, integrationTime= -1, weighted=False,
fluxWeighted=False, getRawCount=False,
scaleByEffInt=False):
"""
Return a time-flattened image of the counts integrated from firstSec to firstSec+integrationTime.
- If integration time is -1, all time after firstSec is used.
- If weighted is True, flat cal weights are applied. JvE 12/28/12
- If fluxWeighted is True, flux cal weights are applied. SM 2/7/13
- If getRawCount is True then the raw non-wavelength-calibrated image is
returned with no wavelength cutoffs applied (in which case no wavecal
file need be loaded). *Note getRawCount overrides weighted and fluxWeighted
- If scaleByEffInt is True, any pixels that have 'bad' times masked out
will have their counts scaled up to match the equivalent integration
time requested.
RETURNS:
Dictionary with keys:
'image' - a 2D array representing the image
'effIntTimes' - a 2D array containing effective integration
times for each pixel.
'rawCounts' - a 2D array containing the raw number of counts
for each pixel.
"""
secImg = np.zeros((self.nRow, self.nCol))
effIntTimes = np.zeros((self.nRow, self.nCol), dtype=np.float64)
effIntTimes.fill(np.nan) #Just in case an element doesn't get filled for some reason.
rawCounts = np.zeros((self.nRow, self.nCol), dtype=np.float64)
rawCounts.fill(np.nan) #Just in case an element doesn't get filled for some reason.
for iRow in xrange(self.nRow):
for iCol in xrange(self.nCol):
pcount = self.getPixelCount(iRow, iCol, firstSec, integrationTime,
weighted, fluxWeighted, getRawCount)
secImg[iRow, iCol] = pcount['counts']
effIntTimes[iRow, iCol] = pcount['effIntTime']
rawCounts[iRow,iCol] = pcount['rawCounts']
if scaleByEffInt is True:
if integrationTime == -1:
totInt = self.getFromHeader('exptime')
else:
totInt = integrationTime
secImg *= (totInt / effIntTimes)
#if getEffInt is True:
return{'image':secImg, 'effIntTimes':effIntTimes, 'rawCounts':rawCounts}
#else:
# return secImg
def getAperturePixelCountImage(self, firstSec=0, integrationTime= -1, y_values=range(46), x_values=range(44), y_sky=[], x_sky=[], apertureMask=np.ones((46,44)), skyMask=np.zeros((46,44)), weighted=False, fluxWeighted=False, getRawCount=False, scaleByEffInt=False):
"""
Return a time-flattened image of the counts integrated from firstSec to firstSec+integrationTime
This aperture version subtracts out the average sky counts/pixel and includes scaling due to circular apertures. GD 5/27/13
If integration time is -1, all time after firstSec is used.
If weighted is True, flat cal weights are applied. JvE 12/28/12
If fluxWeighted is True, flux cal weights are applied. SM 2/7/13
If getRawCount is True then the raw non-wavelength-calibrated image is
returned with no wavelength cutoffs applied (in which case no wavecal
file need be loaded). JvE 3/1/13
If scaleByEffInt is True, any pixels that have 'bad' times masked out
will have their counts scaled up to match the equivalent integration
time requested.
RETURNS:
Dictionary with keys:
'image' - a 2D array representing the image
'effIntTimes' - a 2D array containing effective integration
times for each pixel.
"""
secImg = np.zeros((self.nRow, self.nCol))
effIntTimes = np.zeros((self.nRow, self.nCol), dtype=np.float64)
effIntTimes.fill(np.nan) #Just in case an element doesn't get filled for some reason.
skyValues=[]
objValues=[]
AreaSky=[]
AreaObj=[]
for pix in xrange(len(y_sky)):
pcount = self.getPixelCount(y_sky[pix], x_sky[pix], firstSec, integrationTime,weighted, fluxWeighted, getRawCount)
skyValue=pcount['counts']*skyMask[y_sky[pix]][x_sky[pix]]
skyValues.append(skyValue)
AreaSky.append(skyMask[y_sky[pix]][x_sky[pix]])
skyCountPerPixel = np.sum(skyValues)/(np.sum(AreaSky))
# print 'sky count per pixel =',skyCountPerPixel
for pix in xrange(len(y_values)):
pcount = self.getPixelCount(y_values[pix], x_values[pix], firstSec, integrationTime,weighted, fluxWeighted, getRawCount)
secImg[y_values[pix],x_values[pix]] = (pcount['counts']-skyCountPerPixel)*apertureMask[y_values[pix]][x_values[pix]]
AreaObj.append(apertureMask[y_values[pix]][x_values[pix]])
effIntTimes[y_values[pix],x_values[pix]] = pcount['effIntTime']
objValues.append(pcount['counts']*apertureMask[y_values[pix]][x_values[pix]])
AveObj=np.sum(objValues)/(np.sum(AreaObj))
# print 'ave obj per pixel (not sub) = ',AveObj
NumObjPhotons = np.sum(secImg)
# print 'lightcurve = ',NumObjPhotons
if scaleByEffInt is True:
secImg *= (integrationTime / effIntTimes)
#if getEffInt is True:
return{'image':secImg, 'effIntTimes':effIntTimes, 'SkyCountSubtractedPerPixel':skyCountPerPixel,'lightcurve':NumObjPhotons}
#else:
# return secImg
def getSpectralCube(self,firstSec=0,integrationTime=-1,weighted=True,fluxWeighted=True,wvlStart=3000,wvlStop=13000,wvlBinWidth=None,energyBinWidth=None,wvlBinEdges=None,timeSpacingCut=None):
"""
Return a time-flattened spectral cube of the counts integrated from firstSec to firstSec+integrationTime.
If integration time is -1, all time after firstSec is used.
If weighted is True, flat cal weights are applied.
If fluxWeighted is True, spectral shape weights are applied.
"""
cube = [[[] for iCol in range(self.nCol)] for iRow in range(self.nRow)]
effIntTime = np.zeros((self.nRow,self.nCol))
rawCounts = np.zeros((self.nRow,self.nCol))
for iRow in xrange(self.nRow):
for iCol in xrange(self.nCol):
x = self.getPixelSpectrum(pixelRow=iRow,pixelCol=iCol,
firstSec=firstSec,integrationTime=integrationTime,
weighted=weighted,fluxWeighted=fluxWeighted,wvlStart=wvlStart,wvlStop=wvlStop,
wvlBinWidth=wvlBinWidth,energyBinWidth=energyBinWidth,
wvlBinEdges=wvlBinEdges,timeSpacingCut=timeSpacingCut)
cube[iRow][iCol] = x['spectrum']
effIntTime[iRow][iCol] = x['effIntTime']
rawCounts[iRow][iCol] = x['rawCounts']
wvlBinEdges = x['wvlBinEdges']
cube = np.array(cube)
return {'cube':cube,'wvlBinEdges':wvlBinEdges,'effIntTime':effIntTime, 'rawCounts':rawCounts}
    def getPixelSpectrum(self, pixelRow, pixelCol, firstSec=0, integrationTime= -1,
                         weighted=False, fluxWeighted=False, wvlStart=None, wvlStop=None,
                         wvlBinWidth=None, energyBinWidth=None, wvlBinEdges=None,timeSpacingCut=None):
        """
        returns a spectral histogram of a given pixel integrated from firstSec to firstSec+integrationTime,
        and an array giving the cutoff wavelengths used to bin the wavelength values
        if integrationTime is -1, All time after firstSec is used.
        if weighted is True, flat cal weights are applied
        if weighted is False, flat cal weights are not applied
        the wavelength bins used depends on the parameters given.
        If energyBinWidth is specified, the wavelength bins use fixed energy bin widths
        If wvlBinWidth is specified, the wavelength bins use fixed wavelength bin widths
        If neither is specified and/or if weighted is True, the flat cal wvlBinEdges is used
        wvlStart defaults to self.wvlLowerLimit or 3000
        wvlStop defaults to self.wvlUpperLimit or 12000
        (note the wvlStop fallback in the code below is 12000, not the 13000
        used elsewhere, e.g. in getSpectralCube's default)
        ----
        Updated to return effective integration time for the pixel
        Returns dictionary with keys:
            'spectrum' - spectral histogram of given pixel.
            'wvlBinEdges' - edges of wavelength bins
            'effIntTime' - the effective integration time for the given pixel
                           after accounting for hot-pixel time-masking.
            'rawCounts' - The total number of photon triggers (including from
                          the noise tail) during the effective exposure.
        JvE 3/5/2013
        ABW Oct 7, 2014. Added rawCounts to dictionary
        ----
        """
        # Resolve wavelength limits: explicit argument > instance limit > fallback.
        wvlStart=wvlStart if (wvlStart!=None and wvlStart>0.) else (self.wvlLowerLimit if (self.wvlLowerLimit!=None and self.wvlLowerLimit>0.) else 3000)
        wvlStop=wvlStop if (wvlStop!=None and wvlStop>0.) else (self.wvlUpperLimit if (self.wvlUpperLimit!=None and self.wvlUpperLimit>0.) else 12000)
        x = self.getPixelWvlList(pixelRow, pixelCol, firstSec, integrationTime,timeSpacingCut=timeSpacingCut)
        wvlList, effIntTime, rawCounts = x['wavelengths'], x['effIntTime'], x['rawCounts']
        if (weighted == False) and (fluxWeighted == True):
            # Flux cal weights are defined per flat-cal wavelength bin, so they
            # cannot be applied without the flat cal.
            raise ValueError("Cannot apply flux cal without flat cal. Please load flat cal and set weighted=True")
        if (self.flatCalFile is not None) and (((wvlBinEdges is None) and (energyBinWidth is None) and (wvlBinWidth is None)) or weighted == True):
            #We've loaded a flat cal already, which has wvlBinEdges defined, and no other bin edges parameters are specified to override it.
            spectrum, wvlBinEdges = np.histogram(wvlList, bins=self.flatCalWvlBins)
            if weighted == True:#Need to apply flat weights by wavelenth
                spectrum = spectrum * self.flatWeights[pixelRow, pixelCol]
                if fluxWeighted == True:
                    spectrum = spectrum * self.fluxWeights
        else:
            if weighted == True:
                raise ValueError('when weighted=True, flatCal wvl bins are used, so wvlBinEdges,wvlBinWidth,energyBinWidth,wvlStart,wvlStop should not be specified')
            if wvlBinEdges is None:#We need to construct wvlBinEdges array
                if energyBinWidth is not None:#Fixed energy binwidth specified
                    #Construct array with variable wvl binwidths
                    wvlBinEdges = ObsFile.makeWvlBins(energyBinWidth=energyBinWidth, wvlStart=wvlStart, wvlStop=wvlStop)
                    spectrum, wvlBinEdges = np.histogram(wvlList, bins=wvlBinEdges)
                elif wvlBinWidth is not None:#Fixed wvl binwidth specified
                    nWvlBins = int((wvlStop - wvlStart) / wvlBinWidth)
                    spectrum, wvlBinEdges = np.histogram(wvlList, bins=nWvlBins, range=(wvlStart, wvlStop))
                else:
                    raise ValueError('getPixelSpectrum needs either wvlBinWidth,wvlBinEnergy, or wvlBinEdges')
                #else:
                #    nWvlBins = 1
                #    spectrum, wvlBinEdges = np.histogram(wvlList, bins=nWvlBins, range=(wvlStart, wvlStop))
            else:#We are given wvlBinEdges array
                spectrum, wvlBinEdges = np.histogram(wvlList, bins=wvlBinEdges)
        if self.filterIsApplied == True:
            # A synthetic filter's transmission curve is defined on its own bin
            # edges; they must match the spectrum's binning exactly.
            if not np.array_equal(self.filterWvlBinEdges, wvlBinEdges):
                raise ValueError("Synthetic filter wvlBinEdges do not match pixel spectrum wvlBinEdges!")
            spectrum*=self.filterTrans
        #if getEffInt is True:
        return {'spectrum':spectrum, 'wvlBinEdges':wvlBinEdges, 'effIntTime':effIntTime, 'rawCounts':rawCounts}
        #else:
        #    return spectrum,wvlBinEdges
    def getApertureSpectrum(self, pixelRow, pixelCol, radius1, radius2, weighted=False,
                            fluxWeighted=False, lowCut=3000, highCut=7000,firstSec=0,integrationTime=-1):
        '''
        Creates a spectrum from a group of pixels. Aperture is defined by pixelRow and pixelCol of
        center, as well as radius. Wave and flat cals should be loaded before using this
        function. If no hot pixel mask is applied, taking the median of the sky rather than
        the average to account for high hot pixel counts.
        Will add more options as other pieces of pipeline become more refined.
        (Note - not updated to handle loaded hot pixel time-masks - if applied,
        behaviour may be unpredictable. JvE 3/5/2013).

        radius1 is the object aperture radius; radius2 bounds the sky annulus
        (annulus = big circle minus object aperture). Pixels whose wavecal
        range does not cover [lowCut, highCut] are excluded from both regions.
        Returns (summed_array, wvlBinEdges): the sky-subtracted, per-Angstrom
        object spectrum and the wavelength bin edges used.
        '''
        print 'Creating dead pixel mask...'
        deadMask = self.getDeadPixels()
        print 'Creating wavecal solution mask...'
        # Mark pixels whose calibrated wavelength range does not span the cut
        # window; those are dropped from both object and sky regions below.
        bad_solution_mask = np.zeros((self.nRow, self.nCol))
        for y in range(self.nRow):
            for x in range(self.nCol):
                if (self.wvlRangeTable[y][x][0] > lowCut or self.wvlRangeTable[y][x][1] < highCut):
                    bad_solution_mask[y][x] = 1
        print 'Creating aperture mask...'
        apertureMask = utils.aperture(pixelCol, pixelRow, radius=radius1)
        print 'Creating sky mask...'
        bigMask = utils.aperture(pixelCol, pixelRow, radius=radius2)
        skyMask = bigMask - apertureMask
        #if hotPixMask == None:
        #    y_values, x_values = np.where(np.logical_and(bad_solution_mask == 0, np.logical_and(apertureMask == 0, deadMask == 1)))
        #    y_sky, x_sky = np.where(np.logical_and(bad_solution_mask == 0, np.logical_and(skyMask == 0, deadMask == 1)))
        #else:
        #    y_values, x_values = np.where(np.logical_and(bad_solution_mask == 0, np.logical_and(np.logical_and(apertureMask == 0, deadMask == 1), hotPixMask == 0)))
        #    y_sky, x_sky = np.where(np.logical_and(bad_solution_mask == 0, np.logical_and(np.logical_and(skyMask == 0, deadMask == 1), hotPixMask == 0)))
        # Good pixels: wavecal ok, inside the region mask (mask value 0 means
        # inside here, per utils.aperture's convention as used below), not dead.
        y_values, x_values = np.where(np.logical_and(bad_solution_mask == 0, np.logical_and(apertureMask == 0, deadMask == 1)))
        y_sky, x_sky = np.where(np.logical_and(bad_solution_mask == 0, np.logical_and(skyMask == 0, deadMask == 1)))
        #wvlBinEdges = self.getPixelSpectrum(y_values[0], x_values[0], weighted=weighted)['wvlBinEdges']
        print 'Creating average sky spectrum...'
        skyspectrum = []
        for i in range(len(x_sky)):
            specDict = self.getPixelSpectrum(y_sky[i],x_sky[i],weighted=weighted, fluxWeighted=fluxWeighted, firstSec=firstSec, integrationTime=integrationTime)
            self.skySpectrumSingle,wvlBinEdges,self.effIntTime = specDict['spectrum'],specDict['wvlBinEdges'],specDict['effIntTime']
            self.scaledSpectrum = self.skySpectrumSingle/self.effIntTime #scaled spectrum by effective integration time
            #print "Sky spectrum"
            #print self.skySpectrumSingle
            #print "Int time"
            #print self.effIntTime
            skyspectrum.append(self.scaledSpectrum)
        # Median sky value per wavelength bin (robust against hot pixels).
        sky_array = np.zeros(len(skyspectrum[0]))
        for j in range(len(skyspectrum[0])):
            ispectrum = np.zeros(len(skyspectrum))
            for i in range(len(skyspectrum)):
                ispectrum[i] = skyspectrum[i][j]
            sky_array[j] = np.median(ispectrum)
            #if hotPixMask == None:
            #    sky_array[j] = np.median(ispectrum)
            #else:
            #    sky_array[j] = np.average(ispectrum)
        print 'Creating sky subtracted spectrum...'
        spectrum = []
        for i in range(len(x_values)):
            specDict = self.getPixelSpectrum(y_values[i],x_values[i],weighted=weighted, fluxWeighted=fluxWeighted, firstSec=firstSec, integrationTime=integrationTime)
            self.obsSpectrumSingle,wvlBinEdges,self.effIntTime = specDict['spectrum'],specDict['wvlBinEdges'],specDict['effIntTime']
            self.scaledSpectrum = self.obsSpectrumSingle/self.effIntTime #scaled spectrum by effective integration time
            spectrum.append(self.scaledSpectrum - sky_array)
            #spectrum.append(self.getPixelSpectrum(y_values[i], x_values[i], weighted=weighted,fluxWeighted=fluxWeighted)['spectrum'] - sky_array)
        # Sum the sky-subtracted spectra over all object pixels...
        summed_array = np.zeros(len(spectrum[0]))
        for j in range(len(spectrum[0])):
            ispectrum = np.zeros(len(spectrum))
            for i in range(len(spectrum)):
                ispectrum[i] = spectrum[i][j]
            summed_array[j] = np.sum(ispectrum)
        # ...and normalise each bin by its width to get counts per Angstrom.
        for i in range(len(summed_array)):
            summed_array[i] /= (wvlBinEdges[i + 1] - wvlBinEdges[i])
        return summed_array, wvlBinEdges
def getPixelBadTimes(self, pixelRow, pixelCol, reasons=[]):
"""
Get the time interval(s) for which a given pixel is bad (hot/cold,
whatever, from the hot pixel cal file).
Returns an 'interval' object (see pyinterval) of bad times (in seconds
from start of obs file).
"""
if self.hotPixTimeMask is None:
raise RuntimeError, 'No hot pixel file loaded'
return self.hotPixTimeMask.get_intervals(pixelRow,pixelCol,reasons)
def getDeadPixels(self, showMe=False, weighted=True, getRawCount=False):
"""
returns a mask indicating which pixels had no counts in this observation file
1's for pixels with counts, 0's for pixels without counts
if showMe is True, a plot of the mask pops up
"""
countArray = np.array([[(self.getPixelCount(iRow, iCol, weighted=weighted,getRawCount=getRawCount))['counts'] for iCol in range(self.nCol)] for iRow in range(self.nRow)])
deadArray = np.ones((self.nRow, self.nCol))
deadArray[countArray == 0] = 0
if showMe == True:
utils.plotArray(deadArray)
return deadArray
def getNonAllocPixels(self, showMe=False):
"""
returns a mask indicating which pixels had no beammap locations
(set to constant /r0/p250/)
1's for pixels with locations, 0's for pixels without locations
if showMe is True, a plot of the mask pops up
"""
nonAllocArray = np.ones((self.nRow, self.nCol))
nonAllocArray[np.core.defchararray.startswith(self.beamImage, self.nonAllocPixelName)] = 0
if showMe == True:
utils.plotArray(nonAllocArray)
return nonAllocArray
def getRoachNum(self,iRow,iCol):
pixelLabel = self.beamImage[iRow][iCol]
iRoach = int(pixelLabel.split('r')[1][0])
return iRoach
def getFrame(self, firstSec=0, integrationTime=-1):
"""
return a 2d array of numbers with the integrated flux per pixel,
suitable for use as a frame in util/utils.py function makeMovie
firstSec=0 is the starting second to include
integrationTime=-1 is the number of seconds to include, or -1
to include all to the end of this file
output: the frame, in photons per pixel, a 2d numpy array of
np.unint32
"""
frame = np.zeros((self.nRow,self.nCol),dtype=np.uint32)
for iRow in range(self.nRow):
for iCol in range(self.nCol):
pl = self.getTimedPacketList(iRow,iCol,
firstSec,integrationTime)
nphoton = pl['timestamps'].size
frame[iRow][iCol] += nphoton
return frame
# a different way to get, with the functionality of getTimedPacketList
    def getPackets(self, iRow, iCol, firstSec, integrationTime,
                   fields=(),
                   expTailTimescale=None,
                   timeSpacingCut=None,
                   timeMaskLast=True):
        """
        get and parse packets for pixel iRow,iCol starting at firstSec for integrationTime seconds.
        fields is a list of strings to indicate what to parse in
        addition to timestamps: allowed values are 'peakHeights' and
        'baselines'
        expTailTimescale (if not None) subtracts the exponential tail
        off of one photon from the peakHeight of the next photon.
        This also attempts to counter effects of photon pile-up for
        short (< 100 us) dead times.
        timeSpacingCut (if not None) rejects photons sooner than
        timeSpacingCut seconds after the last photon.
        timeMaskLast -- apply time masks after timeSpacingCut and expTailTimescale.
        set this to "false" to mimic behavior of getTimedPacketList
        return a dictionary containing:
          effectiveIntTime (n seconds)
          timestamps
          other fields requested
        """
        warnings.warn('Does anyone use this function?? If not, we should get rid of it')
        # Decide which optional fields to parse; anything not named in
        # 'fields' is skipped (fields must support .index, i.e. list/tuple).
        parse = {'peakHeights': True, 'baselines': True}
        for key in parse.keys():
            try:
                fields.index(key)
            except ValueError:
                parse[key] = False
        lastSec = firstSec+integrationTime
        # Work out inter, the times to mask
        # start with nothing being masked
        inter = interval()
        # mask the hot pixels if requested
        if self.hotPixIsApplied:
            inter = self.getPixelBadTimes(iRow, iCol)
        # mask cosmics if requested
        if self.cosmicMaskIsApplied:
            inter = inter | self.cosmicMask
        # mask the fraction of the first integer second not requested
        firstSecInt = int(np.floor(firstSec))
        if (firstSec > firstSecInt):
            inter = inter | interval([firstSecInt, firstSec])
        # mask the fraction of the last integer second not requested
        lastSecInt = int(np.ceil(firstSec+integrationTime))
        integrationTimeInt = lastSecInt-firstSecInt
        if (lastSec < lastSecInt):
            inter = inter | interval([lastSec, lastSecInt])
        #Calculate the total effective time for the integration after removing
        #any 'intervals':
        integrationInterval = interval([firstSec, lastSec])
        maskedIntervals = inter & integrationInterval #Intersection of the integration and the bad times for this pixel.
        effectiveIntTime = integrationTime - utils.intervalSize(maskedIntervals)
        pixelData = self.getPixel(iRow, iCol, firstSec=firstSecInt,
                                  integrationTime=integrationTimeInt)
        # calculate how long a np array needs to be to hold everything
        nPackets = 0
        for packets in pixelData:
            nPackets += len(packets)
        # create empty arrays
        # NOTE(review): np.float is removed in NumPy >= 1.24; this would need
        # dtype=float under modern NumPy.
        timestamps = np.empty(nPackets, dtype=np.float)
        if parse['peakHeights']: peakHeights=np.empty(nPackets, np.int16)
        if parse['baselines']: baselines=np.empty(nPackets, np.int16)
        # fill in the arrays one second at a time; each second's tick counts
        # are offset by that second's absolute start time t.
        ipt = 0
        t = firstSecInt
        for packets in pixelData:
            iptNext = ipt+len(packets)
            timestamps[ipt:iptNext] = \
                t + np.bitwise_and(packets,self.timestampMask)*self.tickDuration
            if parse['peakHeights']:
                peakHeights[ipt:iptNext] = np.bitwise_and(
                    np.right_shift(packets, self.nBitsAfterParabolaPeak),
                    self.pulseMask)
            if parse['baselines']:
                baselines[ipt:iptNext] = np.bitwise_and(
                    np.right_shift(packets, self.nBitsAfterBaseline),
                    self.pulseMask)
            ipt = iptNext
            t += 1
        if not timeMaskLast:
            # apply time masks
            # create a mask, "True" mean mask value
            # the call to makeMask dominates the running time
            if self.makeMaskVersion == 'v1':
                mask = ObsFile.makeMaskV1(timestamps, inter)
            else:
                mask = ObsFile.makeMaskV2(timestamps, inter)
            tsMaskedArray = ma.array(timestamps,mask=mask)
            timestamps = ma.compressed(tsMaskedArray)
            if parse['peakHeights']:
                peakHeights = \
                    ma.compressed(ma.array(peakHeights,mask=mask))
            if parse['baselines']:
                baselines = \
                    ma.compressed(ma.array(baselines,mask=mask))
        #diagnose("getPackets AAA",timestamps,peakHeights,baselines,None)
        if expTailTimescale != None and len(timestamps) > 0:
            #find the time between peaks
            timeSpacing = np.diff(timestamps)
            timeSpacing[timeSpacing < 0] = 1.
            timeSpacing = np.append(1.,timeSpacing)#arbitrarily assume the first photon is 1 sec after the one before it
            # relPeakHeights not used?
            #relPeakHeights = peakHeights-baselines
            #assume each peak is riding on the tail of an exponential starting at the peak before it with e-fold time of expTailTimescale
            #print 30*"."," getPackets"
            #print 'dt',timeSpacing[0:10]
            expTails = (1.*peakHeights-baselines)*np.exp(-1.*timeSpacing/expTailTimescale)
            #print 'expTail',expTails[0:10]
            #print 'peak',peakHeights[0:10]
            #print 'peak-baseline',1.*peakHeights[0:10]-baselines[0:10]
            #print 'expT',np.exp(-1.*timeSpacing[0:10]/expTailTimescale)
            #subtract off this exponential tail
            # NOTE(review): np.int is removed in NumPy >= 1.24 (use dtype=int).
            peakHeights = np.array(peakHeights-expTails,dtype=np.int)
            #print 'peak',peakHeights[0:10]
        if timeSpacingCut != None and len(timestamps) > 0:
            timeSpacing = np.diff(timestamps)
            #include first photon and photons after who are at least
            #timeSpacingCut after the previous photon
            timeSpacingMask = np.concatenate([[True],timeSpacing >= timeSpacingCut])
            timestamps = timestamps[timeSpacingMask]
            if parse['peakHeights']:
                peakHeights = peakHeights[timeSpacingMask]
            if parse['baselines']:
                baselines = baselines[timeSpacingMask]
        if timeMaskLast:
            # apply time masks
            # create a mask, "True" mean mask value
            # the call to makeMask dominates the running time
            if self.makeMaskVersion == 'v1':
                mask = ObsFile.makeMaskV1(timestamps, inter)
            else:
                mask = ObsFile.makeMaskV2(timestamps, inter)
            tsMaskedArray = ma.array(timestamps,mask=mask)
            timestamps = ma.compressed(tsMaskedArray)
            if parse['peakHeights']:
                peakHeights = \
                    ma.compressed(ma.array(peakHeights,mask=mask))
            if parse['baselines']:
                baselines = \
                    ma.compressed(ma.array(baselines,mask=mask))
        # build up the dictionary of values and return it
        retval = {"effIntTime": effectiveIntTime,
                  "timestamps":timestamps}
        if parse['peakHeights']:
            retval['peakHeights'] = peakHeights
        if parse['baselines']:
            retval['baselines'] = baselines
        return retval
@staticmethod
def makeMask01(timestamps, inter):
def myfunc(x): return inter.__contains__(x)
vecfunc = vectorize(myfunc,otypes=[np.bool])
return vecfunc(timestamps)
@staticmethod
def makeMask(timestamps, inter):
"""
return an array of booleans, the same length as timestamps,
with that value inter.__contains__(timestamps[i])
"""
return ObsFile.makeMaskV2(timestamps, inter)
@staticmethod
def makeMaskV1(timestamps, inter):
"""
return an array of booleans, the same length as timestamps,
with that value inter.__contains__(timestamps[i])
"""
retval = np.empty(len(timestamps),dtype=np.bool)
ainter = np.array(inter)
t0s = ainter[:,0]
t1s = ainter[:,1]
tMin = t0s[0]
tMax = t1s[-1]
for i in range(len(timestamps)):
ts = timestamps[i]
if ts < tMin:
retval[i] = False
elif ts > tMax:
retval[i] = False
else:
tIndex = np.searchsorted(t0s, ts)
t0 = t0s[tIndex-1]
t1 = t1s[tIndex-1]
if ts < t1:
retval[i] = True
else:
retval[i] = False
return retval
@staticmethod
def makeMaskV2(timestamps, inter):
"""
return an array of booleans, the same length as timestamps,
with that value inter.__contains__(timestamps[i])
"""
lt = len(timestamps)
retval = np.zeros(lt,dtype=np.bool)
for i in inter:
if len(i) == 2:
i0 = np.searchsorted(timestamps,i[0])
if i0 == lt: break # the intervals are later than timestamps
i1 = np.searchsorted(timestamps,i[1])
if i1 > 0:
i0 = max(i0,0)
retval[i0:i1] = True
return retval
    def loadBeammapFile(self,beammapFileName):
        """
        Load an external beammap file in place of the obsfile's attached beammap.
        Can be used to correct pixel location mistakes.
        NB: Do not use after loadFlatCalFile

        The file is looked up under $MKID_PROC_PATH/pixRemap/. On success this
        updates self.beamImage (plus the derived beamImageRoaches /
        beamImagePixelNums arrays) and self.nRow/self.nCol.

        NOTE(review): the first existence check below always resolves against
        the pixRemap scratch directory and returns early if that file is
        missing, which makes the second lookup redundant and prevents the
        plain-path branch (else clause) from ever being used on its own --
        confirm whether a direct beammapFileName path was meant to be
        supported.
        """
        #get the beam image.
        scratchDir = os.getenv('MKID_PROC_PATH', '/')
        beammapPath = os.path.join(scratchDir, 'pixRemap')
        fullBeammapFileName = os.path.join(beammapPath, beammapFileName)
        if (not os.path.exists(fullBeammapFileName)):
            print 'Beammap file does not exist: ', fullBeammapFileName
            return
        if (not os.path.exists(beammapFileName)):
            #get the beam image.
            scratchDir = os.getenv('MKID_PROC_PATH', '/')
            beammapPath = os.path.join(scratchDir, 'pixRemap')
            fullBeammapFileName = os.path.join(beammapPath, beammapFileName)
            if (not os.path.exists(fullBeammapFileName)):
                print 'Beammap file does not exist: ', fullBeammapFileName
                return
        else:
            fullBeammapFileName = beammapFileName
        beammapFile = tables.openFile(fullBeammapFileName,'r')
        self.beammapFileName = fullBeammapFileName
        try:
            # Labels end in a timestamp segment; if the new beammap's labels
            # have an empty final segment, carry over the old timestamp.
            old_tstamp = self.beamImage[0][0].split('/')[-1]
            self.beamImage = beammapFile.getNode('/beammap/beamimage').read()
            if self.beamImage[0][0].split('/')[-1]=='':
                self.beamImage = np.core.defchararray.add(self.beamImage,old_tstamp)
            # Derive per-pixel roach and pixel numbers from labels of the
            # form '/rX/pYYY/...'.
            self.beamImageRoaches = np.array([[int(s.split('r')[1].split('/')[0]) for s in row] for row in self.beamImage])
            self.beamImagePixelNums = np.array([[int(s.split('p')[1].split('/')[0]) for s in row] for row in self.beamImage])
        except Exception as inst:
            print 'Can\'t access beamimage for ',self.fullFileName
        beamShape = self.beamImage.shape
        self.nRow = beamShape[0]
        self.nCol = beamShape[1]
        beammapFile.close()
def loadCentroidListFile(self, centroidListFileName):
"""
Load an astrometry (centroid list) file into the
current obs file instance.
"""
scratchDir = os.getenv('MKID_PROC_PATH', '/')
centroidListPath = os.path.join(scratchDir, 'centroidListFiles')
fullCentroidListFileName = os.path.join(centroidListPath, centroidListFileName)
if (not os.path.exists(fullCentroidListFileName)):
print 'Astrometry centroid list file does not exist: ', fullCentroidListFileName
return
self.centroidListFile = tables.openFile(fullCentroidListFileName)
self.centroidListFileName = fullCentroidListFileName
    def loadFlatCalFile(self, flatCalFileName):
        """
        loads the flat cal factors from the given file
        NB: if you are going to load a different beammap, call loadBeammapFile before this function

        Looks up flatCalFileName under $MKID_PROC_PATH/flatCalSolnFiles and
        populates self.flatWeights and self.flatFlags with one entry per
        pixel per flat-cal wavelength bin.

        Raises
        ------
        Exception
            If the flat cal file does not exist.
        """
        scratchDir = os.getenv('MKID_PROC_PATH', '/')
        flatCalPath = os.path.join(scratchDir, 'flatCalSolnFiles')
        fullFlatCalFileName = os.path.join(flatCalPath, flatCalFileName)
        if (not os.path.exists(fullFlatCalFileName)):
            print 'flat cal file does not exist: ', fullFlatCalFileName
            raise Exception('flat cal file {} does not exist'.format(fullFlatCalFileName))
        self.flatCalFile = tables.openFile(fullFlatCalFileName, mode='r')
        self.flatCalFileName = fullFlatCalFileName
        # Wavelength bin edges; the number of bins is one less than the
        # number of edges.
        self.flatCalWvlBins = self.flatCalFile.root.flatcal.wavelengthBins.read()
        self.nFlatCalWvlBins = len(self.flatCalWvlBins)-1
        self.flatWeights = np.zeros((self.nRow,self.nCol,self.nFlatCalWvlBins),dtype=np.double)
        self.flatFlags = np.zeros((self.nRow,self.nCol,self.nFlatCalWvlBins),dtype=np.uint16)
        try:
            # New-style cal: entries are keyed by (roach, pixelnum), so map
            # each entry onto the currently loaded beammap.
            flatCalSoln = self.flatCalFile.root.flatcal.calsoln.read()
            for calEntry in flatCalSoln:
                entryRows,entryCols = np.where((calEntry['roach'] == self.beamImageRoaches) & (calEntry['pixelnum'] == self.beamImagePixelNums))
                try:
                    entryRow = entryRows[0]
                    entryCol = entryCols[0]
                    self.flatWeights[entryRow,entryCol,:] = calEntry['weights']
                    self.flatFlags[entryRow,entryCol,:] = calEntry['weightFlags']
                except IndexError: #entry for an unbeammapped pixel
                    pass
        except tables.exceptions.NoSuchNodeError:
            #loading old (beammap-dependant) flat cal
            print 'loading old (beammap-dependant) flat cal'
            self.flatWeights = self.flatCalFile.root.flatcal.weights.read()
            self.flatFlags = self.flatCalFile.root.flatcal.flags.read()
def loadFluxCalFile(self, fluxCalFileName):
"""
loads the flux cal factors from the given file
"""
scratchDir = os.getenv('MKID_PROC_PATH', '/')
fluxCalPath = os.path.join(scratchDir, 'fluxCalSolnFiles')
fullFluxCalFileName = os.path.join(fluxCalPath, fluxCalFileName)
if (not os.path.exists(fullFluxCalFileName)):
print 'flux cal file does not exist: ', fullFluxCalFileName
raise IOError
self.fluxCalFile = tables.openFile(fullFluxCalFileName, mode='r')
self.fluxCalFileName = fullFluxCalFileName
self.fluxWeights = self.fluxCalFile.root.fluxcal.weights.read()
self.fluxFlags = self.fluxCalFile.root.fluxcal.flags.read()
self.fluxCalWvlBins = self.fluxCalFile.root.fluxcal.wavelengthBins.read()
self.nFluxCalWvlBins = self.nFlatCalWvlBins
def loadHotPixCalFile(self, hotPixCalFileName, switchOnMask=True,reasons=[]):
"""
Included for backward compatibility, simply calls loadTimeMask
"""
self.loadTimeMask(timeMaskFileName=hotPixCalFileName,switchOnMask=switchOnMask,reasons=reasons)
def loadTimeMask(self, timeMaskFileName, switchOnMask=True,reasons=[]):
"""
Load a hot pixel time mask from the given file, in a similar way to
loadWvlCalFile, loadFlatCalFile, etc. Switches on hot pixel
masking by default.
Set switchOnMask=False to prevent switching on hot pixel masking.
"""
import hotpix.hotPixels as hotPixels #Here instead of at top to prevent circular import problems.
scratchDir = os.getenv('MKID_PROC_PATH', '/')
timeMaskPath = os.path.join(scratchDir, 'timeMasks')
fullTimeMaskFileName = os.path.join(timeMaskPath, timeMaskFileName)
if (not os.path.exists(fullTimeMaskFileName)):
print 'time mask file does not exist: ', fullTimeMaskFileName
raise IOError
self.hotPixFile = tables.openFile(fullTimeMaskFileName)
self.hotPixTimeMask = hotPixels.readHotPixels(self.hotPixFile, reasons=reasons)
self.hotPixFileName = fullTimeMaskFileName
if (os.path.basename(self.hotPixTimeMask.obsFileName)
!= os.path.basename(self.fileName)):
warnings.warn('Mismatch between hot pixel time mask file and obs file. Not loading/applying mask!')
self.hotPixTimeMask = None
raise ValueError
else:
if switchOnMask: self.switchOnHotPixTimeMask(reasons=reasons)
def loadStandardCosmicMask(self, switchOnCosmicMask=True):
"""
call this method to load the cosmic mask file from the standard location,
defined in Filename
"""
fileName = FileName(obsFile=self)
cfn = fileName.cosmicMask()
self.loadCosmicMask(cosmicMaskFileName = cfn, switchOnCosmicMask=switchOnCosmicMask)
def loadCosmicMask(self, cosmicMaskFileName=None, switchOnCosmicMask=True):
self.cosmicMask = ObsFile.readCosmicIntervalFromFile(cosmicMaskFileName)
self.cosmicMaskFileName = os.path.abspath(cosmicMaskFileName)
if switchOnCosmicMask: self.switchOnCosmicTimeMask()
def setCosmicMask(self, cosmicMask, switchOnCosmicMask=True):
self.cosmicMask = cosmicMask
if switchOnCosmicMask: self.switchOnCosmicTimeMask()
    def loadTimeAdjustmentFile(self,timeAdjustFileName,verbose=False):
        """
        loads obsfile specific adjustments to add to all timestamps read
        adjustments are read from timeAdjustFileName
        it is suggested to pass timeAdjustFileName=FileName(run=run).timeAdjustments()

        Sets self.firmwareDelay (a single offset) and self.roachDelays
        (per-roach offsets for this obs file). On failure, the partially
        loaded time-adjust state is rolled back before re-raising.
        """
        self.timeAdjustFile = tables.openFile(timeAdjustFileName)
        self.firmwareDelay = self.timeAdjustFile.root.timeAdjust.firmwareDelay.read()[0]['firmwareDelay']
        roachDelayTable = self.timeAdjustFile.root.timeAdjust.roachDelays
        try:
            self.roachDelays = roachDelayTable.readWhere('obsFileName == "%s"'%self.fileName)[0]['roachDelays']
            self.timeAdjustFileName = os.path.abspath(timeAdjustFileName)
        except:
            # No entry for this obs file (or a read error): undo the partial
            # load so the object is left in a consistent state, then re-raise.
            self.timeAdjustFile.close()
            self.timeAdjustFile=None
            self.timeAdjustFileName=None
            del self.firmwareDelay
            if verbose:
                print 'Unable to load time adjustment for '+self.fileName
            raise
def loadBestWvlCalFile(self,master=True):
"""
Searchs the waveCalSolnFiles directory tree for the best wavecal to apply to this obsfile.
if master==True then it first looks for a master wavecal solution
"""
#scratchDir = os.getenv('MKID_PROC_PATH', '/')
#run = FileName(obsFile=self).run
#wvlDir = scratchDir+"/waveCalSolnFiles/"+run+'/'
wvlDir = os.path.dirname(os.path.dirname(FileName(obsFile=self).mastercalSoln()))
#print wvlDir
obs_t_num = strpdate2num("%Y%m%d-%H%M%S")(FileName(obsFile=self).tstamp)
wvlCalFileName = None
wvl_t_num = None
for root,dirs,files in os.walk(wvlDir):
for f in files:
if f.endswith('.h5') and ((master and f.startswith('mastercal_')) or (not master and f.startswith('calsol_'))):
tstamp=(f.split('_')[1]).split('.')[0]
t_num=strpdate2num("%Y%m%d-%H%M%S")(tstamp)
if t_num < obs_t_num and (wvl_t_num == None or t_num > wvl_t_num):
wvl_t_num = t_num
wvlCalFileName = root+os.sep+f
if wvlCalFileName==None or not os.path.exists(str(wvlCalFileName)):
if master:
print "Could not find master wavecal solutions"
self.loadBestWvlCalFile(master=False)
else:
print "Searched "+wvlDir+" but no appropriate wavecal solution found"
raise IOError
else:
self.loadWvlCalFile(wvlCalFileName)
    def loadWvlCalFile(self, wvlCalFileName):
        """
        loads the wavelength cal coefficients from a given file

        Accepts either an existing path or a file name to be found under the
        standard waveCalSolnFiles directory. Populates per-pixel tables:
        wvlCalTable (polynomial fit coefficients), wvlErrorTable (sigma),
        wvlFlagTable (wave_flag status, 0 == good) and wvlRangeTable (valid
        solution wavelength range).
        """
        if os.path.exists(str(wvlCalFileName)):
            fullWvlCalFileName = str(wvlCalFileName)
        else:
            #scratchDir = os.getenv('MKID_PROC_PATH', '/')
            #wvlDir = os.path.join(scratchDir, 'waveCalSolnFiles')
            wvlDir = os.path.dirname(os.path.dirname(FileName(obsFile=self).mastercalSoln()))
            fullWvlCalFileName = os.path.join(wvlDir, str(wvlCalFileName))
        try:
            # If the file has already been loaded for this ObsFile then just return
            if hasattr(self,"wvlCalFileName") and (self.wvlCalFileName == fullWvlCalFileName):
                return
            self.wvlCalFile = tables.openFile(fullWvlCalFileName, mode='r')
            self.wvlCalFileName = fullWvlCalFileName
            wvlCalData = self.wvlCalFile.root.wavecal.calsoln
            self.wvlCalTable = np.zeros([self.nRow, self.nCol, ObsFile.nCalCoeffs])
            self.wvlErrorTable = np.zeros([self.nRow, self.nCol])
            self.wvlFlagTable = np.zeros([self.nRow, self.nCol])
            self.wvlRangeTable = np.zeros([self.nRow, self.nCol, 2])
            # Map each cal entry onto the currently loaded beammap by its
            # (roach, pixelnum) pair.
            for calPixel in wvlCalData:
                #use the current loaded beammap
                entryRows,entryCols = np.where((calPixel['roach'] == self.beamImageRoaches) & (calPixel['pixelnum'] == self.beamImagePixelNums))
                try:
                    entryRow = entryRows[0]
                    entryCol = entryCols[0]
                    self.wvlFlagTable[entryRow,entryCol] = calPixel['wave_flag']
                    self.wvlErrorTable[entryRow,entryCol] = calPixel['sigma']
                    # Only good solutions (wave_flag == 0) get fit coefficients
                    # and a valid range.
                    if calPixel['wave_flag'] == 0:
                        self.wvlCalTable[entryRow,entryCol] = calPixel['polyfit']
                        self.wvlRangeTable[entryRow,entryCol] = calPixel['solnrange']
                except IndexError: #entry for an unbeammapped pixel
                    pass
#            for calPixel in wvlCalData:
#                #rely on the beammap loaded when the cal was done
#                self.wvlFlagTable[calPixel['pixelrow']][calPixel['pixelcol']] = calPixel['wave_flag']
#                self.wvlErrorTable[calPixel['pixelrow']][calPixel['pixelcol']] = calPixel['sigma']
#                if calPixel['wave_flag'] == 0:
#                    self.wvlCalTable[calPixel['pixelrow']][calPixel['pixelcol']] = calPixel['polyfit']
#                    self.wvlRangeTable[calPixel['pixelrow']][calPixel['pixelcol']] = calPixel['solnrange']
        except IOError:
            print 'wavelength cal file does not exist: ', fullWvlCalFileName
            raise
    def loadAllCals(self,calLookupTablePath=None,wvlCalPath=None,flatCalPath=None,
                    fluxCalPath=None,timeMaskPath=None,timeAdjustmentPath=None,cosmicMaskPath=None,
                    beammapPath=None,centroidListPath=None):
        """
        loads all possible cal files from parameters or a calLookupTable. To avoid loading a particular cal, set the corresponding parameter to the empty string ''

        For each cal, a path of None means "look it up in the cal lookup
        table by this obs file's timestamp" and '' means "skip". The beammap
        is loaded first, before the wavecal/flatcal, matching the ordering
        requirement documented in loadBeammapFile.
        """
        calLookupTable = CalLookupFile(path=calLookupTablePath)
        _,_,obsTstamp = FileName(obsFile=self).getComponents()
        if beammapPath is None:
            beammapPath = calLookupTable.beammap(obsTstamp)
        if beammapPath != '':
            self.loadBeammapFile(beammapPath)
            print 'loaded beammap',beammapPath
        else:
            print 'did not load new beammap'
        if wvlCalPath is None:
            wvlCalPath = calLookupTable.calSoln(obsTstamp)
        if wvlCalPath != '':
            self.loadWvlCalFile(wvlCalPath)
            print 'loaded wavecal',self.wvlCalFileName
        else:
            print 'did not load wavecal'
        if flatCalPath is None:
            flatCalPath = calLookupTable.flatSoln(obsTstamp)
        if flatCalPath != '':
            self.loadFlatCalFile(flatCalPath)
            print 'loaded flatcal',self.flatCalFileName
        else:
            print 'did not load flatcal'
        if fluxCalPath is None:
            fluxCalPath = calLookupTable.fluxSoln(obsTstamp)
        if fluxCalPath != '':
            self.loadFluxCalFile(fluxCalPath)
            print 'loaded fluxcal',self.fluxCalFileName
        else:
            print 'did not load fluxcal'
        if timeMaskPath is None:
            timeMaskPath = calLookupTable.timeMask(obsTstamp)
        if timeMaskPath != '':
            self.loadTimeMask(timeMaskPath)
            print 'loaded time mask',timeMaskPath
        else:
            print 'did not load time mask'
        if timeAdjustmentPath is None:
            timeAdjustmentPath = calLookupTable.timeAdjustments(obsTstamp)
        if timeAdjustmentPath != '':
            self.loadTimeAdjustmentFile(timeAdjustmentPath)
            print 'loaded time adjustments',self.timeAdjustFileName
        else:
            print 'did not load time adjustments'
        if cosmicMaskPath is None:
            cosmicMaskPath = calLookupTable.cosmicMask(obsTstamp)
        if cosmicMaskPath != '':
            self.loadCosmicMask(cosmicMaskPath)
            print 'loaded cosmic mask',self.cosmicMaskFileName
        else:
            print 'did not load cosmic mask'
        if centroidListPath is None:
            centroidListPath = calLookupTable.centroidList(obsTstamp)
        if centroidListPath != '':
            self.loadCentroidListFile(centroidListPath)
            print 'loaded centroid list',self.centroidListFileName
        else:
            print 'did not load centroid list'
def loadFilter(self, filterName = 'V', wvlBinEdges = None,switchOnFilter = True):
'''
'''
std = MKIDStd.MKIDStd()
self.rawFilterWvls, self.rawFilterTrans = std._loadFilter(filterName)
#check to see if wvlBinEdges are provided, and make them if not
if wvlBinEdges == None:
if self.flatCalFile is not None:
print "No wvlBinEdges provided, using bins defined by flatCalFile"
wvlBinEdges = self.flatCalWvlBins
else:
raise ValueError("No wvlBinEdges provided. Please load flatCalFile or make bins with ObsFile.makeWvlBins")
self.rawFilterTrans/=max(self.rawFilterTrans) #normalize filter to 1
rebinned = utils.rebin(self.rawFilterWvls, self.rawFilterTrans, wvlBinEdges)
self.filterWvlBinEdges = wvlBinEdges
self.filterWvls = rebinned[:,0]
self.filterTrans = rebinned[:,1]
self.filterTrans[np.isnan(self.filterTrans)] = 0.0
if switchOnFilter: self.switchOnFilter()
    def switchOffFilter(self):
        """Stop applying the loaded synthetic filter (it stays loaded)."""
        self.filterIsApplied = False
        print "Turned off synthetic filter"
def switchOnFilter(self):
if self.filterTrans != None:
self.filterIsApplied = True
print "Turned on synthetic filter"
else:
print "No filter loaded! Use loadFilter to select a filter first"
self.filterIsApplied = False
@staticmethod
def makeWvlBins(energyBinWidth=.1, wvlStart=3000, wvlStop=13000):
"""
returns an array of wavlength bin edges, with a fixed energy bin width
withing the limits given in wvlStart and wvlStop
Args:
energyBinWidth: bin width in eV
wvlStart: Lower wavelength edge in Angstrom
wvlStop: Upper wavelength edge in Angstrom
Returns:
an array of wavelength bin edges that can be used with numpy.histogram(bins=wvlBinEdges)
"""
#Calculate upper and lower energy limits from wavelengths
#Note that start and stop switch when going to energy
energyStop = ObsFile.h * ObsFile.c * ObsFile.angstromPerMeter / wvlStart
energyStart = ObsFile.h * ObsFile.c * ObsFile.angstromPerMeter / wvlStop
nWvlBins = int((energyStop - energyStart) / energyBinWidth)
#Construct energy bin edges
energyBins = np.linspace(energyStart, energyStop, nWvlBins + 1)
#Convert back to wavelength and reverse the order to get increasing wavelengths
wvlBinEdges = np.array(ObsFile.h * ObsFile.c * ObsFile.angstromPerMeter / energyBins)
wvlBinEdges = wvlBinEdges[::-1]
return wvlBinEdges
def parsePhotonPacketLists(self, packets, doParabolaFitPeaks=True, doBaselines=True):
"""
Parses an array of uint64 packets with the obs file format
inter is an interval of time values to mask out
returns a list of timestamps,parabolaFitPeaks,baselines
"""
# parse all packets
packetsAll = [np.array(packetList, dtype='uint64') for packetList in packets] #64 bit photon packet
timestampsAll = [np.bitwise_and(packetList, self.timestampMask) for packetList in packetsAll]
outDict = {'timestamps':timestampsAll}
if doParabolaFitPeaks:
parabolaFitPeaksAll = [np.bitwise_and(\
np.right_shift(packetList, self.nBitsAfterParabolaPeak), \
self.pulseMask) for packetList in packetsAll]
outDict['parabolaFitPeaks']=parabolaFitPeaksAll
if doBaselines:
baselinesAll = [np.bitwise_and(\
np.right_shift(packetList, self.nBitsAfterBaseline), \
self.pulseMask) for packetList in packetsAll]
outDict['baselines'] = baselinesAll
return outDict
def parsePhotonPackets(self, packets, doParabolaFitPeaks=True, doBaselines=True):
"""
Parses an array of uint64 packets with the obs file format
inter is an interval of time values to mask out
returns a list of timestamps,parabolaFitPeaks,baselines
"""
# parse all packets
packetsAll = np.array(packets, dtype='uint64') #64 bit photon packet
timestampsAll = np.bitwise_and(packets, self.timestampMask)
if doParabolaFitPeaks:
parabolaFitPeaksAll = np.bitwise_and(\
np.right_shift(packets, self.nBitsAfterParabolaPeak), \
self.pulseMask)
else:
parabolaFitPeaksAll = np.arange(0)
if doBaselines:
baselinesAll = np.bitwise_and(\
np.right_shift(packets, self.nBitsAfterBaseline), \
self.pulseMask)
else:
baselinesAll = np.arange(0)
timestamps = timestampsAll
parabolaFitPeaks = parabolaFitPeaksAll
baselines = baselinesAll
# return the values filled in above
return timestamps, parabolaFitPeaks, baselines
    def maskTimestamps(self,timestamps,inter=interval(),otherListsToFilter=[]):
        """
        Masks out timestamps that fall in an given interval
        inter is an interval of time values to mask out
        otherListsToFilter is a list of parallel arrays to timestamps that should be masked in the same way
        returns a dict with keys 'timestamps','otherLists'
        """
        # NOTE(review): mutable default otherListsToFilter=[] — never mutated
        # here, but consider a None default to be safe.
        # first special case: inter masks out everything so return zero-length
        # numpy arrays
        if (inter == self.intervalAll):
            filteredTimestamps = np.arange(0)
            otherLists = [np.arange(0) for list in otherListsToFilter]
        else:
            if inter == interval() or len(timestamps) == 0:
                # nothing excluded or nothing to exclude
                # so return all unpacked values
                filteredTimestamps = timestamps
                otherLists = otherListsToFilter
            else:
                # there is a non-trivial set of times to mask.
                slices = calculateSlices(inter, timestamps)
                filteredTimestamps = repackArray(timestamps, slices)
                otherLists = []
                for eachList in otherListsToFilter:
                    filteredList = repackArray(eachList,slices)
                    otherLists.append(filteredList)
        # return the values filled in above
        return {'timestamps':filteredTimestamps,'otherLists':otherLists}
    def parsePhotonPackets_old(self, packets, inter=interval(),
                               doParabolaFitPeaks=True, doBaselines=True,timestampOffset=0):
        """
        Parses an array of uint64 packets with the obs file format
        inter is an interval of time values to mask out
        returns a list of timestamps,parabolaFitPeaks,baselines

        Older variant superseded by parsePhotonPackets + maskTimestamps;
        this one unpacks and applies the interval mask in a single call.
        (NOTE(review): timestampOffset is accepted but unused here.)
        """
        # first special case: inter masks out everything so return zero-length
        # numpy arrays
        if (inter == self.intervalAll):
            timestamps = np.arange(0)
            parabolaFitPeaks = np.arange(0)
            baselines = np.arange(0)
        else:
            # parse all packets
            packetsAll = np.array(packets, dtype='uint64') #64 bit photon packet
            timestampsAll = np.bitwise_and(packets, self.timestampMask)
            if doParabolaFitPeaks:
                parabolaFitPeaksAll = np.bitwise_and(\
                    np.right_shift(packets, self.nBitsAfterParabolaPeak), \
                    self.pulseMask)
            else:
                parabolaFitPeaksAll = np.arange(0)
            if doBaselines:
                baselinesAll = np.bitwise_and(\
                    np.right_shift(packets, self.nBitsAfterBaseline), \
                    self.pulseMask)
            else:
                baselinesAll = np.arange(0)
            if inter == interval() or len(timestampsAll) == 0:
                # nothing excluded or nothing to exclude
                # so return all unpacked values
                timestamps = timestampsAll
                parabolaFitPeaks = parabolaFitPeaksAll
                baselines = baselinesAll
            else:
                # there is a non-trivial set of times to mask.
                slices = calculateSlices(inter, timestampsAll)
                timestamps = repackArray(timestampsAll, slices)
                parabolaFitPeaks = repackArray(parabolaFitPeaksAll, slices)
                baselines = repackArray(baselinesAll, slices)
        # return the values filled in above
        return timestamps, parabolaFitPeaks, baselines
    def plotApertureSpectrum(self, pixelRow, pixelCol, radius1, radius2, weighted=False, fluxWeighted=False, lowCut=3000, highCut=7000, firstSec=0,integrationTime=-1):
        """
        Plot the spectrum from getApertureSpectrum for an aperture centered
        on (pixelRow, pixelCol); parameters are forwarded unchanged.
        """
        summed_array, bin_edges = self.getApertureSpectrum(pixelCol=pixelCol, pixelRow=pixelRow, radius1=radius1, radius2=radius2, weighted=weighted, fluxWeighted=fluxWeighted, lowCut=lowCut, highCut=highCut, firstSec=firstSec,integrationTime=integrationTime)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        # NOTE(review): the hard-coded [12:-2]/[12:-1] slices trim bin edges
        # for display — presumably skipping below-cut bins; confirm.
        ax.plot(bin_edges[12:-2], summed_array[12:-1])
        plt.xlabel('Wavelength ($\AA$)')
        plt.ylabel('Counts')
        plt.show()
    def plotPixelSpectra(self, pixelRow, pixelCol, firstSec=0, integrationTime= -1,
                         weighted=False, fluxWeighted=False):
        """
        plots the wavelength calibrated spectrum of a given pixel integrated over a given time
        if integrationTime is -1, All time after firstSec is used.
        if weighted is True, flat cal weights are applied

        The x axis uses the flat cal wavelength bin lower edges, so a flat
        cal must be loaded before calling this.
        """
        spectrum = (self.getPixelSpectrum(pixelRow, pixelCol, firstSec, integrationTime,
                    weighted=weighted, fluxWeighted=fluxWeighted))['spectrum']
        fig = plt.figure()
        ax = fig.add_subplot(111)
        # Plot against the lower edge of each wavelength bin.
        ax.plot(self.flatCalWvlBins[0:-1], spectrum, label='spectrum for pixel[%d][%d]' % (pixelRow, pixelCol))
        plt.show()
def setWvlCutoffs(self, wvlLowerLimit=3000, wvlUpperLimit=8000):
"""
Sets wavelength cutoffs so that if convertToWvl(excludeBad=True) or getPixelWvlList(excludeBad=True) is called
wavelengths outside these limits are excluded. To remove limits
set wvlLowerLimit and/or wvlUpperLimit to None. To use the wavecal limits
for each individual pixel, set wvlLowerLimit and/or wvlUpperLimit to -1
NB - changed defaults for lower/upper limits to None (from 3000 and 8000). JvE 2/22/13
"""
self.wvlLowerLimit = wvlLowerLimit
self.wvlUpperLimit = wvlUpperLimit
    def switchOffHotPixTimeMask(self):
        """
        Switch off hot pixel time masking - bad pixel times will no longer be
        removed (although the mask remains 'loaded' in ObsFile instance).
        """
        self.hotPixIsApplied = False
    def switchOnHotPixTimeMask(self,reasons=[]):
        """
        Switch on hot pixel time masking. Subsequent calls to getPixelCountImage
        etc. will have bad pixel times removed.

        Raises RuntimeError if no hot pixel time mask has been loaded.
        """
        if self.hotPixTimeMask is None:
            raise RuntimeError, 'No hot pixel file loaded'
        self.hotPixIsApplied = True
        # Optionally restrict the mask to specific masking reasons.
        if len(reasons)>0:
            self.hotPixTimeMask.set_mask(reasons)
            #self.hotPixTimeMask.mask = [self.hotPixTimeMask.reasonEnum[reason] for reason in reasons]
    def switchOffCosmicTimeMask(self):
        """
        Switch off cosmic time masking - cosmic times will no longer be
        removed (although the mask remains 'loaded' in ObsFile instance).
        """
        self.cosmicMaskIsApplied = False
    def switchOnCosmicTimeMask(self):
        """
        Switch on cosmic time masking. Subsequent calls to getPixelCountImage
        etc. will have cosmic times removed.

        Raises RuntimeError if no cosmic mask has been loaded.
        """
        if self.cosmicMask is None:
            raise RuntimeError, 'No cosmic mask file loaded'
        self.cosmicMaskIsApplied = True
@staticmethod
def writeCosmicIntervalToFile(intervals, ticksPerSec, fileName,
beginTime, endTime, stride,
threshold, nSigma, populationMax):
h5f = tables.openFile(fileName, 'w')
headerGroup = h5f.createGroup("/", 'Header', 'Header')
headerTable = h5f.createTable(headerGroup,'Header',
cosmicHeaderDescription, 'Header')
header = headerTable.row
header['ticksPerSec'] = ticksPerSec
header['beginTime'] = beginTime
header['endTime'] = endTime
header['stride'] = stride
header['threshold'] = threshold
header['nSigma'] = nSigma
header['populationMax'] = populationMax
header.append()
headerTable.flush()
headerTable.close()
tbl = h5f.createTable("/", "cosmicMaskData", TimeMask.TimeMask,
"Cosmic Mask")
for interval in intervals:
row = tbl.row
tBegin = max(0,int(np.round(interval[0]*ticksPerSec)))
row['tBegin'] = tBegin
tEnd = int(np.round(interval[1]*ticksPerSec))
row['tEnd'] = tEnd
row['reason'] = TimeMask.timeMaskReason["cosmic"]
row.append()
tbl.flush()
tbl.close()
h5f.close()
@staticmethod
def readCosmicIntervalFromFile(fileName):
fid = tables.openFile(fileName, mode='r')
headerInfo = fid.getNode("/Header","Header")[0]
ticksPerSec = headerInfo['ticksPerSec']
table = fid.getNode("/cosmicMaskData")
enum = table.getEnum('reason')
retval = interval()
for i in range(table.nrows):
temp = (interval[table[i]['tBegin'],table[i]['tEnd']])/ticksPerSec
retval = retval | temp
fid.close()
return retval
    @staticmethod
    def invertInterval(interval0, iMin=float("-inf"), iMax=float("inf")):
        """
        invert the interval
        inputs:
          interval0 -- the interval to invert
          iMin=-inf -- beginning of the new interval
          iMax=inf -- end of the new interval
        return:
          the interval between iMin, iMax that is NOT masked by interval0
        """
        if len(interval0) == 0:
            # Nothing masked: the complement is the whole [iMin, iMax] range.
            retval = interval[iMin,iMax]
        else:
            retval = interval()
            previous = [iMin,iMin]
            for segment in interval0:
                # Add the gap between the previous segment's end and this
                # segment's start, if it is non-empty.
                if previous[1] < segment[0]:
                    temp = interval[previous[1],segment[0]]
                    if len(temp) > 0:
                        retval = retval | temp
                previous = segment
            # Close the final gap from the last segment up to iMax.
            if previous[1] < iMax:
                temp = interval[previous[1],iMax]
                if len(temp) > 0:
                    retval = retval | temp
        return retval
def writePhotonList(self,*nkwargs,**kwargs): #filename=None, firstSec=0, integrationTime=-1):
"""
Write out the photon list for this obs file.
See photonlist/photlist.py for input parameters and outputs.
Shifted over to photonlist/, May 10 2013, JvE. All under construction at the moment.
"""
import photonlist.photlist #Here instead of at top to avoid circular imports
photonlist.photlist.writePhotonList(self,*nkwargs,**kwargs)
# writes out the photon list for this obs file at $MKID_PROC_PATH/photonListFileName
# currently cuts out photons outside the valid wavelength ranges from the wavecal
#
# Currently being updated... JvE 4/26/2013.
# This version should automatically reject time-masked photons assuming a hot pixel mask is
# loaded and 'switched on'.
#
# INPUTS:
# filename - string, optionally use to specify non-default output file name
# for photon list. If not supplied, default name/path is determined
# using original obs. file name and standard directory paths (as per
# util.FileName). Added 4/29/2013, JvE.
# firstSec - Start time within the obs. file from which to begin the
# photon list (in seconds, from the beginning of the obs. file).
# integrationTime - Length of exposure time to extract (in sec, starting from
# firstSec). -1 to extract to end of obs. file.
#
# """
#
# if self.flatCalFile is None: raise RuntimeError, "No flat cal. file loaded"
# if self.fluxCalFile is None: raise RuntimeError, "No flux cal. file loaded"
# if self.wvlCalFile is None: raise RuntimeError, "No wavelength cal. file loaded"
# if self.hotPixFile is None: raise RuntimeError, "No hot pixel file loaded"
# if self.file is None: raise RuntimeError, "No obs file loaded...?"
#
# plFile = self.createEmptyPhotonListFile(filename)
# #try:
# plTable = plFile.root.photons.photons
#
# try:
# plFile.copyNode(self.flatCalFile.root.flatcal, newparent=plFile.root, newname='flatcal', recursive=True)
# plFile.copyNode(self.fluxCalFile.root.fluxcal, newparent=plFile.root, newname='fluxcal', recursive=True)
# plFile.copyNode(self.wvlCalFile.root.wavecal, newparent=plFile.root, newname='wavecal', recursive=True)
# plFile.copyNode(self.hotPixFile.root, newparent=plFile.root, newname='timemask', recursive=True)
# plFile.copyNode(self.file.root.beammap, newparent=plFile.root, newname='beammap', recursive=True)
# plFile.copyNode(self.file.root.header, newparent=plFile.root, recursive=True)
# except:
# plFile.flush()
# plFile.close()
# raise
#
# plFile.flush()
#
# fluxWeights = self.fluxWeights #Flux weights are independent of pixel location.
# #Extend flux weight/flag arrays as for flat weight/flags.
# fluxWeights = np.hstack((fluxWeights[0],fluxWeights,fluxWeights[-1]))
# fluxFlags = np.hstack((pipelineFlags.fluxCal['belowWaveCalRange'],
# self.fluxFlags,
# pipelineFlags.fluxCal['aboveWaveCalRange']))
#
# for iRow in xrange(self.nRow):
# for iCol in xrange(self.nCol):
# flag = self.wvlFlagTable[iRow, iCol]
# if flag == 0:#only write photons in good pixels ***NEED TO UPDATE TO USE DICTIONARY***
# energyError = self.wvlErrorTable[iRow, iCol] #Note wvlErrorTable is in eV !! Assume constant across all wavelengths. Not the best approximation, but a start....
# flatWeights = self.flatWeights[iRow, iCol]
# #Extend flat weight and flag arrays at beginning and end to include out-of-wavelength-calibration-range photons.
# flatWeights = np.hstack((flatWeights[0],flatWeights,flatWeights[-1]))
# flatFlags = np.hstack((pipelineFlags.flatCal['belowWaveCalRange'],
# self.flatFlags[iRow, iCol],
# pipelineFlags.flatCal['aboveWaveCalRange']))
#
#
# #wvlRange = self.wvlRangeTable[iRow, iCol]
#
# #---------- Replace with call to getPixelWvlList -----------
# #go through the list of seconds in a pixel dataset
# #for iSec, secData in enumerate(self.getPixel(iRow, iCol)):
#
# #timestamps, parabolaPeaks, baselines = self.parsePhotonPackets(secData)
# #timestamps = iSec + self.tickDuration * timestamps
#
# #pulseHeights = np.array(parabolaPeaks, dtype='double') - np.array(baselines, dtype='double')
# #wavelengths = self.convertToWvl(pulseHeights, iRow, iCol, excludeBad=False)
# #------------------------------------------------------------
#
# x = self.getPixelWvlList(iRow,iCol,excludeBad=False,dither=True,firstSec=firstSec,
# integrationTime=integrationTime)
# timestamps, wavelengths = x['timestamps'], x['wavelengths'] #Wavelengths in Angstroms
#
# #Convert errors in eV to errors in Angstroms (see notebook, May 7 2013)
# wvlErrors = ((( (energyError*units.eV) * (wavelengths*units.Angstrom)**2 ) /
# (constants.h*constants.c) )
# .to(units.Angstrom).value)
#
# #Calculate what wavelength bin each photon falls into to see which flat cal factor should be applied
# if len(wavelengths) > 0:
# flatBinIndices = np.digitize(wavelengths, self.flatCalWvlBins) #- 1 -
# else:
# flatBinIndices = np.array([])
#
# #Calculate which wavelength bin each photon falls into for the flux cal weight factors.
# if len(wavelengths) > 0:
# fluxBinIndices = np.digitize(wavelengths, self.fluxCalWvlBins)
# else:
# fluxBinIndices = np.array([])
#
# for iPhoton in xrange(len(timestamps)):
# #if wavelengths[iPhoton] > wvlRange[0] and wavelengths[iPhoton] < wvlRange[1] and binIndices[iPhoton] >= 0 and binIndices[iPhoton] < len(flatWeights):
# #create a new row for the photon list
# newRow = plTable.row
# newRow['Xpix'] = iCol
# newRow['Ypix'] = iRow
# newRow['ArrivalTime'] = timestamps[iPhoton]
# newRow['Wavelength'] = wavelengths[iPhoton]
# newRow['WaveError'] = wvlErrors[iPhoton]
# newRow['FlatFlag'] = flatFlags[flatBinIndices[iPhoton]]
# newRow['FlatWeight'] = flatWeights[flatBinIndices[iPhoton]]
# newRow['FluxFlag'] = fluxFlags[fluxBinIndices[iPhoton]]
# newRow['FluxWeight'] = fluxWeights[fluxBinIndices[iPhoton]]
# newRow.append()
# #finally:
# plTable.flush()
# plFile.close()
#
def calculateSlices(inter, timestamps):
    '''
    Hopefully a quicker version of  the original calculateSlices. JvE 3/8/2013

    Returns a list of strings, with format i0:i1 for a python array slice
    inter is the interval of values in timestamps to mask out.
    The resulting list of strings indicate elements that are not masked out

    inter must be a single pyinterval 'interval' object (can be multi-component)
    timestamps is a 1D array of timestamps (MUST be an *ordered* array).

    If inter is a multi-component interval, the components must be unioned and sorted
    (which is the default behaviour when intervals are defined, and is probably
    always the case, so shouldn't be a problem).
    '''
    timerange = interval([timestamps[0],timestamps[-1]])
    slices = []
    slce = "0:"     #Start at the beginning of the timestamps array....
    imax = 0        #Will prevent error if inter is an empty interval
    for eachComponent in inter.components:
        #Check if eachComponent of the interval overlaps the timerange of the
        #timestamps - if not, skip to the next component.

        if eachComponent & timerange == interval(): continue
        #[
        #Possibly a bit faster to do this and avoid interval package, but not fully tested:
        #if eachComponent[0][1] < timestamps[0] or eachComponent[0][0] > timestamps[-1]: continue
        #]

        imin = np.searchsorted(timestamps, eachComponent[0][0], side='left') #Find nearest timestamp to lower bound
        imax = np.searchsorted(timestamps, eachComponent[0][1], side='right') #Nearest timestamp to upper bound

        #As long as we're not about to create a wasteful '0:0' slice, go ahead
        #and finish the new slice and append it to the list
        if imin != 0:
            slce += str(imin)
            slices.append(slce)
        # The next kept slice starts where this masked component ends.
        slce = str(imax)+":"
    #Finish the last slice at the end of the timestamps array if we're not already there:
    if imax != len(timestamps):
        slce += str(len(timestamps))
        slices.append(slce)
    return slices
def repackArray(array, slices):
    """
    Return a copy of array containing only the elements selected by slices,
    where each slice is a string of the form "i0:i1" (as produced by
    calculateSlices). The result is a float numpy array.
    """
    bounds = [tuple(int(part) for part in slce.split(":")) for slce in slices]
    total = sum(hi - lo for lo, hi in bounds)
    packed = np.zeros(total)
    cursor = 0
    for lo, hi in bounds:
        packed[cursor:cursor + (hi - lo)] = array[lo:hi]
        cursor += hi - lo
    return packed
def diagnose(message,timestamps, peakHeights, baseline, expTails):
    """
    Debug helper: print the timestamps bracketing a hard-coded time of
    interest (99.000426 s).
    (NOTE(review): peakHeights, baseline and expTails are accepted but
    unused here.)
    """
    print "BEGIN DIAGNOSE message=",message
    index = np.searchsorted(timestamps,99.000426)
    print "index=",index
    # Show the timestamps immediately around the insertion point.
    for i in range(index-1,index+2):
        print "i=%5d timestamp=%11.6f"%(i,timestamps[i])
    print "ENDED DIAGNOSE message=",message
class cosmicHeaderDescription(tables.IsDescription):
    """PyTables row schema for the header of a cosmic mask h5 file
    (see ObsFile.writeCosmicIntervalToFile)."""
    ticksPerSec = tables.Float64Col() # number of ticks per second
    beginTime = tables.Float64Col()   # begin time used to find cosmics (seconds)
    endTime = tables.Float64Col()     # end time used to find cosmics (seconds)
    stride = tables.Int32Col()        # parameter of the cosmic-finding run (recorded as passed in)
    threshold = tables.Float64Col()   # parameter of the cosmic-finding run (recorded as passed in)
    nSigma = tables.Int32Col()        # parameter of the cosmic-finding run (recorded as passed in)
    populationMax = tables.Int32Col() # parameter of the cosmic-finding run (recorded as passed in)
#Temporary test
if __name__ == "__main__":
    # Smoke test: load one PAL2012 observation plus its matching wavelength
    # and flat calibration solutions, then build a pixel-count image.
    # NOTE(review): ObsFile and FileName are imported earlier in this file;
    # the hard-coded run/date/tstamp values only resolve on a machine that
    # has the original PAL2012 data tree.
    obs = ObsFile(FileName(run='PAL2012', date='20121210', tstamp='20121211-051650').obs())
    obs.loadWvlCalFile(FileName(run='PAL2012',date='20121210',tstamp='20121211-052230').calSoln())
    obs.loadFlatCalFile(FileName(obsFile=obs).flatSoln())
    # weighted/fluxWeighted off, but scale by effective integration time.
    beforeImg = obs.getPixelCountImage(weighted=False,fluxWeighted=False,scaleByEffInt=True)
| gpl-2.0 |
gfyoung/pandas | scripts/validate_unwanted_patterns.py | 2 | 13580 | #!/usr/bin/env python3
"""
Unwanted patterns test cases.
The reason this file exist despite the fact we already have
`ci/code_checks.sh`,
(see https://github.com/pandas-dev/pandas/blob/master/ci/code_checks.sh)
is that some of the test cases are more complex/impossible to validate via regex.
So this file is somewhat an extension to `ci/code_checks.sh`
"""
import argparse
import ast
import sys
import token
import tokenize
from typing import IO, Callable, Iterable, List, Set, Tuple
# Private names that are knowingly imported across pandas modules; the
# private-import check (`private_import_across_module`) skips anything
# listed here.
PRIVATE_IMPORTS_TO_IGNORE: Set[str] = {
    "_extension_array_shared_docs",
    "_index_shared_docs",
    "_interval_shared_docs",
    "_merge_doc",
    "_shared_docs",
    "_apply_docs",
    "_new_Index",
    "_new_PeriodIndex",
    "_doc_template",
    "_agg_template",
    "_pipe_template",
    "_get_version",
    "__main__",
    "_transform_template",
    "_flex_comp_doc_FRAME",
    "_op_descriptions",
    "_IntegerDtype",
    "_use_inf_as_na",
    "_get_plot_backend",
    "_matplotlib",
    "_arrow_utils",
    "_registry",
    "_get_offset",  # TODO: remove after get_offset deprecation enforced
    "_test_parse_iso8601",
    "_json_normalize",  # TODO: remove after deprecation is enforced
    "_testing",
    "_test_decorators",
    "__version__",  # check np.__version__ in compat.numpy.function
}
def _get_literal_string_prefix_len(token_string: str) -> int:
"""
Getting the length of the literal string prefix.
Parameters
----------
token_string : str
String to check.
Returns
-------
int
Length of the literal string prefix.
Examples
--------
>>> example_string = "'Hello world'"
>>> _get_literal_string_prefix_len(example_string)
0
>>> example_string = "r'Hello world'"
>>> _get_literal_string_prefix_len(example_string)
1
"""
try:
return min(
token_string.find(quote)
for quote in (r"'", r'"')
if token_string.find(quote) >= 0
)
except ValueError:
return 0
def bare_pytest_raises(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
    """
    Test Case for bare pytest raises.

    For example, this is wrong:

    >>> with pytest.raise(ValueError):
    ...     # Some code that raises ValueError

    And this is what we want instead:

    >>> with pytest.raise(ValueError, match="foo"):
    ...     # Some code that raises ValueError

    Parameters
    ----------
    file_obj : IO
        File-like object containing the Python code to validate.

    Yields
    ------
    line_number : int
        Line number of the bare pytest raise.
    msg : str
        Explanation of the error.

    Notes
    -----
    GH #23922
    """
    contents = file_obj.read()
    tree = ast.parse(contents)

    for node in ast.walk(tree):
        if not isinstance(node, ast.Call):
            continue

        try:
            # Only interested in calls of the exact form `pytest.raises(...)`.
            if not (node.func.value.id == "pytest" and node.func.attr == "raises"):
                continue
        except AttributeError:
            # Call whose func is not an attribute access (e.g. a bare name).
            continue

        # `any` over an empty keyword list is False, so this single check
        # covers both the "no arguments at all" and the "arguments but no
        # `match`" cases that were previously duplicated branches.
        if not any(keyword.arg == "match" for keyword in node.keywords):
            yield (
                node.lineno,
                "Bare pytests raise have been found. "
                "Please pass in the argument 'match' as well the exception.",
            )
PRIVATE_FUNCTIONS_ALLOWED = {"sys._getframe"}  # no known alternative


def private_function_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
    """
    Checking that a private function is not used across modules.

    Parameters
    ----------
    file_obj : IO
        File-like object containing the Python code to validate.

    Yields
    ------
    line_number : int
        Line number of the private function that is used across modules.
    msg : str
        Explanation of the error.
    """
    tree = ast.parse(file_obj.read())

    imported_modules: Set[str] = set()

    for node in ast.walk(tree):
        # Record every imported name (honouring `as` aliases) so we can
        # later tell whether a call target refers to an imported module.
        if isinstance(node, (ast.Import, ast.ImportFrom)):
            for module in node.names:
                imported_modules.add(module.asname or module.name)

        if not isinstance(node, ast.Call):
            continue

        try:
            module_name = node.func.value.id
            function_name = node.func.attr
        except AttributeError:
            continue

        # Exception section #
        # (Debatable) Class case / dunder case / explicit allow-list.
        looks_like_class = module_name[0].isupper()
        is_dunder = function_name.startswith("__") and function_name.endswith("__")
        is_allowed = module_name + "." + function_name in PRIVATE_FUNCTIONS_ALLOWED
        if looks_like_class or is_dunder or is_allowed:
            continue

        if module_name in imported_modules and function_name.startswith("_"):
            yield (node.lineno, f"Private function '{module_name}.{function_name}'")
def private_import_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
    """
    Checking that a private function is not imported across modules.

    Parameters
    ----------
    file_obj : IO
        File-like object containing the Python code to validate.

    Yields
    ------
    line_number : int
        Line number of import statement, that imports the private function.
    msg : str
        Explanation of the error.
    """
    tree = ast.parse(file_obj.read())

    for node in ast.walk(tree):
        if not isinstance(node, (ast.Import, ast.ImportFrom)):
            continue

        for module in node.names:
            # Only the last dotted component decides whether the imported
            # name is private.
            module_name = module.name.split(".")[-1]
            if module_name in PRIVATE_IMPORTS_TO_IGNORE:
                continue

            if module_name.startswith("_"):
                yield (node.lineno, f"Import of internal function {repr(module_name)}")
def strings_to_concatenate(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
    """
    Flag adjacent string literals that should be one literal.

    This test case is necessary after 'Black' (https://github.com/psf/black),
    which formats strings over multiple lines.

    For example, when this:

    >>> foo = (
    ...     "bar "
    ...     "baz"
    ... )

    Is becoming this:

    >>> foo = ("bar " "baz")

    'Black' is not considering this as an
    issue (see https://github.com/psf/black/issues/1051),
    so we are checking it here instead.

    Parameters
    ----------
    file_obj : IO
        File-like object containing the Python code to validate.

    Yields
    ------
    line_number : int
        Line number of unconcatenated string.
    msg : str
        Explanation of the error.

    Notes
    -----
    GH #30454
    """
    tokens: List = list(tokenize.generate_tokens(file_obj.readline))

    # Walk the token stream with a one-token lookbehind: two consecutive
    # STRING tokens mean an implicit concatenation.
    previous = None
    for current in tokens:
        if (
            previous is not None
            and previous.type == token.STRING
            and current.type == token.STRING
        ):
            yield (
                previous.start[0],
                (
                    "String unnecessarily split in two by black. "
                    "Please merge them manually."
                ),
            )
        previous = current
def strings_with_wrong_placed_whitespace(
    file_obj: IO[str],
) -> Iterable[Tuple[int, str]]:
    """
    Test case for leading spaces in concatenated strings.

    For example:

    >>> rule = (
    ...    "We want the space at the end of the line, "
    ...    "not at the beginning"
    ... )

    Instead of:

    >>> rule = (
    ...    "We want the space at the end of the line,"
    ...    " not at the beginning"
    ... )

    Parameters
    ----------
    file_obj : IO
        File-like object containing the Python code to validate.

    Yields
    ------
    line_number : int
        Line number of the wrongly placed whitespace.
    msg : str
        Explanation of the error.
    """

    def has_wrong_whitespace(first_line: str, second_line: str) -> bool:
        """
        Checking if the two lines are matching the unwanted pattern.

        Parameters
        ----------
        first_line : str
            First line to check.
        second_line : str
            Second line to check.

        Returns
        -------
        bool
            True if the two received strings match the unwanted pattern.

        Notes
        -----
        The unwanted pattern that we are trying to catch is if the spaces
        in a string that is concatenated over multiple lines are placed at
        the beginning of each continuation line instead of at the end of
        the previous line, unless the previous line ends with a newline
        character (\\n), in which case no trailing whitespace is wanted.
        """
        if first_line.endswith(r"\n"):
            return False
        # BUGFIX: these two guards must test for TWO spaces, not one.
        # With single-space tests, any `second_line` starting with one
        # space returned False here, which made the `return True` branch
        # below unreachable and the whole check dead code.  Two-space
        # tests deliberately let intentionally indented/aligned strings
        # pass through.
        elif first_line.startswith("  ") or second_line.startswith("  "):
            return False
        elif first_line.endswith("  ") or second_line.endswith("  "):
            return False
        elif (not first_line.endswith(" ")) and second_line.startswith(" "):
            return True
        return False

    tokens: List = list(tokenize.generate_tokens(file_obj.readline))

    for first_token, second_token, third_token in zip(tokens, tokens[1:], tokens[2:]):
        # Checking if we are in a block of concatenated strings: two STRING
        # tokens separated by a non-logical newline (NL).
        if (
            first_token.type == third_token.type == token.STRING
            and second_token.type == token.NL
        ):
            # Stripping the quotes, together with the string literal prefix.
            first_string: str = first_token.string[
                _get_literal_string_prefix_len(first_token.string) + 1 : -1
            ]
            second_string: str = third_token.string[
                _get_literal_string_prefix_len(third_token.string) + 1 : -1
            ]

            if has_wrong_whitespace(first_string, second_string):
                yield (
                    third_token.start[0],
                    (
                        "String has a space at the beginning instead "
                        "of the end of the previous string."
                    ),
                )
def main(
    function: Callable[[IO[str]], Iterable[Tuple[int, str]]],
    source_path: List[str],
    output_format: str,
) -> bool:
    """
    Main entry point of the script.

    Parameters
    ----------
    function : Callable
        Function to execute for the specified validation type.
    source_path : list of str
        Paths of the files to validate.
    output_format : str
        Output format of the error message; may reference the fields
        ``source_path``, ``line_number`` and ``msg``.

    Returns
    -------
    bool
        True if any unwanted pattern was found, False otherwise.

    Raises
    ------
    OSError
        If any entry of `source_path` cannot be opened.
    """
    is_failed: bool = False

    for file_path in source_path:
        with open(file_path, encoding="utf-8") as file_obj:
            for line_number, msg in function(file_obj):
                is_failed = True
                print(
                    output_format.format(
                        source_path=file_path, line_number=line_number, msg=msg
                    )
                )

    return is_failed
if __name__ == "__main__":
available_validation_types: List[str] = [
"bare_pytest_raises",
"private_function_across_module",
"private_import_across_module",
"strings_to_concatenate",
"strings_with_wrong_placed_whitespace",
]
parser = argparse.ArgumentParser(description="Unwanted patterns checker.")
parser.add_argument("paths", nargs="*", help="Source paths of files to check.")
parser.add_argument(
"--format",
"-f",
default="{source_path}:{line_number}:{msg}",
help="Output format of the error message.",
)
parser.add_argument(
"--validation-type",
"-vt",
choices=available_validation_types,
required=True,
help="Validation test case to check.",
)
args = parser.parse_args()
sys.exit(
main(
function=globals().get(args.validation_type),
source_path=args.paths,
output_format=args.format,
)
)
| bsd-3-clause |
toastedcornflakes/scikit-learn | sklearn/datasets/__init__.py | 72 | 3807 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
# Public API of sklearn.datasets; keep this list in sync with the imports
# above (sorted alphabetically).
__all__ = ['clear_data_home',
           'dump_svmlight_file',
           'fetch_20newsgroups',
           'fetch_20newsgroups_vectorized',
           'fetch_lfw_pairs',
           'fetch_lfw_people',
           'fetch_mldata',
           'fetch_olivetti_faces',
           'fetch_species_distributions',
           'fetch_california_housing',
           'fetch_covtype',
           'fetch_rcv1',
           'fetch_kddcup99',
           'get_data_home',
           'load_boston',
           'load_diabetes',
           'load_digits',
           'load_files',
           'load_iris',
           'load_breast_cancer',
           'load_lfw_pairs',
           'load_lfw_people',
           'load_linnerud',
           'load_mlcomp',
           'load_sample_image',
           'load_sample_images',
           'load_svmlight_file',
           'load_svmlight_files',
           'make_biclusters',
           'make_blobs',
           'make_circles',
           'make_classification',
           'make_checkerboard',
           'make_friedman1',
           'make_friedman2',
           'make_friedman3',
           'make_gaussian_quantiles',
           'make_hastie_10_2',
           'make_low_rank_matrix',
           'make_moons',
           'make_multilabel_classification',
           'make_regression',
           'make_s_curve',
           'make_sparse_coded_signal',
           'make_sparse_spd_matrix',
           'make_sparse_uncorrelated',
           'make_spd_matrix',
           'make_swiss_roll',
           'mldata_filename']
| bsd-3-clause |
RayMick/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
    """Estimate the bandwidth to use with the mean-shift algorithm.

    Note that this function takes time at least quadratic in the number of
    samples used. For large datasets, it's wise to set the ``n_samples``
    parameter to a small value.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points.

    quantile : float, default 0.3
        Should be between [0, 1]; 0.5 means that the median of all pairwise
        distances is used.

    n_samples : int, optional
        The number of samples to use. If not given, all samples are used.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Returns
    -------
    bandwidth : float
        The bandwidth parameter.
    """
    random_state = check_random_state(random_state)
    if n_samples is not None:
        # Sub-sample without replacement to keep the cost manageable.
        chosen = random_state.permutation(X.shape[0])[:n_samples]
        X = X[chosen]

    # Each point contributes its distance to the farthest of its k nearest
    # neighbours, where k is fixed by `quantile`.
    knn = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
    knn.fit(X)

    total = 0.
    # Query in batches of 500 points to bound peak memory use.
    for batch in gen_batches(len(X), 500):
        dist, _ = knn.kneighbors(X[batch, :], return_distance=True)
        total += np.max(dist, axis=1).sum()

    return total / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
               min_bin_freq=1, cluster_all=True, max_iter=300,
               max_iterations=None, n_jobs=1):
    """Perform mean shift clustering of data using a flat kernel.
    Read more in the :ref:`User Guide <mean_shift>`.
    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input data.
    bandwidth : float, optional
        Kernel bandwidth.
        If bandwidth is not given, it is determined using a heuristic based on
        the median of all pairwise distances. This will take quadratic time in
        the number of samples. The sklearn.cluster.estimate_bandwidth function
        can be used to do this more efficiently.
    seeds : array-like, shape=[n_seeds, n_features] or None
        Point used as initial kernel locations. If None and bin_seeding=False,
        each data point is used as a seed. If None and bin_seeding=True,
        see bin_seeding.
    bin_seeding : boolean, default=False
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        Ignored if seeds argument is not None.
    min_bin_freq : int, default=1
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds.
    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.
    max_iter : int, default 300
        Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if has not converged yet.
    max_iterations : int, deprecated
        Deprecated alias for ``max_iter``; scheduled for removal in 0.18.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    Returns
    -------
    cluster_centers : array, shape=[n_clusters, n_features]
        Coordinates of cluster centers.
    labels : array, shape=[n_samples]
        Cluster labels for each point.
    Notes
    -----
    See examples/cluster/plot_meanshift.py for an example.
    """
    # FIXME To be removed in 0.18
    if max_iterations is not None:
        warnings.warn("The `max_iterations` parameter has been renamed to "
                      "`max_iter` from version 0.16. The `max_iterations` "
                      "parameter will be removed in 0.18", DeprecationWarning)
        max_iter = max_iterations
    if bandwidth is None:
        bandwidth = estimate_bandwidth(X)
    elif bandwidth <= 0:
        raise ValueError("bandwidth needs to be greater than zero or None,\
            got %f" % bandwidth)
    if seeds is None:
        if bin_seeding:
            seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
        else:
            # Every data point acts as its own seed.
            seeds = X
    n_samples, n_features = X.shape
    center_intensity_dict = {}
    nbrs = NearestNeighbors(radius=bandwidth).fit(X)
    # execute iterations on all seeds in parallel
    all_res = Parallel(n_jobs=n_jobs)(
        delayed(_mean_shift_single_seed)
        (seed, X, nbrs, max_iter) for seed in seeds)
    # copy results in a dictionary; seeds whose neighborhood was empty
    # come back as None and are skipped here.
    for i in range(len(seeds)):
        if all_res[i] is not None:
            center_intensity_dict[all_res[i][0]] = all_res[i][1]
    if not center_intensity_dict:
        # nothing near seeds
        raise ValueError("No point was within bandwidth=%f of any seed."
                         " Try a different seeding strategy \
                         or increase the bandwidth."
                         % bandwidth)
    # POST PROCESSING: remove near duplicate points
    # If the distance between two kernels is less than the bandwidth,
    # then we have to remove one because it is a duplicate. Remove the
    # one with fewer points.
    sorted_by_intensity = sorted(center_intensity_dict.items(),
                                 key=lambda tup: tup[1], reverse=True)
    sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
    unique = np.ones(len(sorted_centers), dtype=np.bool)
    nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
    # Walk centers from most to least populated; each kept center
    # suppresses every other center inside its bandwidth.
    for i, center in enumerate(sorted_centers):
        if unique[i]:
            neighbor_idxs = nbrs.radius_neighbors([center],
                                                  return_distance=False)[0]
            unique[neighbor_idxs] = 0
            unique[i] = 1  # leave the current point as unique
    cluster_centers = sorted_centers[unique]
    # ASSIGN LABELS: a point belongs to the cluster that it is closest to
    nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
    labels = np.zeros(n_samples, dtype=np.int)
    distances, idxs = nbrs.kneighbors(X)
    if cluster_all:
        labels = idxs.flatten()
    else:
        # Orphans (farther than `bandwidth` from every center) get label -1.
        labels.fill(-1)
        bool_selector = distances.flatten() <= bandwidth
        labels[bool_selector] = idxs.flatten()[bool_selector]
    return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
    """Finds seeds for mean_shift.

    Finds seeds by first binning data onto a grid whose lines are
    spaced bin_size apart, and then choosing those bins with at least
    min_bin_freq points.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points, the same points that will be used in mean_shift.

    bin_size : float
        Controls the coarseness of the binning. Smaller values lead
        to more seeding (which is computationally more expensive). If you're
        not sure how to set this, set it to the value of the bandwidth used
        in clustering.mean_shift.

    min_bin_freq : integer, optional
        Only bins with at least min_bin_freq will be selected as seeds.
        Raising this value decreases the number of seeds found, which
        makes mean_shift computationally cheaper.

    Returns
    -------
    bin_seeds : array-like, shape=[n_samples, n_features]
        Points used as initial kernel positions in clustering.mean_shift.
        If binning does not reduce the point count, X itself is returned
        (with a warning).
    """
    # Bin points: snap every point to the nearest grid node and count how
    # many points land in each bin.
    bin_sizes = defaultdict(int)
    for point in X:
        binned_point = np.round(point / bin_size)
        bin_sizes[tuple(binned_point)] += 1

    # Select only those bins as seeds which have enough members.
    # NOTE: dict.items() replaces six.iteritems() here -- equivalent on both
    # Python 2 and 3, and drops the needless `six` dependency in this
    # function.
    bin_seeds = np.array([point for point, freq in bin_sizes.items() if
                          freq >= min_bin_freq], dtype=np.float32)
    if len(bin_seeds) == len(X):
        # Binning produced one bin per point, so seeding gains nothing.
        warnings.warn("Binning data failed with provided bin_size=%f,"
                      " using data points as seeds." % bin_size)
        return X
    # Convert grid indices back to data-space coordinates.
    bin_seeds = bin_seeds * bin_size
    return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
    """Mean shift clustering using a flat kernel.
    Mean shift clustering aims to discover "blobs" in a smooth density of
    samples. It is a centroid-based algorithm, which works by updating
    candidates for centroids to be the mean of the points within a given
    region. These candidates are then filtered in a post-processing stage to
    eliminate near-duplicates to form the final set of centroids.
    Seeding is performed using a binning technique for scalability.
    Read more in the :ref:`User Guide <mean_shift>`.
    Parameters
    ----------
    bandwidth : float, optional
        Bandwidth used in the RBF kernel.
        If not given, the bandwidth is estimated using
        sklearn.cluster.estimate_bandwidth; see the documentation for that
        function for hints on scalability (see also the Notes, below).
    seeds : array, shape=[n_samples, n_features], optional
        Seeds used to initialize kernels. If not set,
        the seeds are calculated by clustering.get_bin_seeds
        with bandwidth as the grid size and default values for
        other parameters.
    bin_seeding : boolean, optional
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        default value: False
        Ignored if seeds argument is not None.
    min_bin_freq : int, optional
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds. If not defined, set to 1.
    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers.
    labels_ :
        Labels of each point.
    Notes
    -----
    Scalability:
    Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will is
    to O(T*n*log(n)) in lower dimensions, with n the number of samples
    and T the number of points. In higher dimensions the complexity will
    tend towards O(T*n^2).
    Scalability can be boosted by using fewer seeds, for example by using
    a higher value of min_bin_freq in the get_bin_seeds function.
    Note that the estimate_bandwidth function is much less scalable than the
    mean shift algorithm and will be the bottleneck if it is used.
    References
    ----------
    Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
    feature space analysis". IEEE Transactions on Pattern Analysis and
    Machine Intelligence. 2002. pp. 603-619.
    """
    def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
                 min_bin_freq=1, cluster_all=True, n_jobs=1):
        # Store parameters unchanged, per the scikit-learn estimator
        # convention (no validation in __init__).
        self.bandwidth = bandwidth
        self.seeds = seeds
        self.bin_seeding = bin_seeding
        self.cluster_all = cluster_all
        self.min_bin_freq = min_bin_freq
        self.n_jobs = n_jobs
    def fit(self, X, y=None):
        """Perform clustering.
        Parameters
        -----------
        X : array-like, shape=[n_samples, n_features]
            Samples to cluster.
        y : Ignored
            Present only for scikit-learn API compatibility.
        """
        # Validate the input, then delegate all work to the module-level
        # mean_shift function.
        X = check_array(X)
        self.cluster_centers_, self.labels_ = \
            mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
                       min_bin_freq=self.min_bin_freq,
                       bin_seeding=self.bin_seeding,
                       cluster_all=self.cluster_all, n_jobs=self.n_jobs)
        return self
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape=[n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, "cluster_centers_")
        # Nearest fitted center decides the label.
        return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
samzhang111/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features)  # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest.
# NOTE(review): randint samples with replacement, so fewer than 10 distinct
# features may end up relevant.
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
    w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
         'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
# Convergence diagnostics: marginal log-likelihood per iteration.
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
uwescience/pulse2percept | pulse2percept/stimuli/images.py | 1 | 23652 | """`ImageStimulus`, `LogoBVL`, `LogoUCSB`"""
from os.path import dirname, join
import numpy as np
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib.axes import Subplot
from skimage import img_as_float32
from skimage.io import imread, imsave
from skimage.color import rgba2rgb, rgb2gray
from skimage.measure import moments as img_moments
from skimage.transform import (resize as img_resize, rotate as img_rotate,
warp as img_warp, SimilarityTransform)
from skimage.filters import (threshold_mean, threshold_minimum, threshold_otsu,
threshold_local, threshold_isodata, scharr, sobel,
median)
from skimage.feature import canny
from .base import Stimulus
from .pulses import BiphasicPulse
class ImageStimulus(Stimulus):
"""ImageStimulus
A stimulus made from an image, where each pixel gets assigned to an
electrode, and grayscale values in the range [0, 255] get converted to
activation values in the range [0, 1].
.. seealso ::
* `Basic Concepts > Electrical Stimuli <topics-stimuli>`
* :py:class:`~pulse2percept.stimuli.VideoStimulus`
.. versionadded:: 0.7
Parameters
----------
source : str
Path to image file. Supported image types include JPG, PNG, and TIF;
and are inferred from the file ending. If the file does not have a
proper file ending, specify the file type via ``format``.
Use :py:class:`~pulse2percept.stimuli.VideoStimulus` for GIFs.
format : str, optional
An image format string supported by imageio, such as 'JPG', 'PNG', or
'TIFF'. Use if the file type cannot be inferred from ``source``.
For a full list of supported formats, see
https://imageio.readthedocs.io/en/stable/formats.html.
resize : (height, width) or None, optional
A tuple specifying the desired height and the width of the image
stimulus. One shape dimension can be -1. In this case, the value is
inferred from the other dimension by keeping a constant aspect ratio.
as_gray : bool, optional
Flag whether to convert the image to grayscale.
A four-channel image is interpreted as RGBA (e.g., a PNG), and the
alpha channel will be blended with the color black.
electrodes : int, string or list thereof; optional
Optionally, you can provide your own electrode names. If none are
given, electrode names will be numbered 0..N.
.. note::
The number of electrode names provided must match the number of
pixels in the (resized) image.
metadata : dict, optional
Additional stimulus metadata can be stored in a dictionary.
compress : bool, optional
If True, will remove pixels with 0 grayscale value.
"""
__slots__ = ('img_shape',)
    def __init__(self, source, format=None, resize=None, as_gray=False,
                 electrodes=None, metadata=None, compress=False):
        """Build the stimulus from a file name, another stimulus, or an array."""
        # Avoid the mutable-default-argument trap by creating the dict here:
        if metadata is None:
            metadata = {}
        if isinstance(source, str):
            # Filename provided:
            img = imread(source, format=format)
            metadata['source'] = source
            metadata['source_shape'] = img.shape
        elif isinstance(source, ImageStimulus):
            # Copy construction: reuse pixel data, metadata and (unless
            # overridden by the caller) the electrode names:
            img = source.data.reshape(source.img_shape)
            metadata.update(source.metadata)
            if electrodes is None:
                electrodes = source.electrodes
        elif isinstance(source, np.ndarray):
            img = source
        else:
            raise TypeError("Source must be a filename or another "
                            "ImageStimulus, not %s." % type(source))
        if img.ndim < 2 or img.ndim > 3:
            raise ValueError("Images must have 2 or 3 dimensions, not "
                             "%d." % img.ndim)
        # Convert to grayscale if necessary:
        if as_gray:
            if img.ndim == 3 and img.shape[2] == 4:
                # Blend the background with black:
                img = rgba2rgb(img, background=(0, 0, 0))
            img = rgb2gray(img)
        # Resize if necessary:
        if resize is not None:
            height, width = resize
            if height < 0 and width < 0:
                raise ValueError('"height" and "width" cannot both be -1.')
            # A -1 dimension is inferred from the other one, keeping the
            # aspect ratio constant:
            if height < 0:
                height = int(img.shape[0] * width / img.shape[1])
            if width < 0:
                width = int(img.shape[1] * height / img.shape[0])
            img = img_resize(img, (height, width))
        # Store the original image shape for resizing and color conversion:
        self.img_shape = img.shape
        # Convert to float array in [0, 1] and call the Stimulus constructor:
        super(ImageStimulus, self).__init__(img_as_float32(img).ravel(),
                                            time=None, electrodes=electrodes,
                                            metadata=metadata,
                                            compress=compress)
def _pprint_params(self):
params = super(ImageStimulus, self)._pprint_params()
params.update({'img_shape': self.img_shape})
return params
def invert(self):
"""Invert the gray levels of the image
Returns
-------
stim : `ImageStimulus`
A copy of the stimulus object with all grayscale values inverted
in the range [0, 1].
"""
img = deepcopy(self.data.reshape(self.img_shape))
if len(self.img_shape) > 2:
img[..., :3] = 1.0 - img[..., :3]
else:
img = 1.0 - img
return ImageStimulus(img, electrodes=self.electrodes,
metadata=self.metadata)
def rgb2gray(self, electrodes=None):
"""Convert the image to grayscale
Parameters
----------
electrodes : int, string or list thereof; optional
Optionally, you can provide your own electrode names. If none are
given, electrode names will be numbered 0..N.
.. note::
The number of electrode names provided must match the number of
pixels in the grayscale image.
Returns
-------
stim : `ImageStimulus`
A copy of the stimulus object with all grayscale values inverted
in the range [0, 1].
Notes
-----
* A four-channel image is interpreted as RGBA (e.g., a PNG), and the
alpha channel will be blended with the color black.
"""
img = self.data.reshape(self.img_shape)
if img.ndim == 3 and img.shape[2] == 4:
# Blend the background with black:
img = rgba2rgb(img, background=(0, 0, 0))
return ImageStimulus(rgb2gray(img), electrodes=electrodes,
metadata=self.metadata)
def threshold(self, thresh, **kwargs):
"""Threshold the image
Parameters
----------
thresh : str or float
If a float in [0,1] is provided, pixels whose grayscale value is
above said threshold will be white, others black.
A number of additional methods are supported:
* 'mean': Threshold image based on the mean of grayscale values.
* 'minimum': Threshold image based on the minimum method, where
the histogram of the input image is computed and
smoothed until there are only two maxima.
* 'local': Threshold image based on `local pixel neighborhood
<https://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.threshold_local>_.
Requires ``block_size``: odd number of pixels in the
neighborhood.
* 'otsu': `Otsu's method
<https://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.threshold_otsu>_
* 'isodata': `ISODATA method
<https://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.threshold_isodata>`_,
also known as the Ridler-Calvard method or
intermeans.
Returns
-------
stim : `ImageStimulus`
A copy of the stimulus object with two gray levels 0.0 and 1.0
"""
if len(self.img_shape) > 2:
raise ValueError("Thresholding is only supported for grayscale "
"(i.e., single-channel) images. Use `rgb2gray` "
"first.")
img = self.data.reshape(self.img_shape)
if isinstance(thresh, str):
if thresh.lower() == 'mean':
img = img > threshold_mean(img)
elif thresh.lower() == 'minimum':
img = img > threshold_minimum(img, **kwargs)
elif thresh.lower() == 'local':
img = img > threshold_local(img, **kwargs)
elif thresh.lower() == 'otsu':
img = img > threshold_otsu(img, **kwargs)
elif thresh.lower() == 'isodata':
img = img > threshold_isodata(img, **kwargs)
else:
raise ValueError("Unknown threshold method '%s'." % thresh)
elif np.isscalar(thresh):
img = self.data.reshape(self.img_shape) > thresh
else:
raise TypeError("Threshold type must be str or float, not "
"%s." % type(thresh))
return ImageStimulus(img, electrodes=self.electrodes,
metadata=self.metadata)
def resize(self, shape, electrodes=None):
"""Resize the image
Parameters
----------
shape : (rows, cols)
Shape of the resized image
electrodes : int, string or list thereof; optional
Optionally, you can provide your own electrode names. If none are
given, electrode names will be numbered 0..N.
.. note::
The number of electrode names provided must match the number of
pixels in the grayscale image.
Returns
-------
stim : `ImageStimulus`
A copy of the stimulus object containing the resized image
"""
height, width = shape
if height < 0 and width < 0:
raise ValueError('"height" and "width" cannot both be -1.')
if height < 0:
height = int(self.img_shape[0] * width / self.img_shape[1])
if width < 0:
width = int(self.img_shape[1] * height / self.img_shape[0])
img = img_resize(self.data.reshape(self.img_shape), (height, width))
return ImageStimulus(img, electrodes=electrodes,
metadata=self.metadata)
def rotate(self, angle, center=None, mode='constant'):
"""Rotate the image
Parameters
----------
angle : float
Angle by which to rotate the image (degrees).
Positive: counter-clockwise, negative: clockwise
Returns
-------
stim : `ImageStimulus`
A copy of the stimulus object containing the rotated image
"""
img = img_rotate(self.data.reshape(self.img_shape), angle, mode=mode,
resize=False)
return ImageStimulus(img, electrodes=self.electrodes,
metadata=self.metadata)
def shift(self, shift_cols, shift_rows):
"""Shift the image foreground
This function shifts the center of mass (CoM) of the image by the
specified number of rows and columns.
Parameters
----------
shift_cols : float
Number of columns by which to shift the CoM.
Positive: to the right, negative: to the left
shift_rows : float
Number of rows by which to shift the CoM.
Positive: downward, negative: upward
Returns
-------
stim : `ImageStimulus`
A copy of the stimulus object containing the shifted image
"""
img = self.data.reshape(self.img_shape)
tf = SimilarityTransform(translation=[shift_cols, shift_rows])
img = img_warp(img, tf.inverse)
return ImageStimulus(img, electrodes=self.electrodes,
metadata=self.metadata)
def center(self, loc=None):
"""Center the image foreground
This function shifts the center of mass (CoM) to the image center.
Parameters
----------
loc : (col, row), optional
The pixel location at which to center the CoM. By default, shifts
the CoM to the image center.
Returns
-------
stim : `ImageStimulus`
A copy of the stimulus object containing the centered image
"""
# Calculate center of mass:
img = self.data.reshape(self.img_shape)
m = img_moments(img, order=1)
# No area found:
if np.isclose(m[0, 0], 0):
return img
# Center location:
if loc is None:
loc = np.array(self.img_shape[::-1]) / 2.0 - 0.5
# Shift the image by -centroid, +image center:
transl = (loc[0] - m[0, 1] / m[0, 0], loc[1] - m[1, 0] / m[0, 0])
tf_shift = SimilarityTransform(translation=transl)
img = img_warp(img, tf_shift.inverse)
return ImageStimulus(img, electrodes=self.electrodes,
metadata=self.metadata)
def scale(self, scaling_factor):
"""Scale the image foreground
This function scales the image foreground (excluding black pixels)
by a factor.
Parameters
----------
scaling_factor : float
Factory by which to scale the image
Returns
-------
stim : `ImageStimulus`
A copy of the stimulus object containing the scaled image
"""
if scaling_factor <= 0:
raise ValueError("Scaling factor must be greater than zero")
# Calculate center of mass:
img = self.data.reshape(self.img_shape)
m = img_moments(img, order=1)
# No area found:
if np.isclose(m[0, 0], 0):
return img
# Shift the phosphene to (0, 0):
center_mass = np.array([m[0, 1] / m[0, 0], m[1, 0] / m[0, 0]])
tf_shift = SimilarityTransform(translation=-center_mass)
# Scale the phosphene:
tf_scale = SimilarityTransform(scale=scaling_factor)
# Shift the phosphene back to where it was:
tf_shift_inv = SimilarityTransform(translation=center_mass)
# Combine all three transforms:
tf = tf_shift + tf_scale + tf_shift_inv
img = img_warp(img, tf.inverse)
return ImageStimulus(img, electrodes=self.electrodes,
metadata=self.metadata)
def filter(self, filt, **kwargs):
"""Filter the image
Parameters
----------
filt : str
Image filter. Additional parameters can be passed as keyword
arguments. The following filters are supported:
* 'sobel': Edge filter the image using the `Sobel filter
<https://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.sobel>`_.
* 'scharr': Edge filter the image using the `Scarr filter
<https://scikit-image.org/docs/stable/api/skimage.filters.html#skimage.filters.scharr>`_.
* 'canny': Edge filter the image using the `Canny algorithm
<https://scikit-image.org/docs/stable/api/skimage.feature.html#skimage.feature.canny>`_.
You can also specify ``sigma``, ``low_threshold``,
``high_threshold``, ``mask``, and ``use_quantiles``.
* 'median': Return local median of the image.
**kwargs :
Additional parameters passed to the filter
Returns
-------
stim : `ImageStimulus`
A copy of the stimulus object with the filtered image
"""
if not isinstance(filt, str):
raise TypeError("'filt' must be a string, not %s." % type(filt))
img = self.data.reshape(self.img_shape)
if filt.lower() == 'sobel':
img = sobel(img, **kwargs)
elif filt.lower() == 'scharr':
img = scharr(img, **kwargs)
elif filt.lower() == 'canny':
img = canny(img, **kwargs)
elif filt.lower() == 'median':
img = median(img, **kwargs)
else:
raise ValueError("Unknown filter '%s'." % filt)
return ImageStimulus(img, electrodes=self.electrodes,
metadata=self.metadata)
def apply(self, func, **kwargs):
"""Apply a function to the image
Parameters
----------
func : function
The function to apply to the image. Must accept a 2D or 3D image
and return an image with the same dimensions
**kwargs :
Additional parameters passed to the function
Returns
-------
stim : `ImageStimulus`
A copy of the stimulus object with the new image
"""
img = func(self.data.reshape(self.img_shape), **kwargs)
return ImageStimulus(img, electrodes=self.electrodes,
metadata=self.metadata)
def encode(self, amp_range=(0, 50), pulse=None):
"""Encode image using amplitude modulation
Encodes the image as a series of pulses, where the gray levels of the
image are interpreted as the amplitude of a pulse with values in
``amp_range``.
By default, a single biphasic pulse is used for each pixel, with 0.46ms
phase duration, and 500ms total stimulus duration.
Parameters
----------
amp_range : (min_amp, max_amp)
Range of amplitude values to use for the encoding. The image's
gray levels will be scaled such that the smallest value is mapped
onto ``min_amp`` and the largest onto ``max_amp``.
pulse : :py:class:`~pulse2percept.stimuli.Stimulus`, optional
A valid pulse or pulse train to be used for the encoding.
If None given, a :py:class:`~pulse2percept.stimuli.BiphasicPulse`
(0.46 ms phase duration, 500 ms total duration) will be used.
Returns
-------
stim : :py:class:`~pulse2percept.stimuli.Stimulus`
Encoded stimulus
"""
if pulse is None:
pulse = BiphasicPulse(1, 0.46, stim_dur=500)
else:
if not isinstance(pulse, Stimulus):
raise TypeError("'pulse' must be a Stimulus object.")
if pulse.time is None:
raise ValueError("'pulse' must have a time component.")
# Make sure the provided pulse has max amp 1:
enc_data = pulse.data
if not np.isclose(np.abs(enc_data).max(), 0):
enc_data /= np.abs(enc_data).max()
# Normalize the range of pixel values:
px_data = self.data - self.data.min()
if not np.isclose(np.abs(px_data).max(), 0):
px_data /= np.abs(px_data).max()
# Amplitude modulation:
stim = []
for px, e in zip(px_data.ravel(), self.electrodes):
amp = px * (amp_range[1] - amp_range[0]) + amp_range[0]
stim.append(Stimulus(amp * enc_data, time=pulse.time,
electrodes=e))
return Stimulus(stim)
def plot(self, ax=None, **kwargs):
"""Plot the stimulus
Parameters
----------
ax : matplotlib.axes.Axes or list thereof; optional, default: None
A Matplotlib Axes object or a list thereof (one per electrode to
plot). If None, a new Axes object will be created.
Returns
-------
ax: matplotlib.axes.Axes
Returns the axes with the plot on it
"""
if ax is None:
ax = plt.gca()
if 'figsize' in kwargs:
ax.figure.set_size_inches(kwargs.pop('figsize'))
cmap = None
if len(self.img_shape) == 2:
cmap = 'gray'
if 'cmap' in kwargs:
cmap = kwargs.pop('cmap')
ax.imshow(self.data.reshape(self.img_shape), cmap=cmap, **kwargs)
return ax
def save(self, fname):
"""Save the stimulus as an image
Parameters
----------
fname : str
The name of the image file to be created. Image type will be
inferred from the file extension.
"""
imsave(fname, self.data.reshape(self.img_shape))
class LogoBVL(ImageStimulus):
    """Bionic Vision Lab (BVL) logo
    Load the 576x720x4 Bionic Vision Lab (BVL) logo.
    .. versionadded:: 0.7
    Parameters
    ----------
    resize : (height, width) or None
        A tuple specifying the desired height and the width of the image
        stimulus.
    electrodes : int, string or list thereof; optional, default: None
        Electrode names for the stimulus. If None, electrodes are numbered
        0..N; the count must match the (resized) pixel count.
    metadata : dict, optional, default: None
        Additional stimulus metadata can be stored in a dictionary.
    """
    def __init__(self, resize=None, electrodes=None, metadata=None,
                 as_gray=False):
        # The logo ships with the package data:
        fname = join(dirname(__file__), 'data', 'bionic-vision-lab.png')
        super(LogoBVL, self).__init__(fname, format="PNG", resize=resize,
                                      as_gray=as_gray, electrodes=electrodes,
                                      metadata=metadata, compress=False)
class LogoUCSB(ImageStimulus):
    """UCSB logo
    Load a 324x727 white-on-black logo of the University of California, Santa
    Barbara.
    .. versionadded:: 0.7
    Parameters
    ----------
    resize : (height, width) or None
        A tuple specifying the desired height and the width of the image
        stimulus.
    electrodes : int, string or list thereof; optional, default: None
        Electrode names for the stimulus. If None, electrodes are numbered
        0..N; the count must match the (resized) pixel count.
    metadata : dict, optional, default: None
        Additional stimulus metadata can be stored in a dictionary.
    """
    def __init__(self, resize=None, electrodes=None, metadata=None):
        # The logo ships with the package data; it is always loaded as
        # grayscale:
        fname = join(dirname(__file__), 'data', 'ucsb.png')
        super(LogoUCSB, self).__init__(fname, format="PNG", resize=resize,
                                       as_gray=True, electrodes=electrodes,
                                       metadata=metadata, compress=False)
| bsd-3-clause |
bloyl/mne-python | examples/visualization/ssp_projs_sensitivity_map.py | 20 | 1256 | """
==================================
Sensitivity map of SSP projections
==================================
This example shows the sources that have a forward field
similar to the first SSP vector correcting for ECG.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from mne import read_forward_solution, read_proj, sensitivity_map
from mne.datasets import sample
print(__doc__)
# Paths into the MNE "sample" dataset (downloaded on first use):
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg-proj.fif'
# Load the forward operator and the ECG SSP projection vectors:
fwd = read_forward_solution(fname)
projs = read_proj(ecg_fname)
# take only one projection per channel type
projs = projs[::2]
# Compute sensitivity map
ssp_ecg_map = sensitivity_map(fwd, ch_type='grad', projs=projs, mode='angle')
###############################################################################
# Show sensitivity map
plt.hist(ssp_ecg_map.data.ravel())
plt.show()
# Source-space rendering of the sensitivity map on the right hemisphere:
args = dict(clim=dict(kind='value', lims=(0.2, 0.6, 1.)), smoothing_steps=7,
            hemi='rh', subjects_dir=subjects_dir)
ssp_ecg_map.plot(subject='sample', time_label='ECG SSP sensitivity', **args)
| bsd-3-clause |
andaag/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
    """Standardize a dataset along any axis
    Center to the mean and component wise scale to unit variance.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    X : array-like or CSR matrix.
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the means and standard deviations along. If 0,
        independently standardize each feature, otherwise (if 1) standardize
        each sample.
    with_mean : boolean, True by default
        If True, center the data before scaling.
    with_std : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).
    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.
    Instead the caller is expected to either set explicitly
    `with_mean=False` (in that case, only variance scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if he/she expects the materialized dense array to fit in memory.
    To avoid memory copy the caller should pass a CSR matrix.
    See also
    --------
    :class:`sklearn.preprocessing.StandardScaler` to perform centering and
    scaling using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
                    warn_on_dtype=True, estimator='the scale function',
                    dtype=FLOAT_DTYPES)
    if sparse.issparse(X):
        # Sparse path: centering is refused (it would densify the matrix),
        # and only per-feature (axis=0) scaling is supported.
        if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead"
                " See docstring for motivation and alternatives.")
        if axis != 0:
            raise ValueError("Can only scale sparse matrix on axis=0, "
                             " got axis=%d" % axis)
        if not sparse.isspmatrix_csr(X):
            # tocsr() already copies, so a second copy would be redundant:
            X = X.tocsr()
            copy = False
        if copy:
            X = X.copy()
        _, var = mean_variance_axis(X, axis=0)
        # Zero-variance (constant) features are left unscaled:
        var = _handle_zeros_in_scale(var)
        inplace_column_scale(X, 1 / np.sqrt(var))
    else:
        X = np.asarray(X)
        mean_, std_ = _mean_and_std(
            X, axis, with_mean=with_mean, with_std=with_std)
        if copy:
            X = X.copy()
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested in
        Xr = np.rollaxis(X, axis)
        if with_mean:
            Xr -= mean_
            mean_1 = Xr.mean(axis=0)
            # Verify that mean_1 is 'close to zero'. If X contains very
            # large values, mean_1 can also be very large, due to a lack of
            # precision of mean_. In this case, a pre-scaling of the
            # concerned feature is efficient, for instance by its mean or
            # maximum.
            if not np.allclose(mean_1, 0):
                warnings.warn("Numerical issues were encountered "
                              "when centering the data "
                              "and might not be solved. Dataset may "
                              "contain too large values. You may need "
                              "to prescale your features.")
                Xr -= mean_1
        if with_std:
            Xr /= std_
            if with_mean:
                mean_2 = Xr.mean(axis=0)
                # If mean_2 is not 'close to zero', it comes from the fact that
                # std_ is very small so that mean_2 = mean_1/std_ > 0, even if
                # mean_1 was close to zero. The problem is thus essentially due
                # to the lack of precision of mean_. A solution is then to
                # substract the mean again:
                if not np.allclose(mean_2, 0):
                    warnings.warn("Numerical issues were encountered "
                                  "when scaling the data "
                                  "and might not be solved. The standard "
                                  "deviation of the data is probably "
                                  "very close to 0. ")
                    Xr -= mean_2
    return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
    """Transforms features by scaling each feature to a given range.
    This estimator scales and translates each feature individually such
    that it is in the given range on the training set, i.e. between
    zero and one.
    The transformation is given by::
        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min
    where min, max = feature_range.
    This transformation is often used as an alternative to zero mean,
    unit variance scaling.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    feature_range: tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    copy : boolean, optional, default True
        Set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array).
    Attributes
    ----------
    min_ : ndarray, shape (n_features,)
        Per feature adjustment for minimum.
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
    """
    def __init__(self, feature_range=(0, 1), copy=True):
        self.feature_range = feature_range
        self.copy = copy
    def fit(self, X, y=None):
        """Compute the minimum and maximum to be used for later scaling.
        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
                        estimator=self, dtype=FLOAT_DTYPES)
        feature_range = self.feature_range
        if feature_range[0] >= feature_range[1]:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(feature_range))
        data_min = np.min(X, axis=0)
        data_range = np.max(X, axis=0) - data_min
        # Constant features have zero range; replace with 1 so the division
        # below does not produce NaNs:
        data_range = _handle_zeros_in_scale(data_range)
        # transform() computes X * scale_ + min_, which maps data_min onto
        # feature_range[0] and data_max onto feature_range[1]:
        self.scale_ = (feature_range[1] - feature_range[0]) / data_range
        self.min_ = feature_range[0] - data_min * self.scale_
        self.data_range = data_range
        self.data_min = data_min
        return self
    def transform(self, X):
        """Scaling features of X according to feature_range.
        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            Input data that will be transformed.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, ensure_2d=False)
        # In-place affine map: X * scale_ + min_
        X *= self.scale_
        X += self.min_
        return X
    def inverse_transform(self, X):
        """Undo the scaling of X according to feature_range.
        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            Input data that will be transformed.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, copy=self.copy, ensure_2d=False)
        # Inverse of the affine map applied in transform():
        X -= self.min_
        X /= self.scale_
        return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
    """Transforms features by scaling each feature to a given range.
    Functional counterpart of :class:`MinMaxScaler`: each feature (or each
    sample, for ``axis=1``) is linearly mapped onto ``feature_range`` using
    its own minimum and maximum on the given data.
    Read more in the :ref:`User Guide <preprocessing_scaler>`.
    Parameters
    ----------
    feature_range: tuple (min, max), default=(0, 1)
        Desired range of transformed data.
    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the input
        is already a numpy array).
    """
    scaler = MinMaxScaler(feature_range=feature_range, copy=copy)
    if axis != 0:
        # Scale per sample by scaling the transpose per feature:
        return scaler.fit_transform(X.T).T
    return scaler.fit_transform(X)
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual feature do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
    def inverse_transform(self, X, copy=None):
        """Scale back the data to the original representation
        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        check_is_fitted(self, 'std_')
        copy = copy if copy is not None else self.copy
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead See docstring for motivation and alternatives.")
            if not sparse.isspmatrix_csr(X):
                # tocsr() already allocates a new matrix, so the defensive
                # copy below would be redundant -- disable it.
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            if self.std_ is not None:
                inplace_column_scale(X, self.std_)
        else:
            X = np.asarray(X)
            if copy:
                X = X.copy()
            # Invert transform(): undo scaling first, then centering.
            if self.with_std:
                X *= self.std_
            if self.with_mean:
                X += self.mean_
        return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
    """Scale each feature by its maximum absolute value.

    Each feature is rescaled individually so that its maximal absolute
    value over the training set equals 1.0.  The data is never shifted or
    centered, hence sparsity is preserved, and the scaler works on sparse
    CSR or CSC matrices as well as dense arrays.

    Parameters
    ----------
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the
        input is already a numpy array).

    Attributes
    ----------
    scale_ : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
    """
    def __init__(self, copy=True):
        self.copy = copy

    def fit(self, X, y=None):
        """Compute the per-feature maximum absolute value used for scaling.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data from which the per-feature scale is derived, along
            the features axis.
        """
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            # The extreme in magnitude is either the column min or max.
            col_min, col_max = min_max_axis(X, axis=0)
            abs_max = np.maximum(np.abs(col_min), np.abs(col_max))
        else:
            abs_max = np.abs(X).max(axis=0)
            abs_max = np.array(abs_max).reshape(-1)
        # Constant-zero features get a scale of 1 so division is safe.
        self.scale_ = _handle_zeros_in_scale(abs_max)
        return self

    def transform(self, X, y=None):
        """Scale the data by the fitted per-feature maxima.

        Parameters
        ----------
        X : array-like or CSR matrix.
            The data that should be scaled.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if not sparse.issparse(X):
            X /= self.scale_
            return X
        # A single-row sparse matrix is scaled along its one row.
        if X.shape[0] == 1:
            inplace_row_scale(X, 1.0 / self.scale_)
        else:
            inplace_column_scale(X, 1.0 / self.scale_)
        return X

    def inverse_transform(self, X):
        """Undo the scaling, restoring the original representation.

        Parameters
        ----------
        X : array-like or CSR matrix.
            The data that should be transformed back.
        """
        check_is_fitted(self, 'scale_')
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if not sparse.issparse(X):
            X *= self.scale_
            return X
        if X.shape[0] == 1:
            inplace_row_scale(X, self.scale_)
        else:
            inplace_column_scale(X, self.scale_)
        return X
def maxabs_scale(X, axis=0, copy=True):
    """Scale each feature to the [-1, 1] range without breaking the sparsity.

    Each feature (or each sample, when ``axis=1``) is individually scaled
    so that its maximal absolute value in the training set becomes 1.0.
    Works on dense arrays and on sparse CSR or CSC matrices.

    Parameters
    ----------
    axis : int (0 by default)
        axis used to scale along. If 0, independently scale each feature,
        otherwise (if 1) scale each sample.
    copy : boolean, optional, default is True
        Set to False to perform inplace scaling and avoid a copy (if the
        input is already a numpy array).
    """
    scaler = MaxAbsScaler(copy=copy)
    if axis != 0:
        # Scale samples instead of features by transposing around the fit.
        return scaler.fit_transform(X.T).T
    return scaler.fit_transform(X)
class RobustScaler(BaseEstimator, TransformerMixin):
    """Scale features using statistics that are robust to outliers.

    This Scaler removes the median and scales the data according to
    the Interquartile Range (IQR). The IQR is the range between the 1st
    quartile (25th quantile) and the 3rd quartile (75th quantile).

    Centering and scaling happen independently on each feature by
    computing the relevant statistics on the samples in the training set.
    Median and interquartile range are then stored to be used on later
    data using the `transform` method.

    Standardization of a dataset is a common requirement for many
    machine learning estimators. Typically this is done by removing the mean
    and scaling to unit variance. However, outliers can often influence the
    sample mean / variance in a negative way. In such cases, the median and
    the interquartile range often give better results.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    with_centering : boolean, True by default
        If True, center the data before scaling.
        This does not work (and will raise an exception) when attempted on
        sparse matrices, because centering them entails building a dense
        matrix which in common use cases is likely to be too large to fit in
        memory.

    with_scaling : boolean, True by default
        If True, scale the data to interquartile range.

    copy : boolean, optional, default is True
        If False, try to avoid a copy and do inplace scaling instead.
        This is not guaranteed to always work inplace; e.g. if the data is
        not a NumPy array or scipy.sparse CSR matrix, a copy may still be
        returned.

    Attributes
    ----------
    center_ : array of floats
        The median value for each feature in the training set.

    scale_ : array of floats
        The (scaled) interquartile range for each feature in the training set.

    See also
    --------
    :class:`sklearn.preprocessing.StandardScaler` to perform centering
    and scaling using mean and variance.

    :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
    to further remove the linear correlation across features.

    Notes
    -----
    See examples/preprocessing/plot_robust_scaling.py for an example.

    http://en.wikipedia.org/wiki/Median_(statistics)
    http://en.wikipedia.org/wiki/Interquartile_range
    """
    def __init__(self, with_centering=True, with_scaling=True, copy=True):
        self.with_centering = with_centering
        self.with_scaling = with_scaling
        self.copy = copy

    def _check_array(self, X, copy):
        """Validate X and make sure centering is not enabled for sparse input."""
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
                        ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
        if sparse.issparse(X):
            if self.with_centering:
                raise ValueError(
                    "Cannot center sparse matrices: use `with_centering=False`"
                    " instead. See docstring for motivation and alternatives.")
        return X

    def fit(self, X, y=None):
        """Compute the median and quantiles to be used for scaling.

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to compute the median and quantiles
            used for later scaling along the features axis.
        """
        if sparse.issparse(X):
            raise TypeError("RobustScaler cannot be fitted on sparse inputs")
        X = self._check_array(X, self.copy)
        if self.with_centering:
            self.center_ = np.median(X, axis=0)
        if self.with_scaling:
            q = np.percentile(X, (25, 75), axis=0)
            self.scale_ = (q[1] - q[0])
            # Guard against constant features whose IQR is zero.
            self.scale_ = _handle_zeros_in_scale(self.scale_)
        return self

    def transform(self, X, y=None):
        """Center and scale the data.

        Parameters
        ----------
        X : array-like or CSR matrix.
            The data used to scale along the specified axis.
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)
        if sparse.issparse(X):
            if self.with_scaling:
                if X.shape[0] == 1:
                    inplace_row_scale(X, 1.0 / self.scale_)
                else:
                    # BUGFIX: this branch previously read
                    # ``elif self.axis == 0:`` but no ``axis`` attribute is
                    # ever set on RobustScaler, so transforming any sparse
                    # matrix with more than one row raised AttributeError.
                    # Column scaling mirrors MaxAbsScaler.transform.
                    inplace_column_scale(X, 1.0 / self.scale_)
        else:
            if self.with_centering:
                X -= self.center_
            if self.with_scaling:
                X /= self.scale_
        return X

    def inverse_transform(self, X):
        """Scale back the data to the original representation.

        Parameters
        ----------
        X : array-like or CSR matrix.
            The data used to scale along the specified axis.
        """
        if self.with_centering:
            check_is_fitted(self, 'center_')
        if self.with_scaling:
            check_is_fitted(self, 'scale_')
        X = self._check_array(X, self.copy)
        if sparse.issparse(X):
            if self.with_scaling:
                if X.shape[0] == 1:
                    inplace_row_scale(X, self.scale_)
                else:
                    inplace_column_scale(X, self.scale_)
        else:
            # Invert transform(): undo scaling first, then centering.
            if self.with_scaling:
                X *= self.scale_
            if self.with_centering:
                X += self.center_
        return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
    """Standardize a dataset along any axis.

    Center to the median and component wise scale according to the
    interquartile range.

    Read more in the :ref:`User Guide <preprocessing_scaler>`.

    Parameters
    ----------
    X : array-like.
        The data to center and scale.
    axis : int (0 by default)
        axis used to compute the medians and IQR along. If 0,
        independently scale each feature, otherwise (if 1) scale
        each sample.
    with_centering : boolean, True by default
        If True, center the data before scaling.
    with_scaling : boolean, True by default
        If True, scale the data to unit variance (or equivalently,
        unit standard deviation).
    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    Notes
    -----
    This implementation will refuse to center scipy.sparse matrices
    since it would make them non-sparse and would potentially crash the
    program with memory exhaustion problems.  Instead the caller is
    expected to either set explicitly `with_centering=False` (in that
    case, only variance scaling will be performed on the features of the
    CSR matrix) or to call `X.toarray()` if he/she expects the
    materialized dense array to fit in memory.  To avoid memory copy the
    caller should pass a CSR matrix.

    See also
    --------
    :class:`sklearn.preprocessing.RobustScaler` to perform centering and
    scaling using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    scaler = RobustScaler(with_centering=with_centering,
                          with_scaling=with_scaling, copy=copy)
    if axis != 0:
        # Scale samples instead of features by transposing around the fit.
        return scaler.fit_transform(X.T).T
    return scaler.fit_transform(X)
class PolynomialFeatures(BaseEstimator, TransformerMixin):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Parameters
    ----------
    degree : integer
        The degree of the polynomial features. Default = 2.
    interaction_only : boolean, default = False
        If true, only interaction features are produced: features that are
        products of at most ``degree`` *distinct* input features (so not
        ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
    include_bias : boolean
        If True (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    Examples
    --------
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[ 1,  0,  1,  0,  0,  1],
           [ 1,  2,  3,  4,  6,  9],
           [ 1,  4,  5, 16, 20, 25]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[ 1,  0,  1,  0],
           [ 1,  2,  3,  6],
           [ 1,  4,  5, 20]])

    Attributes
    ----------
    powers_ : array, shape (n_input_features, n_output_features)
        powers_[i, j] is the exponent of the jth input in the ith output.
    n_input_features_ : int
        The total number of input features.
    n_output_features_ : int
        The total number of polynomial output features. The number of output
        features is computed by iterating over all suitably sized combinations
        of input features.

    Notes
    -----
    Be aware that the number of features in the output array scales
    polynomially in the number of features of the input array, and
    exponentially in the degree. High degrees can cause overfitting.

    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <example_linear_model_plot_polynomial_interpolation.py>`
    """
    def __init__(self, degree=2, interaction_only=False, include_bias=True):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias

    @staticmethod
    def _combinations(n_features, degree, interaction_only, include_bias):
        """Yield the index tuple of input features behind each output column."""
        comb = (combinations if interaction_only else combinations_w_r)
        # Skipping the empty (degree-0) combination drops the bias column.
        start = int(not include_bias)
        return chain.from_iterable(comb(range(n_features), i)
                                   for i in range(start, degree + 1))

    @property
    def powers_(self):
        check_is_fitted(self, 'n_input_features_')
        combinations = self._combinations(self.n_input_features_, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        # np.vstack requires a sequence: passing a bare generator was
        # deprecated in NumPy 1.16 and raises in recent releases, so the
        # rows are materialized in a list first.
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])

    def fit(self, X, y=None):
        """
        Compute number of output features.
        """
        n_samples, n_features = check_array(X).shape
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        self.n_input_features_ = n_features
        self.n_output_features_ = sum(1 for _ in combinations)
        return self

    def transform(self, X, y=None):
        """Transform data to polynomial features

        Parameters
        ----------
        X : array with shape [n_samples, n_features]
            The data to transform, row by row.

        Returns
        -------
        XP : np.ndarray shape [n_samples, NP]
            The matrix of features, where NP is the number of polynomial
            features generated from the combination of inputs.
        """
        check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
        X = check_array(X)
        n_samples, n_features = X.shape
        if n_features != self.n_input_features_:
            raise ValueError("X shape does not match training shape")
        # allocate output data
        XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
        combinations = self._combinations(n_features, self.degree,
                                          self.interaction_only,
                                          self.include_bias)
        # Each output column is the product of one combination of inputs.
        for i, c in enumerate(combinations):
            XP[:, i] = X[:, c].prod(1)
        return XP
def normalize(X, norm='l2', axis=1, copy=True):
    """Scale input vectors individually to unit norm (vector length).
    Read more in the :ref:`User Guide <preprocessing_normalization>`.
    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).
    axis : 0 or 1, optional (1 by default)
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).
    Returns
    -------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The normalized data.
    See also
    --------
    :class:`sklearn.preprocessing.Normalizer` to perform normalization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    if norm not in ('l1', 'l2', 'max'):
        raise ValueError("'%s' is not a supported norm" % norm)
    # Pick the sparse layout whose contiguous dimension matches the
    # normalization axis, so the in-place kernels below stay efficient.
    if axis == 0:
        sparse_format = 'csc'
    elif axis == 1:
        sparse_format = 'csr'
    else:
        raise ValueError("'%d' is not a supported axis" % axis)
    X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
                    estimator='the normalize function', dtype=FLOAT_DTYPES)
    if axis == 0:
        # Work on rows throughout; transpose back before returning.
        X = X.T
    if sparse.issparse(X):
        if norm == 'l1':
            inplace_csr_row_normalize_l1(X)
        elif norm == 'l2':
            inplace_csr_row_normalize_l2(X)
        elif norm == 'max':
            # Broadcast each row's max over that row's nonzeros by
            # repeating it per the CSR indptr structure; all-zero rows
            # are skipped via the mask to avoid dividing by zero.
            _, norms = min_max_axis(X, 1)
            norms = norms.repeat(np.diff(X.indptr))
            mask = norms != 0
            X.data[mask] /= norms[mask]
    else:
        if norm == 'l1':
            norms = np.abs(X).sum(axis=1)
        elif norm == 'l2':
            norms = row_norms(X)
        elif norm == 'max':
            norms = np.max(X, axis=1)
        # Zero norms are replaced by 1 so the division is a no-op there.
        norms = _handle_zeros_in_scale(norms)
        X /= norms[:, np.newaxis]
    if axis == 0:
        X = X.T
    return X
class Normalizer(BaseEstimator, TransformerMixin):
    """Normalize samples individually to unit norm.

    Every sample (i.e. every row of the data matrix) that has at least one
    non-zero component is rescaled, independently of the other samples, so
    that its norm (l1 or l2) equals one.

    Works with dense numpy arrays as well as scipy.sparse matrices (use
    the CSR format to avoid a copy / conversion).

    Scaling inputs to unit norms is a common operation for text
    classification or clustering: e.g. the dot product of two
    l2-normalized TF-IDF vectors is the cosine similarity of the vectors,
    the base similarity metric of the Vector Space Model used by the
    Information Retrieval community.

    Read more in the :ref:`User Guide <preprocessing_normalization>`.

    Parameters
    ----------
    norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non zero sample.
    copy : boolean, optional, default True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix).

    Notes
    -----
    This estimator is stateless (besides constructor parameters); the
    fit method does nothing but is useful when used in a pipeline.

    See also
    --------
    :func:`sklearn.preprocessing.normalize` equivalent function
    without the object oriented API
    """
    def __init__(self, norm='l2', copy=True):
        self.norm = norm
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        Present only so the estimator follows the usual API and can be
        used in pipelines; the input is merely validated.
        """
        X = check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Scale each non zero row of X to unit norm.

        Parameters
        ----------
        X : array or scipy.sparse matrix with shape [n_samples, n_features]
            The data to normalize, row by row. scipy.sparse matrices should
            be in CSR format to avoid an un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        X = check_array(X, accept_sparse='csr')
        return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
    """Boolean thresholding of array-like or scipy.sparse matrix.

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The data to binarize, element by element.
        scipy.sparse matrices should be in CSR or CSC format to avoid an
        un-necessary copy.
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it
        by 1.  Threshold may not be less than 0 for operations on sparse
        matrices.
    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix and if axis is 1).

    See also
    --------
    :class:`sklearn.preprocessing.Binarizer` to perform binarization
    using the ``Transformer`` API (e.g. as part of a preprocessing
    :class:`sklearn.pipeline.Pipeline`)
    """
    X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
    if sparse.issparse(X):
        # A negative threshold would have to turn the (implicit) zeros
        # into ones, which cannot be expressed on a sparse matrix.
        if threshold < 0:
            raise ValueError('Cannot binarize a sparse matrix with threshold '
                             '< 0')
        above = X.data > threshold
        X.data[above] = 1
        X.data[np.logical_not(above)] = 0
        X.eliminate_zeros()
    else:
        above = X > threshold
        X[above] = 1
        X[np.logical_not(above)] = 0
    return X
class Binarizer(BaseEstimator, TransformerMixin):
    """Binarize data (set feature values to 0 or 1) according to a threshold.

    Values strictly greater than the threshold map to 1; values less than
    or equal to it map to 0.  With the default threshold of 0, only
    positive values map to 1.

    Binarization is a common operation on text count data where the
    analyst can decide to only consider the presence or absence of a
    feature rather than a quantified number of occurrences.  It can also
    be used as a pre-processing step for estimators that consider boolean
    random variables (e.g. modelled using the Bernoulli distribution in a
    Bayesian setting).

    Read more in the :ref:`User Guide <preprocessing_binarization>`.

    Parameters
    ----------
    threshold : float, optional (0.0 by default)
        Feature values below or equal to this are replaced by 0, above it
        by 1.  Threshold may not be less than 0 for operations on sparse
        matrices.
    copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy (if
        the input is already a numpy array or a scipy.sparse CSR matrix).

    Notes
    -----
    If the input is a sparse matrix, only the non-zero values are subject
    to update by the Binarizer class.

    This estimator is stateless (besides constructor parameters); the
    fit method does nothing but is useful when used in a pipeline.
    """
    def __init__(self, threshold=0.0, copy=True):
        self.threshold = threshold
        self.copy = copy

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged.

        Present only so the estimator follows the usual API and can be
        used in pipelines; the input is merely validated.
        """
        check_array(X, accept_sparse='csr')
        return self

    def transform(self, X, y=None, copy=None):
        """Binarize each element of X.

        Parameters
        ----------
        X : array or scipy.sparse matrix with shape [n_samples, n_features]
            The data to binarize, element by element.
            scipy.sparse matrices should be in CSR format to avoid an
            un-necessary copy.
        """
        if copy is None:
            copy = self.copy
        return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
    """Center a kernel matrix.

    Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi maps x
    into a Hilbert space.  KernelCenterer centers the data in that
    feature space (i.e. normalizes it to have zero mean) without ever
    computing phi(x) explicitly.  It is equivalent to centering phi(x)
    with sklearn.preprocessing.StandardScaler(with_std=False).

    Read more in the :ref:`User Guide <kernel_centering>`.
    """
    def fit(self, K, y=None):
        """Fit KernelCenterer.

        Parameters
        ----------
        K : numpy array of shape [n_samples, n_samples]
            Kernel matrix.

        Returns
        -------
        self : returns an instance of self.
        """
        K = check_array(K)
        n_samples = K.shape[0]
        # Column means and the grand mean of the training kernel.
        self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
        self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
        return self

    def transform(self, K, y=None, copy=True):
        """Center kernel matrix.

        Parameters
        ----------
        K : numpy array of shape [n_samples1, n_samples2]
            Kernel matrix.
        copy : boolean, optional, default True
            Set to False to perform inplace computation.

        Returns
        -------
        K_new : numpy array of shape [n_samples1, n_samples2]
        """
        check_is_fitted(self, 'K_fit_all_')
        K = check_array(K)
        if copy:
            K = K.copy()
        # The row means must be taken BEFORE K is modified in place below.
        pred_means = (np.sum(K, axis=1) /
                      self.K_fit_rows_.shape[0])[:, np.newaxis]
        K -= self.K_fit_rows_
        K -= pred_means
        K += self.K_fit_all_
        return K
def add_dummy_feature(X, value=1.0):
    """Augment dataset with an additional dummy feature.
    This is useful for fitting an intercept term with implementations which
    cannot otherwise fit it directly.
    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        Data.
    value : float
        Value to use for the dummy feature.
    Returns
    -------
    X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
        Same data with dummy feature added as first column.
    Examples
    --------
    >>> from sklearn.preprocessing import add_dummy_feature
    >>> add_dummy_feature([[0, 1], [1, 0]])
    array([[ 1.,  0.,  1.],
           [ 1.,  1.,  0.]])
    """
    X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
    n_samples, n_features = X.shape
    shape = (n_samples, n_features + 1)
    if sparse.issparse(X):
        # COO and CSC formats are rebuilt directly from their index
        # arrays, prepending the dummy column in front.
        if sparse.isspmatrix_coo(X):
            # Shift columns to the right.
            col = X.col + 1
            # Column indices of dummy feature are 0 everywhere.
            col = np.concatenate((np.zeros(n_samples), col))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            row = np.concatenate((np.arange(n_samples), X.row))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.ones(n_samples) * value, X.data))
            return sparse.coo_matrix((data, (row, col)), shape)
        elif sparse.isspmatrix_csc(X):
            # Shift index pointers since we need to add n_samples elements.
            indptr = X.indptr + n_samples
            # indptr[0] must be 0.
            indptr = np.concatenate((np.array([0]), indptr))
            # Row indices of dummy feature are 0, ..., n_samples-1.
            indices = np.concatenate((np.arange(n_samples), X.indices))
            # Prepend the dummy feature n_samples times.
            data = np.concatenate((np.ones(n_samples) * value, X.data))
            return sparse.csc_matrix((data, indices, indptr), shape)
        else:
            # Any other sparse format: do the work in COO, then convert
            # back to the caller's original class.
            klass = X.__class__
            return klass(add_dummy_feature(X.tocoo(), value))
    else:
        # Dense: stack a constant column of `value` in front of X.
        return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if a unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those catgorical features of X that are known using fit.
# i.e lesser than n_values_ using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
    """Transform X using one-hot encoding.

    Parameters
    ----------
    X : array-like, shape=(n_samples, n_features)
        Input array of type int.

    Returns
    -------
    X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
        Transformed input.
    """
    # Only the columns selected by ``categorical_features`` are encoded
    # (via _transform); other columns are passed through unchanged.
    return _transform_selected(X, self._transform,
                               self.categorical_features, copy=True)
| bsd-3-clause |
mikebenfield/scikit-learn | sklearn/decomposition/base.py | 23 | 5656 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <denis-alexander.engemann@inria.fr>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
    """Base class for PCA methods.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    def get_covariance(self):
        """Compute data covariance with the generative model.

        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
        where S**2 contains the explained variances, and sigma2 contains the
        noise variances.

        Returns
        -------
        cov : array, shape=(n_features, n_features)
            Estimated covariance of data.
        """
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            # Undo whitening so each component carries its variance scale.
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        cov = np.dot(components_.T * exp_var_diff, components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov

    def get_precision(self):
        """Compute data precision matrix with the generative model.

        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.

        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]
        # Handle corner cases first: no components, or a full-rank model.
        if self.n_components_ == 0:
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            return linalg.inv(self.get_covariance())
        # Get precision using the matrix inversion (Woodbury) lemma, which
        # only inverts an (n_components x n_components) matrix.
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision

    @abstractmethod
    def fit(self, X, y=None):
        """Placeholder for fit. Subclasses should implement this method!

        Fit the model with X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Bug fix: the abstract signature previously omitted ``self``
        # (``def fit(X, y=None)``), which does not match how Python binds
        # instance methods and how every subclass implements it.

    def transform(self, X, y=None):
        """Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_
        X_transformed = fast_dot(X, self.components_.T)
        if self.whiten:
            # Rescale projections to unit component-wise variance.
            X_transformed /= np.sqrt(self.explained_variance_)
        return X_transformed

    def inverse_transform(self, X, y=None):
        """Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original : array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform will compute the
        exact inverse operation, which includes reversing whitening.
        """
        if self.whiten:
            return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
                            self.components_) + self.mean_
        else:
            return fast_dot(X, self.components_) + self.mean_
| bsd-3-clause |
williamgilpin/rk4 | rk4_poincare.py | 1 | 1762 | from matplotlib.pyplot import *
from scipy import *
from numpy import *
# a simple Runge-Kutta integrator for multiple dependent variables and one independent variable
def rungekutta4(yprime, time, y0):
    """Classic fourth-order Runge-Kutta integrator.

    Integrates the ODE system dy/dt = yprime(y, t) over the given time grid.

    Args:
        yprime: callable(y, t) returning the derivative vector.
        time: sequence of t-values at which solutions are computed
            (need not be evenly spaced).
        y0: list of initial values of y.

    Returns:
        numpy array of shape (len(time), len(y0)) with one state per row.

    Dependency: numpy
    """
    # Bug fix: ``xrange`` is Python 2 only; ``range`` works on both.
    N = len(time)
    y = array([value * ones(N) for value in y0]).T
    for ii in range(N - 1):
        dt = time[ii + 1] - time[ii]
        k1 = dt * yprime(y[ii], time[ii])
        k2 = dt * yprime(y[ii] + 0.5 * k1, time[ii] + 0.5 * dt)
        k3 = dt * yprime(y[ii] + 0.5 * k2, time[ii] + 0.5 * dt)
        k4 = dt * yprime(y[ii] + k3, time[ii + 1])
        y[ii + 1] = y[ii] + (k1 + 2.0 * (k2 + k3) + k4) / 6.0
    return y
# Miscellaneous functions
def total_energy(valpair):
    """Total energy (kinetic + potential) of a Henon-Heiles state.

    ``valpair`` is an (x, y, px, py) phase-space point.
    """
    x, y, px, py = tuple(valpair)
    kinetic = 0.5 * (px ** 2 + py ** 2)
    potential = 0.5 * (x ** 2 + y ** 2 + 2.0 * x ** 2 * y - (2.0 / 3) * y ** 3)
    return kinetic + potential
def pqdot(valpair, tval):
    """Hamilton's equations of motion for the Henon-Heiles Hamiltonian.

    Input: [x, y, px, py] and t (unused; the system is autonomous).
    Returns the derivative vector [xdot, ydot, pxdot, pydot].
    """
    x, y, px, py = tuple(valpair)
    xdot = px
    ydot = py
    pxdot = -x - 2 * x * y
    pydot = -y - x * x + y * y
    return array([xdot, ydot, pxdot, pydot]).T
def findcrossings(data):
    """Return indices where a 1D series crosses zero between samples.

    Index ``i`` is reported when data[i] and data[i+1] have strictly
    opposite signs (exact zeros are not counted, matching the original
    behaviour). Useful for generating a Poincare map at 0.
    """
    # Bug fix: ``xrange`` is Python 2 only; ``range`` works on both.
    crossings = list()
    for ii in range(len(data) - 1):
        if (data[ii] > 0 and data[ii + 1] < 0) or \
                (data[ii] < 0 and data[ii + 1] > 0):
            crossings.append(ii)
    return array(crossings)
# # for testing:
#
# def test2(valpair, tval):
# xval = valpair[0]
# yval = valpair[1]
# return array([-7*yval, -yval+xval]).T
# kk = rungekutta4(test2, linspace(1,10,100), array([4.0, 2.0]))
# plot(kk) | mit |
mne-tools/mne-tools.github.io | 0.20/_downloads/f32a43d5ff22f0bd4aa430d2d5d4de0b/plot_compute_mne_inverse_raw_in_label.py | 19 | 1693 | """
.. _example-sLORETA:
=============================================
Compute sLORETA inverse solution on raw data
=============================================
Compute sLORETA inverse solution on raw dataset restricted
to a brain label and stores the solution in stc files for
visualisation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_raw, read_inverse_operator
print(__doc__)
# Locate the MNE sample dataset and the files needed for the inverse solution.
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif'
label_name = 'Aud-lh'  # left-hemisphere auditory label
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
snr = 1.0  # use smaller SNR for raw data
lambda2 = 1.0 / snr ** 2  # regularization parameter derived from the SNR
method = "sLORETA"  # use sLORETA method (could also be MNE or dSPM)
# Load data
raw = mne.io.read_raw_fif(fname_raw)
inverse_operator = read_inverse_operator(fname_inv)
label = mne.read_label(fname_label)
raw.set_eeg_reference('average', projection=True)  # set average reference.
start, stop = raw.time_as_index([0, 15])  # read the first 15s of data
# Compute inverse solution
stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label,
                        start, stop, pick_ori=None)
# Save result in stc files
stc.save('mne_%s_raw_inverse_%s' % (method, label_name))
###############################################################################
# View activation time-series
# Plot every 100th source to keep the figure readable; times converted to ms.
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
billy-inn/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implement compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
# Benchmark scikit-learn's Ward clustering against SciPy's over a grid of
# sample counts and feature counts, then plot the runtime ratio.
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
# Log-spaced grids for problem sizes.
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
                                    n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
    for j, p in enumerate(n_features):
        X = np.random.normal(size=(n, p))
        # Time scikit-learn's implementation.
        t0 = time.time()
        ward.fit(X)
        scikits_time[j, i] = time.time() - t0
        # Time SciPy's implementation on the same data.
        t0 = time.time()
        hierarchy.ward(X)
        scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
# Log scale: values > 0 mean scikit-learn is slower than SciPy.
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')  # equal-time contour
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
sdiazpier/nest-simulator | pynest/nest/tests/test_get_set.py | 10 | 21354 | # -*- coding: utf-8 -*-
#
# test_get_set.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
NodeCollection get/set tests
"""
import unittest
import nest
import json
try:
import numpy as np
HAVE_NUMPY = True
except ImportError:
HAVE_NUMPY = False
try:
import pandas
import pandas.testing as pt
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
@nest.ll_api.check_stack
class TestNodeCollectionGetSet(unittest.TestCase):
"""NodeCollection get/set tests"""
def setUp(self):
nest.ResetKernel()
def test_get(self):
"""
Test that get function works as expected.
"""
nodes = nest.Create('iaf_psc_alpha', 10)
C_m = nodes.get('C_m')
node_ids = nodes.get('global_id')
E_L = nodes.get('E_L')
V_m = nodes.get('V_m')
t_ref = nodes.get('t_ref')
g = nodes.get(['local', 'thread', 'vp'])
local = g['local']
thread = g['thread']
vp = g['vp']
self.assertEqual(C_m, (250.0, 250.0, 250.0, 250.0, 250.0,
250.0, 250.0, 250.0, 250.0, 250.0))
self.assertEqual(node_ids, tuple(range(1, 11)))
self.assertEqual(E_L, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(V_m, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(t_ref, (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
self.assertTrue(local)
self.assertEqual(thread, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
self.assertEqual(vp, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
g_reference = {'local': (True, True, True, True, True,
True, True, True, True, True),
'thread': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
'vp': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)}
self.assertEqual(g, g_reference)
def test_get_sliced(self):
"""
Test that get works on sliced NodeCollections
"""
nodes = nest.Create('iaf_psc_alpha', 10)
V_m = nodes[2:5].get('V_m')
g = nodes[5:7].get(['t_ref', 'tau_m'])
C_m = nodes[2:9:2].get('C_m')
self.assertEqual(V_m, (-70.0, -70.0, -70.0))
self.assertEqual(g['t_ref'], (2.0, 2.0))
self.assertEqual(C_m, (250.0, 250.0, 250.0, 250.0))
def test_get_composite(self):
"""
Test that get function works on composite NodeCollections
"""
n1 = nest.Create('iaf_psc_alpha', 2)
n2 = nest.Create('iaf_psc_delta', 2)
n3 = nest.Create('iaf_psc_exp')
n4 = nest.Create('iaf_psc_alpha', 3)
n1.set(V_m=[-77., -88.])
n3.set({'V_m': -55.})
n1.set(C_m=[251., 252.])
n2.set(C_m=[253., 254.])
n3.set({'C_m': 255.})
n4.set(C_m=[256., 257., 258.])
n5 = n1 + n2 + n3 + n4
status_dict = n5.get()
# Check that we get values in correct order
vm_ref = (-77., -88., -70., -70., -55, -70., -70., -70.)
self.assertEqual(status_dict['V_m'], vm_ref)
# Check that we get None where not applicable
# tau_syn_ex is part of iaf_psc_alpha
tau_ref = (2., 2., None, None, 2., 2., 2., 2.)
self.assertEqual(status_dict['tau_syn_ex'], tau_ref)
# refractory_input is part of iaf_psc_delta
refrac_ref = (None, None,
False, False,
None, None,
None, None)
self.assertEqual(status_dict['refractory_input'], refrac_ref)
# Check that calling get with string works on composite NCs, both on
# parameters all the models have, and on individual parameters.
Cm_ref = [x * 1. for x in range(251, 259)]
Cm = n5.get('C_m')
self.assertEqual(list(Cm), Cm_ref)
refrac = n5.get('refractory_input')
self.assertEqual(refrac, refrac_ref)
@unittest.skipIf(not HAVE_NUMPY, 'NumPy package is not available')
def test_get_different_size(self):
"""
Test get with different input for different sizes of NodeCollections
"""
single_sr = nest.Create('spike_recorder', 1)
multi_sr = nest.Create('spike_recorder', 10)
empty_array_float = np.array([], dtype=np.float64)
empty_array_int = np.array([], dtype=np.int64)
# Single node, literal parameter
self.assertEqual(single_sr.get('start'), 0.0)
# Single node, array parameter
self.assertEqual(single_sr.get(['start', 'time_in_steps']),
{'start': 0.0, 'time_in_steps': False})
# Single node, hierarchical with literal parameter
np.testing.assert_array_equal(single_sr.get('events', 'times'),
empty_array_float)
# Multiple nodes, hierarchical with literal parameter
values = multi_sr.get('events', 'times')
for v in values:
np.testing.assert_array_equal(v, empty_array_float)
# Single node, hierarchical with array parameter
values = single_sr.get('events', ['senders', 'times'])
self.assertEqual(len(values), 2)
self.assertTrue('senders' in values)
self.assertTrue('times' in values)
np.testing.assert_array_equal(values['senders'], empty_array_int)
np.testing.assert_array_equal(values['times'], empty_array_float)
# Multiple nodes, hierarchical with array parameter
values = multi_sr.get('events', ['senders', 'times'])
self.assertEqual(len(values), 2)
self.assertTrue('senders' in values)
self.assertTrue('times' in values)
self.assertEqual(len(values['senders']), len(multi_sr))
for v in values['senders']:
np.testing.assert_array_equal(v, empty_array_int)
for v in values['times']:
np.testing.assert_array_equal(v, empty_array_float)
# Single node, no parameter (gets all values)
values = single_sr.get()
num_values_single_sr = len(values.keys())
self.assertEqual(values['start'], 0.0)
# Multiple nodes, no parameter (gets all values)
values = multi_sr.get()
self.assertEqual(len(values.keys()), num_values_single_sr)
self.assertEqual(values['start'],
tuple(0.0 for i in range(len(multi_sr))))
@unittest.skipIf(not HAVE_PANDAS, 'Pandas package is not available')
def test_get_pandas(self):
"""
Test that get function with Pandas output works as expected.
"""
single_sr = nest.Create('spike_recorder', 1)
multi_sr = nest.Create('spike_recorder', 10)
empty_array_float = np.array([], dtype=np.float64)
# Single node, literal parameter
pt.assert_frame_equal(single_sr.get('start', output='pandas'),
pandas.DataFrame({'start': [0.0]},
index=tuple(single_sr.tolist())))
# Multiple nodes, literal parameter
pt.assert_frame_equal(multi_sr.get('start', output='pandas'),
pandas.DataFrame(
{'start': [0.0 for i in range(
len(multi_sr))]},
index=tuple(multi_sr.tolist())))
# Single node, array parameter
pt.assert_frame_equal(single_sr.get(['start', 'n_events'],
output='pandas'),
pandas.DataFrame({'start': [0.0],
'n_events': [0]},
index=tuple(single_sr.tolist())))
# Multiple nodes, array parameter
ref_dict = {'start': [0.0 for i in range(len(multi_sr))],
'n_events': [0]}
pt.assert_frame_equal(multi_sr.get(['start', 'n_events'],
output='pandas'),
pandas.DataFrame(ref_dict,
index=tuple(multi_sr.tolist())))
# Single node, hierarchical with literal parameter
pt.assert_frame_equal(single_sr.get('events', 'times',
output='pandas'),
pandas.DataFrame({'times': [[]]},
index=tuple(single_sr.tolist())))
# Multiple nodes, hierarchical with literal parameter
ref_dict = {'times': [empty_array_float
for i in range(len(multi_sr))]}
pt.assert_frame_equal(multi_sr.get('events', 'times',
output='pandas'),
pandas.DataFrame(ref_dict,
index=tuple(multi_sr.tolist())))
# Single node, hierarchical with array parameter
ref_df = pandas.DataFrame(
{'times': [[]], 'senders': [[]]}, index=tuple(single_sr.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
pt.assert_frame_equal(single_sr.get(
'events', ['senders', 'times'], output='pandas'),
ref_df)
# Multiple nodes, hierarchical with array parameter
ref_dict = {'times': [[] for i in range(len(multi_sr))],
'senders': [[] for i in range(len(multi_sr))]}
ref_df = pandas.DataFrame(
ref_dict,
index=tuple(multi_sr.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
sr_df = multi_sr.get('events', ['senders', 'times'], output='pandas')
sr_df = sr_df.reindex(sorted(sr_df.columns), axis=1)
pt.assert_frame_equal(sr_df,
ref_df)
# Single node, no parameter (gets all values)
values = single_sr.get(output='pandas')
num_values_single_sr = values.shape[1]
self.assertEqual(values['start'][tuple(single_sr.tolist())[0]], 0.0)
# Multiple nodes, no parameter (gets all values)
values = multi_sr.get(output='pandas')
self.assertEqual(values.shape, (len(multi_sr), num_values_single_sr))
pt.assert_series_equal(values['start'],
pandas.Series({key: 0.0
for key in tuple(multi_sr.tolist())},
dtype=np.float64,
name='start'))
# With data in events
nodes = nest.Create('iaf_psc_alpha', 10)
pg = nest.Create('poisson_generator', {'rate': 70000.0})
nest.Connect(pg, nodes)
nest.Connect(nodes, single_sr)
nest.Connect(nodes, multi_sr, 'one_to_one')
nest.Simulate(50)
ref_values = single_sr.get('events', ['senders', 'times'])
ref_df = pandas.DataFrame({key: [ref_values[key]] for key in ['senders', 'times']},
index=tuple(single_sr.tolist()))
sd_df = single_sr.get('events', ['senders', 'times'], output='pandas')
pt.assert_frame_equal(sd_df, ref_df)
ref_values = multi_sr.get('events', ['senders', 'times'])
ref_df = pandas.DataFrame(ref_values, index=tuple(multi_sr.tolist()))
sd_df = multi_sr.get('events', ['senders', 'times'], output='pandas')
pt.assert_frame_equal(sd_df, ref_df)
def test_get_JSON(self):
"""
Test that get function with json output works as expected.
"""
single_sr = nest.Create('spike_recorder', 1)
multi_sr = nest.Create('spike_recorder', 10)
# Single node, literal parameter
self.assertEqual(json.loads(
single_sr.get('start', output='json')), 0.0)
# Multiple nodes, literal parameter
self.assertEqual(
json.loads(multi_sr.get('start', output='json')),
len(multi_sr) * [0.0])
# Single node, array parameter
ref_dict = {'start': 0.0, 'n_events': 0}
self.assertEqual(
json.loads(single_sr.get(['start', 'n_events'], output='json')),
ref_dict)
# Multiple nodes, array parameter
ref_dict = {'start': len(multi_sr) * [0.0],
'n_events': len(multi_sr) * [0]}
self.assertEqual(
json.loads(multi_sr.get(['start', 'n_events'], output='json')),
ref_dict)
# Single node, hierarchical with literal parameter
self.assertEqual(json.loads(single_sr.get(
'events', 'times', output='json')), [])
# Multiple nodes, hierarchical with literal parameter
ref_list = len(multi_sr) * [[]]
self.assertEqual(
json.loads(multi_sr.get('events', 'times', output='json')),
ref_list)
# Single node, hierarchical with array parameter
ref_dict = {'senders': [], 'times': []}
self.assertEqual(
json.loads(single_sr.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
# Multiple nodes, hierarchical with array parameter
ref_dict = {'times': len(multi_sr) * [[]],
'senders': len(multi_sr) * [[]]}
self.assertEqual(
json.loads(multi_sr.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
# Single node, no parameter (gets all values)
values = json.loads(single_sr.get(output='json'))
num_values_single_sr = len(values)
self.assertEqual(values['start'], 0.0)
# Multiple nodes, no parameter (gets all values)
values = json.loads(multi_sr.get(output='json'))
self.assertEqual(len(values), num_values_single_sr)
self.assertEqual(values['start'], len(multi_sr) * [0.0])
# With data in events
nodes = nest.Create('iaf_psc_alpha', 10)
pg = nest.Create('poisson_generator', {'rate': 70000.0})
nest.Connect(pg, nodes)
nest.Connect(nodes, single_sr)
nest.Connect(nodes, multi_sr, 'one_to_one')
nest.Simulate(50)
sd_ref = single_sr.get('events', ['senders', 'times'])
sd_json = single_sr.get('events', ['senders', 'times'], output='json')
sd_dict = json.loads(sd_json)
self.assertEqual(len(sd_dict.keys()), 2)
self.assertEqual(sorted(sd_dict.keys()), sorted(sd_ref.keys()))
for key in ['senders', 'times']:
self.assertEqual(list(sd_ref[key]), list(sd_dict[key]))
multi_sr_ref = multi_sr.get('events', ['senders', 'times'])
multi_sr_json = multi_sr.get('events', ['senders', 'times'], output='json')
multi_sr_dict = json.loads(multi_sr_json)
self.assertEqual(len(multi_sr_dict.keys()), 2)
self.assertEqual(sorted(multi_sr_dict.keys()), sorted(multi_sr_ref.keys()))
for key in ['senders', 'times']:
multi_sr_ref_element = [list(element) for element in multi_sr_ref[key]]
self.assertEqual(multi_sr_ref_element, multi_sr_dict[key])
def test_set(self):
"""
Test that set function works as expected.
"""
nodes = nest.Create('iaf_psc_alpha', 10)
# Dict to set same value for all nodes.
nodes.set({'C_m': 100.0})
C_m = nodes.get('C_m')
self.assertEqual(C_m, (100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0))
# Set same value for all nodes.
nodes.set(tau_Ca=500.0)
tau_Ca = nodes.get('tau_Ca')
self.assertEqual(tau_Ca, (500.0, 500.0, 500.0, 500.0, 500.0,
500.0, 500.0, 500.0, 500.0, 500.0))
# List of dicts, where each dict corresponds to a single node.
nodes.set(({'V_m': 10.0}, {'V_m': 20.0}, {'V_m': 30.0}, {'V_m': 40.0},
{'V_m': 50.0}, {'V_m': 60.0}, {'V_m': 70.0}, {'V_m': 80.0},
{'V_m': 90.0}, {'V_m': -100.0}))
V_m = nodes.get('V_m')
self.assertEqual(V_m, (10.0, 20.0, 30.0, 40.0, 50.0,
60.0, 70.0, 80.0, 90.0, -100.0))
# Set value of a parameter based on list. List must be length of nodes.
nodes.set(V_reset=[-85., -82., -80., -77., -75.,
-72., -70., -67., -65., -62.])
V_reset = nodes.get('V_reset')
self.assertEqual(V_reset, (-85., -82., -80., -77., -75.,
-72., -70., -67., -65., -62.))
with self.assertRaises(IndexError):
nodes.set(V_reset=[-85., -82., -80., -77., -75.])
# Set different parameters with a dictionary.
nodes.set({'t_ref': 44.0, 'tau_m': 2.0, 'tau_minus': 42.0})
g = nodes.get(['t_ref', 'tau_m', 'tau_minus'])
self.assertEqual(g['t_ref'], (44.0, 44.0, 44.0, 44.0, 44.0,
44.0, 44.0, 44.0, 44.0, 44.0))
self.assertEqual(g['tau_m'], (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
self.assertEqual(g['tau_minus'], (42.0, 42.0, 42.0, 42.0, 42.0,
42.0, 42.0, 42.0, 42.0, 42.0))
with self.assertRaises(nest.kernel.NESTError):
nodes.set({'vp': 2})
def test_set_composite(self):
"""
Test that set works on composite NodeCollections
"""
nodes = nest.Create('iaf_psc_alpha', 10)
nodes[2:5].set(({'V_m': -50.0}, {'V_m': -40.0}, {'V_m': -30.0}))
nodes[5:7].set({'t_ref': 4.4, 'tau_m': 3.0})
nodes[2:9:2].set(C_m=111.0)
V_m = nodes.get('V_m')
g = nodes.get(['t_ref', 'tau_m'])
C_m = nodes.get('C_m')
self.assertEqual(V_m, (-70.0, -70.0, -50.0, -40.0, -30.0,
-70.0, -70.0, -70.0, -70.0, -70.0,))
self.assertEqual(g, {'t_ref': (2.0, 2.0, 2.0, 2.0, 2.0,
4.4, 4.4, 2.0, 2.0, 2.0),
'tau_m': (10.0, 10.0, 10.0, 10.0, 10.0,
3.00, 3.00, 10.0, 10.0, 10.0)})
self.assertEqual(C_m, (250.0, 250.0, 111.0, 250.0, 111.0,
250.0, 111.0, 250.0, 111.0, 250.0))
def test_get_attribute(self):
"""Test get using getattr"""
nodes = nest.Create('iaf_psc_alpha', 10)
self.assertEqual(nodes.C_m, (250.0, 250.0, 250.0, 250.0, 250.0,
250.0, 250.0, 250.0, 250.0, 250.0))
self.assertEqual(nodes.global_id, tuple(range(1, 11)))
self.assertEqual(nodes.E_L, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(nodes.V_m, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(nodes.t_ref, (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
with self.assertRaises(KeyError):
print(nodes.nonexistent_attribute)
self.assertIsNone(nodes.spatial)
spatial_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([2, 2]))
self.assertIsNotNone(spatial_nodes.spatial)
spatial_reference = {'network_size': 4,
'center': (0.0, 0.0),
'edge_wrap': False,
'extent': (1.0, 1.0),
'shape': (2, 2)}
self.assertEqual(spatial_nodes.spatial, spatial_reference)
def test_set_attribute(self):
"""Test set using setattr"""
nodes = nest.Create('iaf_psc_alpha', 10)
nodes.C_m = 100.0
self.assertEqual(nodes.get('C_m'), (100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0))
v_reset_reference = (-85., -82., -80., -77., -75., -72., -70., -67., -65., -62.)
nodes.V_reset = v_reset_reference
self.assertEqual(nodes.get('V_reset'), v_reset_reference)
with self.assertRaises(IndexError):
nodes.V_reset = [-85., -82., -80., -77., -75.]
with self.assertRaises(nest.kernel.NESTError):
nodes.nonexistent_attribute = 1.
def suite():
    """Build a suite containing every test_* method of TestNodeCollectionGetSet."""
    return unittest.makeSuite(TestNodeCollectionGetSet, 'test')
def run():
    """Run the NodeCollection get/set suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 |
rhyolight/nupic.research | projects/sequence_classification/util_functions.py | 11 | 11219 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import os
import matplotlib.lines as lines
import numpy as np
def loadDataset(dataName, datasetName, useDeltaEncoder=False):
  """Load a UCR-style train/test split from ./<datasetName>/<dataName>/.

  Each file is CSV with the class label in column 0 and the time series in
  the remaining columns. Labels are remapped to contiguous integers
  0..n_classes-1 based on the classes present in the TRAIN file.
  If useDeltaEncoder is True, series are replaced by their first differences
  (so they lose one time step).

  NOTE(review): a test label absent from the training set would raise
  KeyError in the remapping loop — presumably the datasets guarantee the
  same label sets; confirm before reusing on new data.
  """
  fileDir = os.path.join('./{}'.format(datasetName),
                         dataName, dataName+'_TRAIN')
  trainData = np.loadtxt(fileDir, delimiter=',')
  trainLabel = trainData[:, 0].astype('int')
  trainData = trainData[:, 1:]
  fileDir = os.path.join('./{}'.format(datasetName),
                         dataName, dataName + '_TEST')
  testData = np.loadtxt(fileDir, delimiter=',')
  testLabel = testData[:, 0].astype('int')
  testData = testData[:, 1:]
  if useDeltaEncoder:
    trainData = np.diff(trainData)
    testData = np.diff(testData)
  # Remap raw class labels (which may be arbitrary ints) to 0..n-1.
  classList = np.unique(trainLabel)
  classMap = {}
  for i in range(len(classList)):
    classMap[classList[i]] = i
  for i in range(len(trainLabel)):
    trainLabel[i] = classMap[trainLabel[i]]
  for i in range(len(testLabel)):
    testLabel[i] = classMap[testLabel[i]]
  return trainData, trainLabel, testData, testLabel
def listDataSets(datasetName):
  """Return the names of all dataset sub-directories under ./<datasetName>."""
  root = './{}'.format(datasetName)
  return [entry for entry in os.listdir(root)
          if os.path.isdir(os.path.join(root, entry))]
def calculateAccuracy(distanceMat, trainLabel, testLabel):
  """1-NN accuracy given a similarity matrix (rows: test, cols: train).

  The predicted class of each test sample is the label of the training
  sample with the *largest* entry in its row (similarity, not distance).
  Returns (accuracy, per-sample 0/1 outcome list).
  """
  outcome = []
  for row in range(len(testLabel)):
    nearest = np.argmax(distanceMat[row, :])
    outcome.append(1 if trainLabel[nearest] == testLabel[row] else 0)
  accuracy = np.mean(np.array(outcome))
  return accuracy, outcome
def calculateEuclideanModelAccuracy(trainData, trainLabel, testData, testLabel):
  """Per-sample 1-NN (Euclidean) outcomes: 1 if correctly classified, else 0."""
  outcomeEuclidean = []
  for idx in range(testData.shape[0]):
    predicted = one_nearest_neighbor(trainData, trainLabel, testData[idx, :])
    outcomeEuclidean.append(1 if predicted == testLabel[idx] else 0)
  return outcomeEuclidean
def one_nearest_neighbor(trainData, trainLabel, unknownSequence):
  """One nearest neighbor with Euclidean Distance.

  @param trainData (nSample, NT) training data
  @param trainLabel (nSample, ) training data labels
  @param unknownSequence (1, NT) sequence to be classified
  @return label of the nearest training sample
  """
  # Vectorized: broadcasting replaces the per-row Python loop with a single
  # NumPy expression (same distances, same argmin tie-breaking).
  distance = np.sqrt(np.sum(np.square(trainData - unknownSequence), axis=1))
  return trainLabel[np.argmin(distance)]
def sortDistanceMat(distanceMat, trainLabel, testLabel):
  """
  Sort Distance Matrix according to training/testing class labels such that
  nearby entries share the same class labels.

  :param distanceMat: original (unsorted) distance matrix, shape
    (numTest, numTrain)
  :param trainLabel: list of training labels
  :param testLabel: list of testing labels
  :return: float matrix with rows/columns permuted by class
  """
  # Bug fix: the original double loop used Python-2-only ``xrange``.
  # np.ix_ applies both permutations in one vectorized step; asarray(float)
  # preserves the original float output dtype (np.zeros default).
  sortIdxTrain = np.argsort(trainLabel)
  sortIdxTest = np.argsort(testLabel)
  distanceMat = np.asarray(distanceMat, dtype=np.float64)
  return distanceMat[np.ix_(sortIdxTest, sortIdxTrain)]
def smoothArgMax(array):
  """Argmax with tie smoothing: among tied maxima, return the median index."""
  maxLocations = np.where(array == np.max(array))[0]
  return np.median(maxLocations).astype('int')
def calculateClassLines(trainLabel, testLabel, classList):
  """Positions of class-boundary grid lines for a class-sorted distance matrix.

  For every class except the last, records the half-integer position just
  after its last (sorted) occurrence: vertical lines follow the training
  labels, horizontal lines follow the testing labels.
  """
  trainOrder = np.argsort(trainLabel)
  testOrder = np.argsort(testLabel)
  vLineLocs = []
  hLineLocs = []
  for classI in classList[:-1]:
    lastTestIdx = np.max(np.where(testLabel[testOrder] == classI)[0])
    lastTrainIdx = np.max(np.where(trainLabel[trainOrder] == classI)[0])
    hLineLocs.append(lastTestIdx + .5)
    vLineLocs.append(lastTrainIdx + .5)
  return vLineLocs, hLineLocs
def addClassLines(ax, vLineLocs, hLineLocs):
  """Draw black class-separator lines on a matplotlib axis.

  Vertical lines span the current y-limits, horizontal lines the x-limits;
  positions typically come from calculateClassLines().
  """
  for vline in vLineLocs:
    ax.add_line(lines.Line2D([vline, vline], ax.get_ylim(), color='k'))
  for hline in hLineLocs:
    ax.add_line(lines.Line2D(ax.get_xlim(), [hline, hline], color='k'))
def calculateEuclideanDistanceMat(testData, trainData):
  """Pairwise Euclidean distances between test rows and train rows.

  @param testData (nTest, NT) array
  @param trainData (nTrain, NT) array
  @return (nTest, nTrain) float matrix of distances
  """
  # Vectorized via broadcasting: replaces the accidental O(nTest*nTrain)
  # Python double loop with a single NumPy expression (same values).
  diff = testData[:, np.newaxis, :] - trainData[np.newaxis, :, :]
  return np.sqrt(np.sum(np.square(diff), axis=2))
def overlapDist(s1, s2):
  """Jaccard similarity of two sets; two empty sets give 0."""
  union = s1.union(s2)
  if not union:
    return 0
  return float(len(s1.intersection(s2))) / len(union)
def calculateDistanceMat(activeColumnsTest, activeColumnsTrain):
  """Similarity matrix between test and train encodings.

  Two representations are supported, chosen by the type of the *first* test
  entry: dense numpy arrays (scored with a sqrt-of-product overlap) or
  per-timestep sets (scored with summed Jaccard overlap via overlapDist).
  NOTE(review): the type check inspects only activeColumnsTest[0]; mixed
  representations would be mis-scored — presumably callers never mix them.
  """
  nTest = len(activeColumnsTest)
  nTrain = len(activeColumnsTrain)
  sequenceLength = len(activeColumnsTrain[0])
  activeColumnOverlapTest = np.zeros((nTest, nTrain))
  for i in range(nTest):
    for j in range(nTrain):
      if type(activeColumnsTest[0]) is np.ndarray:
        # Dense case: element-wise geometric-mean overlap.
        activeColumnOverlapTest[i, j] = np.sum(np.sqrt(np.multiply(activeColumnsTest[i], activeColumnsTrain[j])))
        # activeColumnOverlapTest[i, j] = np.sum(np.minimum(activeColumnsTest[i], activeColumnsTrain[j]))
      else:
        # Set case: accumulate Jaccard overlap at each time step.
        for t in range(sequenceLength):
          activeColumnOverlapTest[i, j] += overlapDist(
            activeColumnsTest[i][t], activeColumnsTrain[j][t])
  return activeColumnOverlapTest
def calculateDistanceMatTrain(activeColumnsTrain):
  """Symmetric train-vs-train overlap matrix for set-based column traces.

  Entry (i, j) sums, over all timesteps, the number of columns active in
  both sequence i and sequence j; the diagonal is left at zero.
  """
  nTrain = len(activeColumnsTrain)
  sequenceLength = len(activeColumnsTrain[0])
  overlapMat = np.zeros((nTrain, nTrain))
  for i in range(nTrain):
    seqI = activeColumnsTrain[i]
    # Only the upper triangle is computed; the result is mirrored below.
    for j in range(i + 1, nTrain):
      seqJ = activeColumnsTrain[j]
      pairOverlap = sum(
        len(seqI[t] & seqJ[t]) for t in range(sequenceLength))
      overlapMat[i, j] = pairOverlap
      overlapMat[j, i] = pairOverlap
  return overlapMat
def constructDistanceMat(distMatColumn, distMatCell, trainLabel, wOpt, bOpt):
  """Blend column- and cell-based distances with per-class weights/offsets.

  For the training columns belonging to each class c, the combined distance
  is (1 - wOpt[c]) * column + wOpt[c] * cell + bOpt[c].
  """
  distanceMat = np.zeros(distMatColumn.shape)
  for classI in np.unique(trainLabel).tolist():
    cols = np.where(trainLabel == classI)[0]
    weight = wOpt[classI]
    distanceMat[:, cols] = ((1 - weight) * distMatColumn[:, cols] +
                            weight * distMatCell[:, cols] + bOpt[classI])
  return distanceMat
def costFuncSharedW(newW, w, b, distMatColumn, distMatCell,
                    trainLabel, classList):
  """Negative training accuracy when one shared weight is used for all classes.

  Intended as an objective for scalar minimizers, hence the negated accuracy
  (lower is better).
  """
  candidateW = copy.deepcopy(w)
  for classI in classList:
    candidateW[classI] = newW
  blendedDist = constructDistanceMat(
    distMatColumn, distMatCell, trainLabel, candidateW, b)
  accuracy, _ = calculateAccuracy(blendedDist, trainLabel, trainLabel)
  return -accuracy
def costFuncW(newW, classI, w, b, activeColumnOverlap, activeCellOverlap,
              trainLabel, classList):
  """Negative cross-validated accuracy when class classI's weight is newW.

  Accuracy is averaged over 10 nested cross-validation splits (seeds 0-9)
  and negated so the function can be minimized.
  """
  candidateW = copy.deepcopy(w)
  candidateW[classList[classI]] = newW
  numXVRpts = 10
  accuracies = []
  for rpt in range(numXVRpts):
    (colOverlapXV, cellOverlapXV, labelXV, labelTrain) = generateNestedXCdata(
      trainLabel, activeColumnOverlap, activeCellOverlap, seed=rpt)
    distanceMat = constructDistanceMat(
      colOverlapXV, cellOverlapXV, labelTrain, candidateW, b)
    accuracy, _ = calculateAccuracy(distanceMat, labelTrain, labelXV)
    accuracies.append(accuracy)
  return -np.mean(accuracies)
def costFuncB(newB, classI, w, b, activeColumnOverlap, activeCellOverlap,
              trainLabel, classList):
  """Negative cross-validated accuracy when class classI's bias is newB.

  Mirrors costFuncW but perturbs the additive offset b instead of the
  blending weight w; averaged over 10 nested cross-validation splits.
  """
  candidateB = copy.deepcopy(b)
  candidateB[classList[classI]] = newB
  numXVRpts = 10
  accuracies = []
  for rpt in range(numXVRpts):
    (colOverlapXV, cellOverlapXV, labelXV, labelTrain) = generateNestedXCdata(
      trainLabel, activeColumnOverlap, activeCellOverlap, seed=rpt)
    distanceMat = constructDistanceMat(
      colOverlapXV, cellOverlapXV, labelTrain, w, candidateB)
    accuracy, _ = calculateAccuracy(distanceMat, labelTrain, labelXV)
    accuracies.append(accuracy)
  return -np.mean(accuracies)
def prepareClassifierInput(distMatColumn, distMatCell, classList, classLabel, options):
  """Build per-sample nearest-class feature vectors for a downstream classifier.

  For each sample, the feature vector concatenates (when enabled in options)
  a column-based and a cell-based vector whose entry for each class is the
  maximum similarity of the sample to any training example of that class.

  NOTE(review): columnNN/cellNN are indexed directly by classIdx, so this
  assumes class labels are integers in range(numClass) — confirm at call sites.

  :param distMatColumn: (numSample, numTrain) column-similarity matrix
  :param distMatCell: (numSample, numTrain) cell-similarity matrix
  :param classList: iterable of class labels
  :param classLabel: per-training-example label array
  :param options: dict with 'useColumnRepresentation'/'useCellRepresentation'
  :return: list of 1D concatenated feature arrays, one per sample
  """
  # Map each class label to the indices of its training examples.
  classIdxMap = {}
  for classIdx in classList:
    classIdxMap[classIdx] = np.where(classLabel == classIdx)[0]
  classifierInput = []
  numSample, numTrain = distMatColumn.shape
  # The classList parameter is rebound to the map's keys from here on.
  classList = classIdxMap.keys()
  numClass = len(classList)
  for i in range(numSample):
    # Disabled representations contribute an empty array to the concatenation.
    if options['useColumnRepresentation']:
      columnNN = np.zeros((numClass,))
    else:
      columnNN = np.array([])
    if options['useCellRepresentation']:
      cellNN = np.zeros((numClass,))
    else:
      cellNN = np.array([])
    for classIdx in classList:
      # Nearest-neighbor score: best similarity to any example of the class.
      if options['useColumnRepresentation']:
        columnNN[classIdx] = np.max(
          distMatColumn[i, classIdxMap[classIdx]])
      if options['useCellRepresentation']:
        cellNN[classIdx] = np.max(distMatCell[i, classIdxMap[classIdx]])
    # if options['useColumnRepresentation']:
    #   columnNN[columnNN < np.max(columnNN)] = 0
    #   columnNN[columnNN == np.max(columnNN)] = 1
    #
    # if options['useCellRepresentation']:
    #   cellNN[cellNN < np.max(cellNN)] = 0
    #   cellNN[cellNN == np.max(cellNN)] = 1
    classifierInput.append(np.concatenate((columnNN, cellNN)))
  return classifierInput
def generateNestedXCdata(trainLabel, distMatColumn, distMatCell,
                         seed=1, xcPrct=0.5):
  """
  Set aside a portion of the training data for nested cross-validation.
  :param trainLabel: 1D label array for the full training set
  :param distMatColumn: (nTrain, nTrain) column-overlap distance matrix
  :param distMatCell: (nTrain, nTrain) cell-overlap distance matrix
  :param seed: RNG seed, so each repeat draws a different but repeatable split
  :param xcPrct: fraction of samples held out for the nested XV fold
  :return: (distMatColumnXV, distMatCellXV, trainLabelXV, trainLabeltrain)
    where the XV matrices are (numXV, numTrain) slices of the inputs
  """
  np.random.seed(seed)
  # Random split of sample indices, then re-sorted so samples stay in their
  # original order within each side of the split.
  randomIdx = np.random.permutation(len(trainLabel))
  numXVsamples = int(len(trainLabel) * xcPrct)
  numTrainSample = len(trainLabel) - numXVsamples
  selectXVSamples = randomIdx[:numXVsamples]
  selectTrainSamples = randomIdx[numXVsamples:]
  selectXVSamples = np.sort(selectXVSamples)
  selectTrainSamples = np.sort(selectTrainSamples)
  distMatColumnXV = np.zeros((numXVsamples, numTrainSample))
  distMatCellXV = np.zeros((numXVsamples, numTrainSample))
  # Row i: held-out sample i against every kept training sample.
  for i in range(numXVsamples):
    distMatColumnXV[i, :] = distMatColumn[
      selectXVSamples[i], selectTrainSamples]
    distMatCellXV[i, :] = distMatCell[
      selectXVSamples[i], selectTrainSamples]
  trainLabelXV = trainLabel[selectXVSamples]
  trainLabeltrain = trainLabel[selectTrainSamples]
  return (distMatColumnXV, distMatCellXV,
          trainLabelXV, trainLabeltrain) | gpl-3.0 |
alphaBenj/zipline | tests/data/test_dispatch_bar_reader.py | 5 | 12612 | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import array, nan
from numpy.testing import assert_almost_equal
from pandas import DataFrame, Timestamp
from zipline.assets import Equity, Future
from zipline.data.dispatch_bar_reader import (
AssetDispatchMinuteBarReader,
AssetDispatchSessionBarReader,
)
from zipline.data.resample import (
MinuteResampleSessionBarReader,
ReindexMinuteBarReader,
ReindexSessionBarReader,
)
from zipline.testing.fixtures import (
WithBcolzEquityMinuteBarReader,
WithBcolzEquityDailyBarReader,
WithBcolzFutureMinuteBarReader,
WithTradingSessions,
ZiplineTestCase,
)
OHLC = ['open', 'high', 'low', 'close']
class AssetDispatchSessionBarTestCase(WithBcolzEquityDailyBarReader,
                                      WithBcolzFutureMinuteBarReader,
                                      WithTradingSessions,
                                      ZiplineTestCase):
    """Session-level dispatch test: equity sids are served by a reindexing
    daily-bar reader and future sids by a minute-resampling session reader,
    both behind a single AssetDispatchSessionBarReader.
    """
    # The futures calendar is the primary calendar; NYSE sessions are a
    # subset of it, which is what the equity reader must reindex onto.
    TRADING_CALENDAR_STRS = ('us_futures', 'NYSE')
    TRADING_CALENDAR_PRIMARY_CAL = 'us_futures'
    ASSET_FINDER_EQUITY_SIDS = 1, 2, 3
    START_DATE = Timestamp('2016-08-22', tz='UTC')
    END_DATE = Timestamp('2016-08-24', tz='UTC')
    @classmethod
    def make_future_minute_bar_data(cls):
        # One bar per session (at the session open); NaN rows deliberately
        # leave a session empty so dispatch of gaps can be asserted.
        m_opens = [
            cls.trading_calendar.open_and_close_for_session(session)[0]
            for session in cls.trading_sessions['us_futures']]
        yield 10001, DataFrame({
            'open': [10000.5, 10001.5, nan],
            'high': [10000.9, 10001.9, nan],
            'low': [10000.1, 10001.1, nan],
            'close': [10000.3, 10001.3, nan],
            'volume': [1000, 1001, 0],
        }, index=m_opens)
        yield 10002, DataFrame({
            'open': [20000.5, nan, 20002.5],
            'high': [20000.9, nan, 20002.9],
            'low': [20000.1, nan, 20002.1],
            'close': [20000.3, nan, 20002.3],
            'volume': [2000, 0, 2002],
        }, index=m_opens)
        yield 10003, DataFrame({
            'open': [nan, 30001.5, 30002.5],
            'high': [nan, 30001.9, 30002.9],
            'low': [nan, 30001.1, 30002.1],
            'close': [nan, 30001.3, 30002.3],
            'volume': [0, 3001, 3002],
        }, index=m_opens)
    @classmethod
    def make_equity_daily_bar_data(cls):
        # Equity bars are written against NYSE sessions; NaN rows mirror the
        # gap pattern used for the futures above.
        sessions = cls.trading_sessions['NYSE']
        yield 1, DataFrame({
            'open': [100.5, 101.5, nan],
            'high': [100.9, 101.9, nan],
            'low': [100.1, 101.1, nan],
            'close': [100.3, 101.3, nan],
            'volume': [1000, 1001, 0],
        }, index=sessions)
        yield 2, DataFrame({
            'open': [200.5, nan, 202.5],
            'high': [200.9, nan, 202.9],
            'low': [200.1, nan, 202.1],
            'close': [200.3, nan, 202.3],
            'volume': [2000, 0, 2002],
        }, index=sessions)
        yield 3, DataFrame({
            'open': [301.5, 302.5, nan],
            'high': [301.9, 302.9, nan],
            'low': [301.1, 302.1, nan],
            'close': [301.3, 302.3, nan],
            'volume': [3001, 3002, 0],
        }, index=sessions)
    @classmethod
    def make_futures_info(cls):
        # Minimal contract metadata for the three future sids used above.
        return DataFrame({
            'sid': [10001, 10002, 10003],
            'root_symbol': ['FOO', 'BAR', 'BAZ'],
            'symbol': ['FOOA', 'BARA', 'BAZA'],
            'start_date': [cls.START_DATE] * 3,
            'end_date': [cls.END_DATE] * 3,
            # TODO: Make separate from 'end_date'
            'notice_date': [cls.END_DATE] * 3,
            'expiration_date': [cls.END_DATE] * 3,
            'multiplier': [500] * 3,
            'exchange': ['CME'] * 3,
        })
    @classmethod
    def init_class_fixtures(cls):
        super(AssetDispatchSessionBarTestCase, cls).init_class_fixtures()
        # Equities: daily bars reindexed onto the futures calendar.
        # Futures: minute bars resampled up to sessions.
        readers = {
            Equity: ReindexSessionBarReader(
                cls.trading_calendar,
                cls.bcolz_equity_daily_bar_reader,
                cls.START_DATE,
                cls.END_DATE),
            Future: MinuteResampleSessionBarReader(
                cls.trading_calendar,
                cls.bcolz_future_minute_bar_reader,
            )
        }
        cls.dispatch_reader = AssetDispatchSessionBarReader(
            cls.trading_calendar,
            cls.asset_finder,
            readers
        )
    def test_load_raw_arrays(self):
        # Mixed equity/future sid list: each sid's data must come from its
        # own reader, in the order the sids were requested.
        sessions = self.trading_calendar.sessions_in_range(
            self.START_DATE, self.END_DATE)
        results = self.dispatch_reader.load_raw_arrays(
            ['high', 'volume'],
            sessions[0], sessions[2], [2, 10003, 1, 10001])
        expected_per_sid = (
            (2, [array([200.9, nan, 202.9]),
                 array([2000, 0, 2002])],
             "sid=2 should have values on the first and third sessions."),
            (10003, [array([nan, 30001.9, 30002.9]),
                     array([0, 3001, 3002])],
             "sid=10003 should have values on the second and third sessions."),
            (1, [array([100.9, 101.90, nan]),
                 array([1000, 1001, 0])],
             "sid=1 should have values on the first and second sessions."),
            (10001, [array([10000.9, 10001.9, nan]),
                     array([1000, 1001, 0])],
             "sid=10001 should have a values on the first and second "
             "sessions."),
        )
        for i, (sid, expected, msg) in enumerate(expected_per_sid):
            for j, result in enumerate(results):
                assert_almost_equal(result[:, i], expected[j], err_msg=msg)
class AssetDispatchMinuteBarTestCase(WithBcolzEquityMinuteBarReader,
                                     WithBcolzFutureMinuteBarReader,
                                     ZiplineTestCase):
    """Minute-level dispatch test: equity minute bars are reindexed onto the
    futures calendar while future minute bars are read directly, both behind
    a single AssetDispatchMinuteBarReader.
    """
    TRADING_CALENDAR_STRS = ('us_futures', 'NYSE')
    TRADING_CALENDAR_PRIMARY_CAL = 'us_futures'
    ASSET_FINDER_EQUITY_SIDS = 1, 2, 3
    START_DATE = Timestamp('2016-08-24', tz='UTC')
    END_DATE = Timestamp('2016-08-24', tz='UTC')
    @classmethod
    def make_equity_minute_bar_data(cls):
        # Each equity sid gets bars on a different pair of the first three
        # NYSE minutes, so gaps land in different slots per sid.
        minutes = cls.trading_calendars[Equity].minutes_for_session(
            cls.START_DATE)
        yield 1, DataFrame({
            'open': [100.5, 101.5],
            'high': [100.9, 101.9],
            'low': [100.1, 101.1],
            'close': [100.3, 101.3],
            'volume': [1000, 1001],
        }, index=minutes[[0, 1]])
        yield 2, DataFrame({
            'open': [200.5, 202.5],
            'high': [200.9, 202.9],
            'low': [200.1, 202.1],
            'close': [200.3, 202.3],
            'volume': [2000, 2002],
        }, index=minutes[[0, 2]])
        yield 3, DataFrame({
            'open': [301.5, 302.5],
            'high': [301.9, 302.9],
            'low': [301.1, 302.1],
            'close': [301.3, 302.3],
            'volume': [3001, 3002],
        }, index=minutes[[1, 2]])
    @classmethod
    def make_future_minute_bar_data(cls):
        # Future bars mix minutes from the futures session open (f_m) with
        # minutes at/after the later equity market open (e_m).
        e_m = cls.trading_calendars[Equity].minutes_for_session(
            cls.START_DATE)
        f_m = cls.trading_calendar.minutes_for_session(
            cls.START_DATE)
        # Equity market open occurs at loc 930 in Future minutes.
        minutes = [f_m[0], e_m[0], e_m[1]]
        yield 10001, DataFrame({
            'open': [10000.5, 10930.5, 10931.5],
            'high': [10000.9, 10930.9, 10931.9],
            'low': [10000.1, 10930.1, 10931.1],
            'close': [10000.3, 10930.3, 10931.3],
            'volume': [1000, 1930, 1931],
        }, index=minutes)
        minutes = [f_m[1], e_m[1], e_m[2]]
        yield 10002, DataFrame({
            'open': [20001.5, 20931.5, 20932.5],
            'high': [20001.9, 20931.9, 20932.9],
            'low': [20001.1, 20931.1, 20932.1],
            'close': [20001.3, 20931.3, 20932.3],
            'volume': [2001, 2931, 2932],
        }, index=minutes)
        minutes = [f_m[2], e_m[0], e_m[2]]
        yield 10003, DataFrame({
            'open': [30002.5, 30930.5, 30932.5],
            'high': [30002.9, 30930.9, 30932.9],
            'low': [30002.1, 30930.1, 30932.1],
            'close': [30002.3, 30930.3, 30932.3],
            'volume': [3002, 3930, 3932],
        }, index=minutes)
    @classmethod
    def make_futures_info(cls):
        # Minimal contract metadata for the three future sids used above.
        return DataFrame({
            'sid': [10001, 10002, 10003],
            'root_symbol': ['FOO', 'BAR', 'BAZ'],
            'symbol': ['FOOA', 'BARA', 'BAZA'],
            'start_date': [cls.START_DATE] * 3,
            'end_date': [cls.END_DATE] * 3,
            # TODO: Make separate from 'end_date'
            'notice_date': [cls.END_DATE] * 3,
            'expiration_date': [cls.END_DATE] * 3,
            'multiplier': [500] * 3,
            'exchange': ['CME'] * 3,
        })
    @classmethod
    def init_class_fixtures(cls):
        super(AssetDispatchMinuteBarTestCase, cls).init_class_fixtures()
        # Equities are reindexed onto the futures minutes; futures pass
        # straight through to their bcolz reader.
        readers = {
            Equity: ReindexMinuteBarReader(
                cls.trading_calendar,
                cls.bcolz_equity_minute_bar_reader,
                cls.START_DATE,
                cls.END_DATE),
            Future: cls.bcolz_future_minute_bar_reader
        }
        cls.dispatch_reader = AssetDispatchMinuteBarReader(
            cls.trading_calendar,
            cls.asset_finder,
            readers
        )
    def test_load_raw_arrays_at_future_session_open(self):
        # Window starts at the futures session open, before equities trade:
        # equity sids must be all-NaN while future sids already have data.
        f_minutes = self.trading_calendar.minutes_for_session(self.START_DATE)
        results = self.dispatch_reader.load_raw_arrays(
            ['open', 'close'],
            f_minutes[0], f_minutes[2], [2, 10003, 1, 10001])
        expected_per_sid = (
            (2, [array([nan, nan, nan]),
                 array([nan, nan, nan])],
             "Before Equity market open, sid=2 should have no values."),
            (10003, [array([nan, nan, 30002.5]),
                     array([nan, nan, 30002.3])],
             "sid=10003 should have a value at the 22:03 occurring "
             "before the session label, which will be the third minute."),
            (1, [array([nan, nan, nan]),
                 array([nan, nan, nan])],
             "Before Equity market open, sid=1 should have no values."),
            (10001, [array([10000.5, nan, nan]),
                     array([10000.3, nan, nan])],
             "sid=10001 should have a value at the market open."),
        )
        for i, (sid, expected, msg) in enumerate(expected_per_sid):
            for j, result in enumerate(results):
                assert_almost_equal(result[:, i], expected[j], err_msg=msg)
        # NOTE(review): the result of this second load is never asserted —
        # presumably a smoke re-read of the same window; confirm whether
        # assertions are missing here.
        results = self.dispatch_reader.load_raw_arrays(
            ['open'], f_minutes[0], f_minutes[2], [2, 10003, 1, 10001])
    def test_load_raw_arrays_at_equity_session_open(self):
        # Window starts at the equity market open: both asset types should
        # report data according to the minutes each sid was written on.
        e_minutes = self.trading_calendars[Equity].minutes_for_session(
            self.START_DATE)
        results = self.dispatch_reader.load_raw_arrays(
            ['open', 'high'], e_minutes[0], e_minutes[2],
            [10002, 1, 3, 10001])
        expected_per_sid = (
            (10002, [array([nan, 20931.5, 20932.5]),
                     array([nan, 20931.9, 20932.9])],
             "At Equity market open, sid=10002 should have values at the "
             "second and third minute."),
            (1, [array([100.5, 101.5, nan]),
                 array([100.9, 101.9, nan])],
             "At Equity market open, sid=1 should have values at the first "
             "and second minute."),
            (3, [array([nan, 301.5, 302.5]),
                 array([nan, 301.9, 302.9])],
             "At Equity market open, sid=3 should have a values at the second "
             "and third minute."),
            (10001, [array([10930.5, 10931.5, nan]),
                     array([10930.9, 10931.9, nan])],
             "At Equity market open, sid=10001 should have a values at the "
             "first and second minute."),
        )
        for i, (sid, expected, msg) in enumerate(expected_per_sid):
            for j, result in enumerate(results):
                assert_almost_equal(result[:, i], expected[j], err_msg=msg)
| apache-2.0 |
prasnko/ml | DecisionTrees/DecisionTree.py | 1 | 11953 | #-------------------------------------------------------------------------------
# Name: Decision Trees
# Purpose: Machine Learning Homework 1
#
# Author: PRASANNA
#
# Created: 04/02/2013
# Copyright: (c) PRASANNA 2013
# Licence: PVK
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#
# B> Accuracy of decision tree on training file train.txt = 87.5%
#
# C> Accuracy of decision tree on test file test.txt = 83.25%
#
# Data Structures and Functions
#
# calcEntropy -> Calculates the entropy for the entire dataset that is passes to it based on the class values and returns it.
# labelCounts stores the counts of each label
#
# splitDataset -> Splits the dataset based on the attribte index and it's value both of which are passed to it and returns the sub dataset.
# newFeatVec stores a list of features without the attribute value that is matched.
# newX stores the reduced dataset.
#
# getBestFeature -> Returns the feature that provides the maximum information gain.
# featList stores the feature list for each attribute.
# subDataset stores the reduced dataset that is returned by SplitDataset function
#
# majorityCount-> Returns the class label that has majority of the examples.
# classCount dictionary is used to store the count of examples of each class value with class value as key and count as value.
# checkCompList is passed to check if we should check count of complete list in case of tie among subset of class values for examples at leaf.
#
# createTree -> It is used to create the decision tree. It return the decision tree that is created in the form of a dictionary.
# decisionTree is a dictionary that is used to build the decision Tree recursively by storing another dictionary for next level.
# labels stores a list of attribute names.
# level variable helps in formating the decision tree based on the level of the recursive call
#
# classify -> It is used to classify a given feature vector and returns the class label that the feature vector belongs to.
# classList is a list that stores class values of entire dataset
#
# calcAccuracy -> Calculates decision tree accuracy using actual and predicted class labels that are passed in the form of lists
# and returns count of correctly classified instances.
#
# formatText -> Formats the text document that is passed to it to return two lists:
# words: which is training data in the form of list of lists.
# attributes: which is a list of names of the features.
#
# partialSetTest -> part d is carried out with the help of this method. It generates the training data and attributes and then generates
# decision tree and calculates accuracy on increasing subsets of dataset by size 50. Finally, it plots the result using matplotlib library.
#
#-------------------------------------------------------------------------------
from math import log
import operator
import re
import sys
from sys import stdout
import numpy
import matplotlib.pyplot as plt
import pylab as pl
def calcEntropy(dataSet):
    """Shannon entropy (base 2) of the class labels in dataSet.

    The class label is taken to be the last element of each feature vector.
    """
    numEntries = len(dataSet)
    # Tally how many examples fall under each class label.
    labelCounts = {}
    for featVec in dataSet:
        label = featVec[-1]
        labelCounts[label] = labelCounts.get(label, 0) + 1
    # Sum -p * log2(p) over the label distribution.
    entropy = 0.0
    for count in labelCounts.values():
        prob = float(count) / numEntries
        entropy -= prob * log(prob, 2)
    return entropy
def splitDataSet(X, attrIndex, value):
    """Return the subset of X whose attribute at attrIndex equals value.

    The matched attribute is dropped from every returned vector, so each
    surviving row is one column shorter than the input rows.
    """
    reduced = []
    for featVec in X:
        if featVec[attrIndex] != value:
            continue
        reduced.append(featVec[:attrIndex] + featVec[attrIndex + 1:])
    return reduced
def getBestFeature(dataSet):
    """Index of the attribute with the highest information gain, or -1.

    -1 is returned when no attribute yields a strictly positive gain over
    the base entropy of the dataset.
    """
    baseEntropy = calcEntropy(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    numFeatures = len(dataSet[0]) - 1
    for attrIndex in range(numFeatures):
        # Weighted entropy of the partition induced by this attribute.
        values = set(example[attrIndex] for example in dataSet)
        splitEntropy = 0.0
        for value in values:
            subset = splitDataSet(dataSet, attrIndex, value)
            weight = len(subset) / float(len(dataSet))
            splitEntropy += weight * calcEntropy(subset)
        gain = baseEntropy - splitEntropy
        if gain > bestInfoGain:
            bestInfoGain = gain
            bestFeature = attrIndex
    return bestFeature
def majorityCount(classList, checkCompList):
    """Most common class label in classList, with an explicit tie sentinel.

    :param classList: list of class labels
    :param checkCompList: truthy when counting over the complete dataset; in
        that case a tie is broken by returning the first label in sort order
        instead of being signalled
    :return: the majority label, or -1 when the top two labels are tied and
        checkCompList is 0 (the caller then re-counts over the whole dataset)
    """
    classCount = {}
    for vote in classList:
        classCount[vote] = classCount.get(vote, 0) + 1
    # iteritems() was Python-2-only; items() behaves identically here and
    # keeps the function working under both Python 2 and 3.
    sortedClassCount = sorted(classCount.items(),
                              key=operator.itemgetter(1), reverse=True)
    if len(sortedClassCount) > 1:
        # Only the top two counts are compared: a two-way tie for first place
        # is reported as -1 unless we are already counting the complete list.
        if sortedClassCount[0][1] == sortedClassCount[1][1] and checkCompList == 0:
            return -1
    return sortedClassCount[0][0]
def createTree(dataSet,labels,wholeClassList,wholeDataSet,level):
    """Recursively build (and pretty-print) an ID3 decision tree.

    :param dataSet: current (possibly reduced) training examples
    :param labels: attribute names still available for splitting (mutated!)
    :param wholeClassList: class labels of the full dataset, used to break
        ties at leaves
    :param wholeDataSet: the full dataset, used so every attribute value is
        represented in the tree even if absent from the reduced dataSet
    :param level: recursion depth, used only for the textual tree output
    :return: nested dict {attribute: {value: subtree-or-label}}, or a class
        label at a leaf
    """
##    if len(dataSet)> len(wholeDataSet):
##        wholeDataSet = dataSet[:]
    level+=1
    classList = [example[-1] for example in dataSet]
    if len(classList) > len(wholeClassList):
        wholeClassList = classList[:]
    # Pure node: every remaining example has the same class.
    if len(classList)>0:
        if classList.count(classList[0]) == len(classList):
            return classList[0]
    if len(dataSet)>0:
        # If no more examples remain, take maximum of class values at leaf node else maximum from whole dataset.
        if len(dataSet[0]) == 1:
            leafCount = majorityCount(classList,0)
            if leafCount !=-1:
                return leafCount
            else:
                return majorityCount(wholeClassList,1)
    else:
        return majorityCount(wholeClassList,1)
    bestFeat = getBestFeature(dataSet)
    bestFeatLabel = labels[bestFeat]
    # Store decision tree recursively in dictionary.
    decisionTree = {bestFeatLabel:{}}
    # NOTE: labels is mutated; callers pass a copy (subLabels) on recursion.
    del(labels[bestFeat])
    featValues = [example[bestFeat] for example in wholeDataSet]
    # Extracting unique value of features for given attribute.
    uniqueVals = set(featValues)
    # For each value of the best feature selected, generate the tree.
    for value in uniqueVals:
        # treeOutput builds the "|  " indentation prefix for this depth.
        treeOutput = ''
        if level==1:
            stdout.write('\n')
        for i in range(0,level-1):
            if i==0 :
                stdout.write("\n")
            treeOutput += '|  '
        treeOutput += bestFeatLabel+'='+str(value)
        stdout.write("%s" %treeOutput)
        subLabels = labels[:]
        decisionTree[bestFeatLabel][value] = createTree(splitDataSet\
        (dataSet, bestFeat, value),subLabels,wholeClassList,wholeDataSet,level)
        # If value returned from lower level is a number return it as class label.
        if type(decisionTree[bestFeatLabel][value]).__name__ != 'dict':
            stdout.write(":%d" % int(decisionTree[bestFeatLabel][value]))
    return decisionTree
# Classifying one feature vector at a time
def classify(inputTree,attributes,featVec,wholeClassList):
    """Classify featVec by walking the decision tree recursively.

    Unseen attribute values fall back to the majority class of the whole
    training set.
    NOTE(review): inputTree.keys()[0] is Python-2-only (dict views are not
    indexable on Python 3); confirm before porting.
    NOTE(review): if secondLevel has no keys, classLabel is never assigned
    and the final return raises UnboundLocalError — presumably unreachable
    for trees built by createTree; verify.
    """
    firstLevel = inputTree.keys()[0]
    secondLevel = inputTree[firstLevel]
    # Feature index of attribute selected at first level
    featIndex = attributes.index(firstLevel)
    # Traversing down the tree recursively
    k= secondLevel.keys()
    i=1
    clAssigned =0
    # For keys at the next level if a match is found process further
    for key in secondLevel.keys():
        if featVec[featIndex] == key:
            clAssigned = 1
            # If key type is dictionary it means next level is not a leaf node, so process recursively.
            if type(secondLevel[key]).__name__=='dict':
                classLabel = classify(secondLevel[key],attributes,featVec,wholeClassList)
            else:
                # At leaf level assign the class label
                classLabel = secondLevel[key]
        elif i == len(k) and clAssigned == 0:
            # No branch matched this feature value: fall back to the
            # majority class over the complete training set.
            classLabel = majorityCount(wholeClassList,1)
        i+=1
    return classLabel
def calcAccuracy(predicted, actual):
    """Count positions where the predicted label matches the actual label."""
    return sum(1 for idx in range(len(actual))
               if predicted[idx] == actual[idx])
def formatText(x):
    """Parse a tab-separated dataset file object into (rows, attribute names).

    The first line describes the attributes; after splitting it on non-word
    characters, every even-indexed token is kept as an attribute name.  The
    remaining non-empty lines become tab-split feature rows.
    """
    text = x.read()
    lines = text.split('\n')
    # Even-indexed tokens of the header line are the attribute names.
    headerTokens = re.split('\W+', lines[0])
    attributes = [tok for idx, tok in enumerate(headerTokens) if idx % 2 == 0]
    # Drop the header line and the empty entry left by the trailing newline.
    lines.remove(str(lines[0]))
    lines.remove('')
    words = [line.split('\t') for line in lines]
    return words, attributes
# Function to check effect of training data size on accuracy and drawing learning curve.
def partialSetTest():
    """Plot a learning curve: accuracy vs. training-set size.

    Reads the train/test file paths from sys.argv[1] and sys.argv[2], trains
    a tree on growing prefixes of the training data (in steps of 50) and
    plots accuracy on the fixed test set with matplotlib.
    NOTE(review): `actual` and `predicted` grow across iterations without
    being reset, so later accuracy values mix predictions from earlier
    trees — confirm whether this is intended.
    """
    lines = []
    attributes = []
    words = []
    line =[]
    actual = []
    predicted = []
    leafValues = []
    wholeClassList = []
    wholeDataset = []
    X=[]
    Y=[]
##    train = open(r'C:\Prasanna\Spring13\ML\HW1\data\train.txt')
##    test = open(r'C:\Prasanna\Spring13\ML\HW1\data\test.txt')
    train = open(sys.argv[1])
    test = open(sys.argv[2])
    wordsTrain,attributesTrain = formatText(train)
    wordsTest,attributesTest = formatText(test)
    # Train on prefixes of size 50, 100, ... and record accuracy for each.
    for j in range(50,len(wordsTrain),50):
        X.append(j)
        partialTrainSet = wordsTrain[0:j]
        attr = attributesTrain[:]
        tree = createTree(partialTrainSet,attr,wholeClassList,partialTrainSet,0)
##        leafValues,wholeClassList
        # Actual class label
        for word in wordsTest:
            actual.append(word[-1])
        featLabels = attributesTest[:]
        wholeClassList = [example[-1] for example in partialTrainSet]
        for word in wordsTest:
            featVec = word[:-1]
            predicted.append(classify(tree,featLabels,featVec,wholeClassList))
        accCount = calcAccuracy(predicted,actual)
        Y.append(float(accCount)*100/float(len(actual)))
    pl.xlabel('Training set size')
    pl.ylabel('Accuracy')
    pl.title('Learning Curve')
##    pl.xlim(0,900)
##    pl.ylim(0,100)
    # Matplotlib method to plot and show the data.
    pl.plot(X,Y)
    pl.show()
def main():
    """Train a decision tree on sys.argv[1], evaluate it on sys.argv[2],
    and print the tree plus accuracy statistics (Python 2 script)."""
##    partialSetTest()
    lines = []
    attributes = []
    words = []
    line =[]
    actual = []
    predicted = []
    leafValues = []
    wholeClassList = []
    wholeDataset = []
##    train = open(r'C:\Prasanna\Spring13\ML\HW1\data\train.txt')
##    test = open(r'C:\Prasanna\Spring13\ML\HW1\data\test.txt')
    train = open(sys.argv[1])
    test = open(sys.argv[2])
    # Formating the train and text documents in the form of lists for processing in the algorithm
    wordsTrain,attributesTrain = formatText(train)
    wordsTest,attributesTest = formatText(test)
    print 'Training Instances size:'+ str(len(wordsTrain))
    print 'Attributes:'+str(len(attributesTrain))
    for attr in attributesTrain:
        print attr
    print 'Testing Instances size:'+ str(len(wordsTest))
    # Sending second copy of training data as feature values need to be iterated on whole training dataset and not the reduced dataset which would miss out some values in the decision tree.
    tree = createTree(wordsTrain,attributesTrain,wholeClassList,wordsTrain,0)
    # Actual class label
    for word in wordsTest:
        actual.append(word[-1])
    featLabels = attributesTest[:]
    wholeClassList = [example[-1] for example in wordsTrain]
    # Generating list of predicted class values using classify function.
    for word in wordsTest:
        featVec = word[:-1]
        predicted.append(classify(tree,featLabels,featVec,wholeClassList))
    # Compare predictions against ground truth and report accuracy.
    accCount = calcAccuracy(predicted,actual)
    print '\n'+'Correctly Classified Instances: '+str(accCount)
    print 'Incorrectly Classified Instances: '+str(len(actual)-accCount)
    print 'Accuracy= '+ str(float(accCount)*100/float(len(actual))) +' %'
| mit |
aewhatley/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_spectral_embedding_two_components(seed=36):
    # Test spectral embedding with two components
    random_state = np.random.RandomState(seed)
    n_sample = 100
    affinity = np.zeros(shape=[n_sample * 2,
                               n_sample * 2])
    # first component
    affinity[0:n_sample,
             0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # second component
    affinity[n_sample::,
             n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # connection
    affinity[0, n_sample + 1] = 1
    affinity[n_sample + 1, 0] = 1
    # Zero the diagonal, then symmetrize so the matrix is a valid affinity.
    affinity.flat[::2 * n_sample + 1] = 0
    affinity = 0.5 * (affinity + affinity.T)
    true_label = np.zeros(shape=2 * n_sample)
    true_label[0:n_sample] = 1
    se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
                                   random_state=np.random.RandomState(seed))
    embedded_coordinate = se_precomp.fit_transform(affinity)
    # Some numpy versions are touchy with types
    embedded_coordinate = \
        se_precomp.fit_transform(affinity.astype(np.float32))
    # thresholding on the first components using 0.
    label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
    # Perfect NMI (1.0) means the sign of the first component exactly
    # separates the two weakly-connected components.
    assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
    # Embedding a precomputed RBF kernel must match embedding the raw data
    # with affinity="rbf" (up to per-column sign flips).
    gamma = 1.0
    kernel_matrix = rbf_kernel(S, gamma=gamma)
    model_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
                                      random_state=np.random.RandomState(seed))
    model_rbf = SpectralEmbedding(n_components=2, affinity="rbf", gamma=gamma,
                                  random_state=np.random.RandomState(seed))
    embed_precomp = model_precomp.fit_transform(kernel_matrix)
    embed_rbf = model_rbf.fit_transform(S)
    assert_array_almost_equal(
        model_precomp.affinity_matrix_, model_rbf.affinity_matrix_)
    assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
    # Test spectral embedding with callable affinity
    gamma = 0.9
    kern = rbf_kernel(S, gamma=gamma)
    # A callable affinity wrapping rbf_kernel must reproduce both the
    # affinity matrix and the embedding of the built-in "rbf" affinity.
    se_callable = SpectralEmbedding(n_components=2,
                                    affinity=(
                                        lambda x: rbf_kernel(x, gamma=gamma)),
                                    gamma=gamma,
                                    random_state=np.random.RandomState(seed))
    se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
                               gamma=gamma,
                               random_state=np.random.RandomState(seed))
    embed_rbf = se_rbf.fit_transform(S)
    embed_callable = se_callable.fit_transform(S)
    assert_array_almost_equal(
        se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
    assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
    # Embeddings may differ by per-column sign, so compare with flipping.
    assert_true(
        _check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
    # The AMG eigensolver should agree with ARPACK (up to column sign flips).
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        raise SkipTest("pyamg not available.")
    common_kwargs = dict(n_components=2, affinity="nearest_neighbors",
                         n_neighbors=5)
    embedding_amg = SpectralEmbedding(
        eigen_solver="amg", random_state=np.random.RandomState(seed),
        **common_kwargs).fit_transform(S)
    embedding_arpack = SpectralEmbedding(
        eigen_solver="arpack", random_state=np.random.RandomState(seed),
        **common_kwargs).fit_transform(S)
    assert_true(_check_with_col_sign_flipping(embedding_amg, embedding_arpack,
                                              0.05))
def test_pipeline_spectral_clustering(seed=36):
    # Test using pipeline to do spectral clustering
    random_state = np.random.RandomState(seed)
    se_rbf = SpectralEmbedding(n_components=n_clusters,
                               affinity="rbf",
                               random_state=random_state)
    se_knn = SpectralEmbedding(n_components=n_clusters,
                               affinity="nearest_neighbors",
                               n_neighbors=5,
                               random_state=random_state)
    # KMeans on either embedding of the blob data S must recover the true
    # cluster labels (NMI of 1.0, to 2 decimals).
    for se in [se_rbf, se_knn]:
        km = KMeans(n_clusters=n_clusters, random_state=random_state)
        km.fit(se.fit_transform(S))
        assert_array_almost_equal(
            normalized_mutual_info_score(
                km.labels_,
                true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
    # An unrecognized eigen_solver string must raise ValueError at fit time.
    embedder = SpectralEmbedding(n_components=1, affinity="precomputed",
                                 random_state=np.random.RandomState(seed),
                                 eigen_solver="<unknown>")
    assert_raises(ValueError, embedder.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
    # An unrecognized affinity string must raise ValueError at fit time.
    embedder = SpectralEmbedding(n_components=1, affinity="<unknown>",
                                 random_state=np.random.RandomState(seed))
    assert_raises(ValueError, embedder.fit, S)
def test_connectivity(seed=36):
    # _graph_is_connected must detect (dis)connected graphs for dense, CSR
    # and CSC adjacency representations alike.
    disconnected = np.array([[1, 0, 0, 0, 0],
                             [0, 1, 1, 0, 0],
                             [0, 1, 1, 1, 0],
                             [0, 0, 1, 1, 1],
                             [0, 0, 0, 1, 1]])
    connected = np.array([[1, 1, 0, 0, 0],
                          [1, 1, 1, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 1, 1, 1],
                          [0, 0, 0, 1, 1]])
    for convert in (lambda g: g, csr_matrix, csc_matrix):
        assert_equal(_graph_is_connected(convert(disconnected)), False)
        assert_equal(_graph_is_connected(convert(connected)), True)
def test_spectral_embedding_deterministic():
    """Two runs of ``spectral_embedding`` on the same input must match."""
    # Test that Spectral Embedding is deterministic
    random_state = np.random.RandomState(36)
    data = random_state.randn(10, 30)
    sims = rbf_kernel(data)
    embedding_1 = spectral_embedding(sims)
    embedding_2 = spectral_embedding(sims)
    assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
Aurora0001/LearnProgrammingBot | main.py | 1 | 13523 | #!/usr/bin/python2
from __future__ import print_function
from sklearn.pipeline import make_union
from sklearn.base import TransformerMixin
from sklearn.feature_extraction.text import TfidfVectorizer
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from sklearn import svm
import numpy as np
import logging
import webbrowser
import argparse
import sys
import re
from settings import LOGFILE_URI, DATABASE_URI, LOG_LEVEL, CLIENT_ID, CLIENT_SECRET, CLIENT_ACCESSCODE, SUBREDDIT, REDIRECT_URI, LOG_FORMAT
import model
import praw
# This allows for Python 3 compatibility by replacing input() on Python 2
# (raw_input was renamed to input in Python 3).
if sys.version_info[:2] <= (2, 7):
    input = raw_input

# Canned reply bodies, keyed by classifier category. Classifier predictions
# that are not keys in this dict are silently ignored by run_bot().
responses = {
    'off_topic': '''
Hi! Your post might not attract good responses on /r/learnprogramming.
This may be because you didn't include a code sample, provided very little
detail or linked content that doesn't seem relevant. You can improve your post
by:
- [Asking Questions The Smart Way](http://catb.org/~esr/faqs/smart-questions.html)
- Avoiding posting links without any explanation, discussion or question (links
might get a better response on /r/programming)
- Using code pastebins (images don't count!)
- Reviewing the post guidelines on the sidebar
Don't worry about this message if you think it's a mistake - it may just be an
error in my classifier, but please check the resources above anyway to make
sure that your post gets the best responses.
''',
    'faq_get_started': '''
Hello! Your post seems to be about getting started with programming or a
project. You can find some great resources about this in the
[/r/learnprogramming FAQ](https://www.reddit.com/r/learnprogramming/wiki/faq).
Specifically, you might find these useful:
- [Getting Started with Programming](https://www.reddit.com/r/learnprogramming/wiki/gettingstarted)
- [FAQ - How do I get started?](https://www.reddit.com/r/learnprogramming/wiki/faq#wiki_how_do_i_get_started_with_programming.3F)
- [FAQ - How do I get started with a large project?](https://www.reddit.com/r/learnprogramming/wiki/faq#wiki_how_do_i_get_started_with_a_large_project_and_keep_up_with_it.3F)
''',
    'faq_career': '''
Hello! Your post seems to be about careers in programming. You'll
be able to get the best advice in the subreddit /r/cscareerquestions, who
specifically deal with questions like this.
The wiki also has some useful advice about this:
- [FAQ - Careers](https://www.reddit.com/r/learnprogramming/wiki/faq#wiki_careers_and_jobs)
''',
    'faq_resource': '''
Hello! You seem to be looking for a resource or tutorial. The /r/learnprogramming
wiki has a comprehensive list of resources that might be useful to you, but if
what you're looking for isn't on there, please help by adding it!
- [Online Resources](http://www.reddit.com/r/learnprogramming/wiki/online)
- [Books](http://www.reddit.com/r/learnprogramming/wiki/books)
- [Programming Challenges](http://www.reddit.com/r/learnprogramming/wiki/faq#wiki_where_can_i_find_practice_exercises_and_project_ideas.3F)
You might also like the [Awesome Lists](https://awesomelists.top/), which are
curated lists for the best libraries, tools and resources for most programming
languages, topics and tools.
''',
    'faq_tool': '''
Hello! Your post seems to be about a programming tool, IDE or hardware (e.g. a laptop).
Take a look at the following links:
- /r/suggestalaptop
- [Wiki - Programming Tools](https://www.reddit.com/r/learnprogramming/wiki/tools)
''',
    'faq_language': '''
Hello! You seem to be asking about which programming language to use for a
project or which language to learn. This is quite a frequent question so you
might find that you get the best answer from the
[FAQ](https://www.reddit.com/r/learnprogramming/wiki/faq#wiki_which_programming_language_should_i_start_with.3F).
Also, why not try the [choosing a language tool](http://choosing-a-language.techboss.co/)
by Techboss which should guide you in picking a suitable language.
The general advice here is that you should focus on one programming language
that you know well, so you can improve your *algorithmic thinking* skills.
'Language hopping' tends to be a bad idea because you are always learning
syntax, which is less important.
''',
    'faq_other': '''
Hello! Your post seems similar to an FAQ question, but I can't specifically
figure out which section would be helpful to you.
Take a look through [the wiki](https://www.reddit.com/r/learnprogramming/wiki/index)
if you haven't already, and check to see if it helps you. If not, please
report an issue so I can give more specific help in future!
''',
    'faq_resource_podcast': '''
Looking for a podcast? You might find these threads useful:
- [Podcasts for Beginners](https://www.reddit.com/r/learnprogramming/comments/47dusa/podcasts_any_recommendations_for_a_beginner/)
- [Advanced Programming Podcasts](https://www.reddit.com/r/learnprogramming/comments/3pw6gl/advanced_programming_concepts_or_fun_fact_type/)
''',
    'faq_what_now': '''
Hi! If you've just completed your first course and aren't sure where to go next, take a look at some of these guides and see if they help:
- [FAQ - Now what do I do?](https://www.reddit.com/r/learnprogramming/wiki/faq#wiki_now_what_do_i_do.3F)
- [How do I move from beginner to intermediate level?](https://www.reddit.com/r/learnprogramming/wiki/faq#wiki_how_do_i_move_from_an_beginning_to_an_intermediate_level.3F)
'''
}

# Footer appended to every comment the bot posts.
post_signature = '''
---
I am a bot for /r/learnprogramming using supervised learning to provide helpful
responses to common posts. I'm open source and accept pull requests and
contributions!
[[Learn More]](https://github.com/Aurora0001/LearnProgrammingBot)
[[Report an Issue (or reply below with feedback)]](https://github.com/Aurora0001/LearnProgrammingBot/issues)
'''
class PostTransformer(TransformerMixin):
    """
    Transforms posts into three numeric features (alongside TF-IDF):
    - Length of post (scaled by ``word_k``)
    - Amount of links, approximated by counting 'http' (scaled by ``link_k``)
    - Ratio of space characters to post length (code-formatting signal)

    The transformer is stateless, so ``fit`` is a no-op and
    ``fit_transform`` is an alias for ``transform``.
    """
    def __init__(self, word_k=10000, link_k=5):
        # TODO: grid search for best constants
        self.word_k = word_k
        self.link_k = link_k

    def fit(self, *args):
        """No-op; nothing is learned from the data."""
        return self

    def transform(self, X, *args, **kwargs):
        """Return an (n_samples, 3) float array of features for posts in X."""
        ret = []
        for item in X:
            length = len(item)
            ret.append(float(length) / self.word_k)
            ret.append(float(item.count('http')) / self.link_k)
            # Guard against empty posts, which previously raised
            # ZeroDivisionError on the ratio feature.
            ret.append((float(item.count(' ')) / length) if length else 0.0)
        y = np.array(ret).reshape(-1, 3)
        return y

    # Stateless: transforming is the same with or without fitting.
    fit_transform = transform
class Classifier(object):
    """
    Wrapper for the vectorizer and classifier that handles training of both.

    The vectorizer is a union of a TF-IDF text vectorizer and the custom
    PostTransformer numeric features; the classifier is a linear SVM.
    """
    def __init__(self, training_values=None, training_targets=None):
        self.vectorizer = make_union(TfidfVectorizer(), PostTransformer())
        # Set using parameter_search. TODO: review after updating
        # corpus.
        self.classifier = svm.LinearSVC(C=1, loss='squared_hinge', multi_class='ovr', class_weight='balanced', tol=1e-6)
        # Train immediately when a corpus is supplied at construction time.
        if training_values is not None and training_targets is not None:
            self.fit(training_values, training_targets)

    def fit(self, training_values, training_targets):
        """Fit the vectorizer and the SVM on the labelled corpus."""
        training_values = self.vectorizer.fit_transform(training_values).toarray()
        self.classifier.fit(training_values, training_targets)

    def classify(self, text):
        """Return the predicted category array for a single post string."""
        transformed_text = self.vectorizer.transform([text]).toarray()
        return self.classifier.predict(transformed_text)

    def get_probability(self, text):
        """Return the SVM decision-function scores for a single post string."""
        transformed_text = self.vectorizer.transform([text]).toarray()
        return self.classifier.decision_function(transformed_text)
def connect_to_database(uri):
    """Create a SQLAlchemy engine for *uri* and return a session factory."""
    return sessionmaker(bind=create_engine(uri))
def get_reddit_client():
    """Return a praw client configured with this bot's OAuth app info.

    Credentials come from settings.py (CLIENT_ID / CLIENT_SECRET /
    REDIRECT_URI); no access token is attached here.
    """
    reddit = praw.Reddit(user_agent='all platforms:Learn Programming Bot:v0.2.0-pre (by /u/Aurora0001, contact at github.com/Aurora0001/LearnProgrammingBot/issues)')
    reddit.set_oauth_app_info(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, redirect_uri=REDIRECT_URI)
    return reddit
def run_bot(args):
    """Train the classifier from the stored corpus, then answer new posts.

    Streams new submissions from SUBREDDIT, classifies each one and, when a
    canned response exists for the predicted category, posts it as a comment.
    With ``--supervised``, each reply must be confirmed interactively first.
    """
    logging.basicConfig(filename=LOGFILE_URI, level=LOG_LEVEL, format=LOG_FORMAT)
    logging.info('Connecting to database {}'.format(DATABASE_URI))
    Session = connect_to_database(DATABASE_URI)
    logging.info('Database connection OK')
    session = Session()
    data = session.query(model.Corpus).all()
    # Title and body are concatenated into a single text feature.
    data_values = [col.title + ' ' + col.text for col in data]
    data_targets = [col.category for col in data]
    logging.info('Training classifier with {} values'.format(len(data_values)))
    classifier = Classifier(data_values, data_targets)
    logging.info('Classifier trained')
    logging.info('Connecting to reddit...')
    reddit = get_reddit_client()
    logging.info('Authorizing...')
    access_information = reddit.get_access_information(CLIENT_ACCESSCODE)
    reddit.set_access_credentials(**access_information)
    logging.info('Logged in successfully.')
    for message in praw.helpers.submission_stream(reddit, SUBREDDIT, limit=5, verbosity=0):
        message_text = message.title + ' ' + message.selftext
        pred = classifier.classify(message_text)[0]
        # Categories without a canned response are deliberately skipped.
        if pred in responses:
            if args.supervised and input('Classify {} as {}? (y/n) '.format(message.id, pred)).lower() != 'y':
                continue
            try:
                message.add_comment(responses[pred] + post_signature)
            except praw.errors.RateLimitExceeded:
                # TODO:
                # Ideally, errors should actually be handled properly. Perhaps a dequeue could be used
                # to store all the posts which failed, which could be retried every minute (or so)
                logging.error('Rate limit exceeded, cannot post to thread {}'.format(message.title))
def train_id(args):
    """CLI handler: label a single submission (``--id``) for the corpus."""
    train_bot(args, True)
def train_batch(args):
    """CLI handler: label a batch of newest submissions (``--limit``)."""
    train_bot(args, False)
def train_bot(args, by_id):
    """Interactively label reddit posts and store them in the training corpus.

    Args:
        args: parsed CLI namespace; uses ``args.id`` when *by_id* is true,
            otherwise ``args.limit``.
        by_id: when True, label a single submission; otherwise label the
            newest posts from SUBREDDIT.

    An empty category entered at the prompt skips the post.
    """
    reddit = get_reddit_client()
    if by_id:
        messages = [reddit.get_submission(submission_id=args.id)]
    else:
        messages = reddit.get_subreddit(SUBREDDIT).get_new(limit=args.limit)
    # Create the engine/session once, not once per labelled post: the
    # previous version rebuilt the database connection inside the loop.
    # Each post is still committed individually.
    Session = connect_to_database(DATABASE_URI)
    session = Session()
    for message in messages:
        print(message.title)
        print('----------')
        print(message.selftext)
        print('')
        message_type = input('Enter category: ')
        if message_type == '':
            continue
        session.add(model.Corpus(title=message.title, text=message.selftext, category=message_type))
        session.commit()
def create_token(args):
    """Open the OAuth authorisation URL in a browser.

    The user must copy the access code from the redirect URL into
    settings.py as CLIENT_ACCESSCODE; nothing is stored automatically.
    """
    reddit = get_reddit_client()
    url = reddit.get_authorize_url('uniqueKey', 'identity,submit,read', True)
    webbrowser.open(url)
    print(' !!! ')
    print('Please copy the access code that you are redirected to ')
    print('like this: http://praw.readthedocs.org/en/latest/_images/CodeUrl.png')
    print('You need to put it in settings.py as CLIENT_ACCESSCODE')
    print(' !!! ')
def classify_item(args):
    """Classify one submission (``--id``) and print per-class scores."""
    reddit = get_reddit_client()
    post = reddit.get_submission(submission_id=args.id)
    Session = connect_to_database(DATABASE_URI)
    session = Session()
    data = session.query(model.Corpus).all()
    data_values = [col.title + ' ' + col.text for col in data]
    data_targets = [col.category for col in data]
    classifier = Classifier(data_values, data_targets)
    post_text = post.title + ' ' + post.selftext
    classification = classifier.classify(post_text)[0]
    # NOTE(review): with exactly two classes, decision_function returns one
    # scalar per sample, so max()/indexing below would fail -- confirm the
    # corpus always contains more than two categories.
    probability = classifier.get_probability(post_text)[0]
    print('p({}) = {}'.format(classification, max(probability)))
    print('-----------')
    for (i, class_) in enumerate(classifier.classifier.classes_):
        print('p({}) = {}'.format(class_, probability[i]))
def initialise_database(args):
    """Create the corpus table(s) in the configured database (idempotent)."""
    engine = create_engine(DATABASE_URI)
    model.Corpus.metadata.create_all(engine)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
parser_run = subparsers.add_parser('run', help='runs the bot')
parser_run.add_argument('--supervised', action='store_true')
parser_run.set_defaults(func=run_bot)
parser_train = subparsers.add_parser('train', help='adds training data to the bot (using a specific id)')
parser_train.add_argument('--id', type=str, required=True, help='the submission id of the post to review')
parser_train.set_defaults(func=train_id)
parser_batch = subparsers.add_parser('train-batch', help='adds training data to the bot in batches')
parser_batch.add_argument('--limit', type=int, required=True, help='the maximum number of posts to fetch')
parser_batch.set_defaults(func=train_batch)
parser_token = subparsers.add_parser('create-token', help='gets an access token with your client id/secret')
parser_token.set_defaults(func=create_token)
parser_init = subparsers.add_parser('init', help='initialises the database, ready to insert training data')
parser_init.set_defaults(func=initialise_database)
parser_classify = subparsers.add_parser('classify', help='classifies a specific post using the trained data')
parser_classify.add_argument('--id', type=str, required=True, help='the submission id of the post to classify')
parser_classify.set_defaults(func=classify_item)
args = parser.parse_args(sys.argv[1:])
args.func(args)
| mit |
anparser/anparser | anparser/anparser.py | 1 | 22050 | # -*- coding: utf-8 -*-
"""
anparser - an Open Source Android Artifact Parser
Copyright (C) 2015 Chapin Bryce
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Module metadata; __version__ is also logged at startup in the main block.
__author__ = 'cbryce'
__license__ = 'GPLv3'
__date__ = '20150102'
__version__ = '0.00'
# Imports
from collections import OrderedDict
import logging
import os
import sys
import pandas as pd
import plugins
import writers
def scan_for_files(input_dir):
    """
    Iterate across a directory and return a list of files

    :param input_dir: string path to a directory
    :return: Array of file paths, or None if ``input_dir`` is a file
    """
    # TODO: Add ability to filter scanned files by name, path or extension
    if os.path.isfile(input_dir):
        return None
    # Initialize Variables
    file_list = []
    # Iterate
    for root, subdir, files in os.walk(input_dir, topdown=True):
        for file_name in files:
            current_file = os.path.join(root, file_name)
            # On Python 2, os.walk yields byte strings; decode so the rest
            # of the pipeline handles unicode. On Python 3 the paths are
            # already str, and the old unconditional .decode('utf-8') call
            # raised AttributeError -- hence the isinstance guard.
            if isinstance(current_file, bytes):
                current_file = current_file.decode('utf-8')
            file_list.append(current_file)
    return file_list
if __name__ == "__main__":
import argparse
# Handle Command-Line Input
parser = argparse.ArgumentParser(description="Open Source Android Artifact Parser")
parser.add_argument('evidence', help='Directory of Android Acquisition')
parser.add_argument('destination', help='Destination directory to write output files to')
parser.add_argument('-o', help='Output Type: csv, xlsx', default='csv')
parser.add_argument('-y', help='Provide file path to custom Yara rules and run Yara')
parser.add_argument('-s', help='Regular expression searching - supply file path with new line separated '
'searches or type a single search')
args = parser.parse_args()
if not os.path.exists(args.evidence) or not os.path.isdir(args.evidence):
print("Evidence not found...exiting")
sys.exit(1)
if not os.path.exists(args.destination):
os.makedirs(args.destination)
# Sets up Logging
logging.basicConfig(filename=os.path.join(args.destination, 'anparser.log'), level=logging.DEBUG,
format='%(asctime)s | %(levelname)s | %(message)s', filemode='w')
logging.info('Starting Anparser v' + __version__)
logging.info('System ' + sys.platform)
logging.info('Version ' + sys.version)
# Pre-process files
files_to_process = scan_for_files(args.evidence)
#
# Start of Plugin Processing
#
# Run plugins
# Android Browser Parser
msg = 'Processing Android Browser'
logging.info(msg)
print(msg)
browser_bookmarks, browser_history = plugins.sqlite_plugins.android_browser.android_browser(files_to_process)
browser_user_defaults, browser_preferences = plugins.xml_plugins.android_browser.android_browser(files_to_process)
# Android Calendar Parser
msg = 'Processing Android Calendar'
logging.info(msg)
print(msg)
calendar_attendees, calendar_events, calendar_reminders, calendar_tasks = \
plugins.sqlite_plugins.android_calendar.android_calendar(files_to_process)
# Android Chrome Parser
msg = 'Processing Android Chrome'
logging.info(msg)
print(msg)
chrome_cookies, chrome_downloads, chrome_keywords, chrome_urls, chrome_visits = \
plugins.sqlite_plugins.android_chrome.android_chrome(files_to_process)
# Android Contact Parser
msg = 'Processing Android Contacts'
logging.info(msg)
print(msg)
contacts_raw, contacts_accounts, contacts_phone = \
plugins.sqlite_plugins.android_contacts.android_contacts(files_to_process)
# Android Downloads Parser
msg = 'Processing Android Downloads'
logging.info(msg)
print(msg)
downloads_data = plugins.sqlite_plugins.android_downloads.android_downloads(files_to_process)
# Android Emergencymode Parser
msg = 'Processing Android EmergencyMode'
logging.info(msg)
print(msg)
emergency_data = plugins.sqlite_plugins.android_emergencymode.android_emergencymode(files_to_process)
# Android Gallery3d Parser
msg = 'Processing Android Gallery3d'
logging.info(msg)
print(msg)
file_info, gallery_download, gallery_albums, gallery_photos, gallery_users = \
plugins.sqlite_plugins.android_gallery3d.android_gallery3d(files_to_process)
# Android Gmail Parser
# TODO: Add in the android_gmail_message_extractor & parser
msg = 'Processing Android GMail'
logging.info(msg)
print(msg)
gmail_accounts_data = plugins.xml_plugins.android_gmail.android_gmail(files_to_process)
# Android Logsprovider Parser
msg = 'Processing Android Logsprovider'
logging.info(msg)
print(msg)
android_logsprovider_data = plugins.sqlite_plugins.android_logsprovider.android_logsprovider(files_to_process)
# Android Media Parser
msg = 'Processing Android Media'
logging.info(msg)
print(msg)
external_media, internal_media = plugins.sqlite_plugins.android_media.android_media(files_to_process)
# Android MMS Parser
msg = 'Processing Android MMS'
logging.info(msg)
print(msg)
android_mms_events, android_mms_logs = plugins.sqlite_plugins.android_mms.android_mms(files_to_process)
# Android Telephony Parser
msg = 'Processing Android SMS'
logging.info(msg)
print(msg)
telephony_data_sms, telephony_data_threads = \
plugins.sqlite_plugins.android_telephony.android_telephony(files_to_process)
# Android Vending Parser
msg = 'Processing Android Vending'
logging.info(msg)
print(msg)
vending_library, vending_localapp, vending_suggestions = \
plugins.sqlite_plugins.android_vending.android_vending(files_to_process)
vending_data = plugins.xml_plugins.android_vending.android_vending(files_to_process)
# Facebook Parser
msg = 'Processing Facebook'
logging.info(msg)
print(msg)
katana_contact, katana_folder_count, katana_folder, katana_msg, katana_thread_user,\
katana_threads, katana_notifications = plugins.sqlite_plugins.facebook_katana.facebook_katana(files_to_process)
# Facebook Orca (Messenger) Parser
msg = 'Processing Facebook Messenger'
logging.info(msg)
print(msg)
orca_contact, orca_folder_count, orca_folder, orca_msg, orca_thread_user, orca_threads = \
plugins.sqlite_plugins.facebook_orca.facebook_orca(files_to_process)
# Google Docs Parser
msg = 'Processing Google Docs'
logging.info(msg)
print(msg)
google_docs_account, google_docs_collection, google_docs_contains, google_docs_entry = \
plugins.sqlite_plugins.google_docs.google_docs(files_to_process)
# Google Talk Parser
msg = 'Processing Google Talk'
logging.info(msg)
print(msg)
google_talk_data = plugins.xml_plugins.google_talk.google_talk(files_to_process)
# Google Plus Parser
msg = 'Processing Google Plus'
logging.info(msg)
print(msg)
google_plus_photos, google_plus_contacts_search, google_plus_contacts, google_plus_guns = \
plugins.sqlite_plugins.google_plus.google_plus(files_to_process)
google_plus_accounts = plugins.xml_plugins.google_plus.google_plus(files_to_process)
# Infraware Polaris Parser
msg = 'Processing Infraware Polaris'
logging.info(msg)
print(msg)
polaris_contacts, polaris_files, polaris_attendee, polaris_shared, polaris_messages = \
plugins.sqlite_plugins.infraware_office.infraware_office(files_to_process)
polaris_preferences = plugins.xml_plugins.infraware_office.infraware_office(files_to_process)
# Kik Messenger Parser
msg = 'Processing Kik Messenger'
logging.info(msg)
print(msg)
kik_content, kik_contact, kik_messages = plugins.sqlite_plugins.kik_android.kik_android(files_to_process)
kik_preferences_data = plugins.xml_plugins.kik_android.kik_android(files_to_process)
# Samsung Galaxyfinder Parser
msg = 'Processing Samsung Galaxyfinder'
logging.info(msg)
print(msg)
galaxyfinder_content, galaxyfinder_tagging, galaxyfinder_tags =\
plugins.sqlite_plugins.samsung_galaxyfinder.samsung_galaxyfinder(files_to_process)
# Skype Raider Parser
msg = 'Processing Skype'
logging.info(msg)
print(msg)
skype_accounts, skype_call_members, skype_calls, skype_chat_members, skype_chat, skype_contacts,\
skype_conversations, skype_media, skype_messages, skype_participants, skype_transfers =\
plugins.sqlite_plugins.skype_raider.skype_raider(files_to_process)
# Snapchat Parser
msg = 'Processing Snapchat'
logging.info(msg)
print(msg)
snapchat_chat, snapchat_conversation, snapchat_friends, snapchat_storyfiles, snapchat_recvsnaps,\
snapchat_sentsnaps, snapchat_images, snapchat_videos, snapchat_viewing = \
plugins.sqlite_plugins.snapchat_android.snapchat_android(files_to_process)
snapchat_preferences = plugins.xml_plugins.snapchat_android.snapchat_android(files_to_process)
# Teslacoilsw Launcer Parser
msg = 'Processing Teslacoilsw'
logging.info(msg)
print(msg)
tesla_allapps, tesla_favorites = \
plugins.sqlite_plugins.teslacoilsw_launcher.teslacoilsw_launcher(files_to_process)
# Valve Parser
msg = 'Processing Valve'
logging.info(msg)
print(msg)
valve_friends, valve_chat, valve_debug = \
plugins.sqlite_plugins.valvesoftware_android.valvesoftware_android(files_to_process)
valve_preferences = plugins.xml_plugins.valvesoftware_android.valvesoftware_android(files_to_process)
# Venmo Parser
msg = 'Processing Venmo'
logging.info(msg)
print(msg)
venmo_comments, venmo_stories, venmo_people, venmo_users = plugins.sqlite_plugins.venmo.venmo(files_to_process)
venmo_preferences = plugins.xml_plugins.venmo.venmo(files_to_process)
# Vlingo Midas Parser
msg = 'Processing Vlingo Midas'
logging.info(msg)
print(msg)
vlingo_contacts = plugins.sqlite_plugins.vlingo_midas.vlingo_midas(files_to_process)
# Whisper Parser
msg = 'Processing Whisper'
logging.info(msg)
print(msg)
whisper_conversations, whisper_messages, whisper_whispers, whisper_groups, whisper_notifications = \
plugins.sqlite_plugins.sh_whisper.sh_whisper(files_to_process)
whisper_preferences = plugins.xml_plugins.sh_whisper.sh_whisper(files_to_process)
# Yara Malware Parser
if args.y:
msg = 'Running Yara Malware Scanner'
logging.info(msg)
print(msg)
try:
yara_data = plugins.other_plugins.yara_parser.yara_parser(files_to_process, args.y)
except IOError:
pass
# RegEx Searches
if args.s:
msg = 'Running Search module'
logging.info(msg)
print(msg)
search_data = plugins.other_plugins.search_parser.search_parser(files_to_process, args.s)
msg = 'Processors Complete'
logging.info(msg)
print(msg)
#
# End of Plugin Processing
#
#
# Start of Writers
#
msg = 'Writing data to output...'
logging.info(msg)
print(msg)
# Write Contact Data
android_dict = {}
facebook_dict = {}
google_dict = {}
infraware_dict = {}
kik_dict = {}
samsung_dict = {}
skype_dict = {}
snapchat_dict = {}
tesla_dict = {}
valve_dict = {}
venmo_dict = {}
vlingo_dict = {}
whisper_dict = {}
yara_dict = {}
search_dict = {}
android_path = args.destination + '//Android'
android_dict['android_browser_bookmarks'] = browser_bookmarks
android_dict['android_browser_history'] = browser_history
android_dict['android_browser_preferences'] = browser_preferences
android_dict['android_browser_user_defaults'] = browser_user_defaults
android_dict['android_calendar_attendees'] = calendar_attendees
android_dict['android_calendar_events'] = calendar_events
android_dict['android_calendar_reminders'] = calendar_reminders
android_dict['android_calendar_tasks'] = calendar_tasks
android_dict['android_chrome_cookies'] = chrome_cookies
android_dict['android_chrome_downloads'] = chrome_downloads
android_dict['android_chrome_keywords'] = chrome_keywords
android_dict['android_chrome_urls'] = chrome_urls
android_dict['android_chrome_visits'] = chrome_visits
android_dict['android_contacts_rawcontacts'] = contacts_raw
android_dict['android_contacts_accounts'] = contacts_accounts
android_dict['android_contacts_phonelookup'] = contacts_phone
android_dict['android_downloads'] = downloads_data
android_dict['android_emergencymode'] = emergency_data
android_dict['android_gallery3d_fileinfo'] = file_info
android_dict['android_gallery3d_downloads'] = gallery_download
android_dict['android_gallery3d_albums'] = gallery_albums
android_dict['android_gallery3d_photos'] = gallery_photos
android_dict['android_gallery3d_users'] = gallery_users
android_dict['android_gmail_accounts'] = gmail_accounts_data
android_dict['android_logsprovider'] = android_logsprovider_data
android_dict['android_media_external'] = external_media
android_dict['android_media_internal'] = internal_media
android_dict['android_mms_events'] = android_mms_events
android_dict['android_mms_logs'] = android_mms_logs
android_dict['android_telephony_sms'] = telephony_data_sms
android_dict['android_telephony_threads'] = telephony_data_threads
android_dict['android_vending_accounts'] = vending_data
android_dict['android_vending_library'] = vending_library
android_dict['android_vending_localapps'] = vending_localapp
android_dict['android_vending_suggestions'] = vending_suggestions
facebook_path = args.destination + '//Facebook'
facebook_dict['facebook_katana_contacts'] = katana_contact
facebook_dict['facebook_katana_folder_count'] = katana_folder_count
facebook_dict['facebook_katana_folder'] = katana_folder
facebook_dict['facebook_katana_messages'] = katana_msg
facebook_dict['facebook_katana_thread_users'] = katana_thread_user
facebook_dict['facebook_katana_threads'] = katana_threads
facebook_dict['facebook_katana_notifications'] = katana_notifications
facebook_dict['facebook_orca_contacts'] = orca_contact
facebook_dict['facebook_orca_folder_count'] = orca_folder_count
facebook_dict['facebook_orca_folder'] = orca_folder
facebook_dict['facebook_orca_messages'] = orca_msg
facebook_dict['facebook_orca_thread_users'] = orca_thread_user
facebook_dict['facebook_orca_threads'] = orca_threads
google_path = args.destination + '//Google'
google_dict['google_docs_accounts'] = google_docs_account
google_dict['google_docs_collection'] = google_docs_collection
google_dict['google_docs_contains'] = google_docs_contains
google_dict['google_docs_entry'] = google_docs_entry
google_dict['google_talk_accounts'] = google_talk_data
google_dict['google_plus_accounts'] = google_plus_accounts
google_dict['google_plus_photos'] = google_plus_photos
google_dict['google_plus_contact_search'] = google_plus_contacts_search
google_dict['google_plus_contacts'] = google_plus_contacts
google_dict['google_plus_guns'] = google_plus_guns
infraware_path = args.destination + '//Infraware'
infraware_dict['polaris_contacts'] = polaris_contacts
infraware_dict['polaris_files'] = polaris_files
infraware_dict['polaris_attendees'] = polaris_attendee
infraware_dict['polaris_shared_files'] = polaris_shared
infraware_dict['polaris_messages'] = polaris_messages
infraware_dict['polaris_preferences'] = polaris_preferences
kik_path = args.destination + '//Kik'
kik_dict['kik_content'] = kik_content
kik_dict['kik_contacts'] = kik_contact
kik_dict['kik_messages'] = kik_messages
kik_dict['kik_preferences'] = kik_preferences_data
samsung_path = args.destination + '//Samsung'
samsung_dict['samsung_galaxyfinder_content'] = galaxyfinder_content
samsung_dict['samsung_galaxyfinder_tagging'] = galaxyfinder_tagging
samsung_dict['samsung_galaxyfinder_tags'] = galaxyfinder_tags
skype_path = args.destination + '//Skype'
skype_dict['skype_raider_accounts'] = skype_accounts
skype_dict['skype_raider_call_members'] = skype_call_members
skype_dict['skype_raider_calls'] = skype_calls
skype_dict['skype_raider_chat_members'] = skype_chat_members
skype_dict['skype_raider_chats'] = skype_chat
skype_dict['skype_raider_contacts'] = skype_contacts
skype_dict['skype_raider_conversations'] = skype_conversations
skype_dict['skype_raider_media'] = skype_media
skype_dict['skype_raider_messages'] = skype_messages
skype_dict['skype_raider_participants'] = skype_participants
skype_dict['skype_raider_media_transfers'] = skype_transfers
snapchat_path = args.destination + '//Snapchat'
snapchat_dict['snapchat_chat'] = snapchat_chat
snapchat_dict['snapchat_conversation'] = snapchat_conversation
snapchat_dict['snapchat_friends'] = snapchat_friends
snapchat_dict['snapchat_storyfiles'] = snapchat_storyfiles
snapchat_dict['snapchat_recvsnaps'] = snapchat_recvsnaps
snapchat_dict['snapchat_sentsnaps'] = snapchat_sentsnaps
snapchat_dict['snapchat_images'] = snapchat_images
snapchat_dict['snapchat_videos'] = snapchat_videos
snapchat_dict['snapchat_viewingsessions'] = snapchat_viewing
snapchat_dict['snapchat_preferences'] = snapchat_preferences
tesla_path = args.destination + '//Teslacoilsw'
tesla_dict['teslacoilsw_allapps'] = tesla_allapps
tesla_dict['teslacoilsw_favorites'] = tesla_favorites
valve_path = args.destination + '//Valve'
valve_dict['valve_friends'] = valve_friends
valve_dict['valve_chat'] = valve_chat
valve_dict['valve_debug'] = valve_debug
valve_dict['valve_preferences'] = valve_preferences
venmo_path = args.destination + '//Venmo'
venmo_dict['venmo_comments'] = venmo_comments
venmo_dict['venmo_stories'] = venmo_stories
venmo_dict['venmo_people'] = venmo_people
venmo_dict['venmo_users'] = venmo_users
venmo_dict['venmo_preferences'] = venmo_preferences
vlingo_path = args.destination + '//Vlingo'
vlingo_dict['vlingo_midas_contacts'] = vlingo_contacts
whisper_path = args.destination + '//Whisper'
whisper_dict['whisper_conversations'] = whisper_conversations
whisper_dict['whisper_messages'] = whisper_messages
whisper_dict['whisper_posts'] = whisper_whispers
whisper_dict['whisper_groups'] = whisper_groups
whisper_dict['whisper_notifications'] = whisper_notifications
whisper_dict['whisper_preferences'] = whisper_preferences
if args.y:
yara_path = args.destination + '//Yara'
try:
yara_dict['yara_matches'] = yara_data
except NameError:
pass
if args.s:
search_path = args.destination + '//Search'
try:
search_dict['search_matches'] = search_data
except NameError:
pass
if args.o.lower() == 'csv':
writers.csv_writer.csv_writer(android_dict, android_path)
writers.csv_writer.csv_writer(facebook_dict, facebook_path)
writers.csv_writer.csv_writer(google_dict, google_path)
writers.csv_writer.csv_writer(infraware_dict, infraware_path)
writers.csv_writer.csv_writer(kik_dict, kik_path)
writers.csv_writer.csv_writer(samsung_dict, samsung_path)
writers.csv_writer.csv_writer(skype_dict, skype_path)
writers.csv_writer.csv_writer(snapchat_dict, snapchat_path)
writers.csv_writer.csv_writer(tesla_dict, tesla_path)
writers.csv_writer.csv_writer(valve_dict, valve_path)
writers.csv_writer.csv_writer(venmo_dict, venmo_path)
writers.csv_writer.csv_writer(vlingo_dict, vlingo_path)
writers.csv_writer.csv_writer(whisper_dict, whisper_path)
if yara_dict != {}:
writers.csv_writer.csv_writer(yara_dict, yara_path)
if search_dict != {}:
writers.csv_writer.csv_writer(search_dict, search_path)
else:
writers.xlsx_writer.xlsx_writer(android_dict, android_path, 'android.xlsx')
writers.xlsx_writer.xlsx_writer(facebook_dict, facebook_path, 'facebook.xlsx')
writers.xlsx_writer.xlsx_writer(google_dict, google_path, 'google.xlsx')
writers.xlsx_writer.xlsx_writer(infraware_dict, infraware_path, 'infraware.xlsx')
writers.xlsx_writer.xlsx_writer(kik_dict, kik_path, 'kik.xlsx')
writers.xlsx_writer.xlsx_writer(samsung_dict, samsung_path, 'samsung.xlsx')
writers.xlsx_writer.xlsx_writer(skype_dict, skype_path, 'skype.xlsx')
writers.xlsx_writer.xlsx_writer(snapchat_dict, snapchat_path, 'snapchat.xlsx')
writers.xlsx_writer.xlsx_writer(tesla_dict, tesla_path, 'teslacoilsw.xlsx')
writers.xlsx_writer.xlsx_writer(valve_dict, valve_path, 'valve.xlsx')
writers.xlsx_writer.xlsx_writer(venmo_dict, venmo_path, 'venmo.xlsx')
writers.xlsx_writer.xlsx_writer(vlingo_dict, vlingo_path, 'vlingo.xlsx')
writers.xlsx_writer.xlsx_writer(whisper_dict, whisper_path, 'whisper.xlsx')
if yara_dict != {}:
writers.xlsx_writer.xlsx_writer(yara_dict, yara_path, 'yara.xlsx')
if search_dict != {}:
writers.xlsx_writer.xlsx_writer(search_dict, search_path, 'search.xlsx')
msg = 'Completed'
logging.info(msg)
print(msg) | gpl-3.0 |
yevheniyc/Autodidact | 1u_Challenges/nwd_project/nwd_challenge_yev.py | 2 | 4365 | import csv
import pandas as pd
import logging
from nameparser import HumanName
# Location of the run log; each run starts from an empty file.
LOG_FILE = 'logs/log.txt'
# clean previous logs from log file
open(LOG_FILE, 'w').close()
# Route all DEBUG-and-above records into LOG_FILE via the root logger.
logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG)
class FileParser:
"""Parse the file following specified instructions."""
def __init__(self, input_file, output_file, columns_to_split=None):
self.input_file = input_file
self.output_file = output_file
def build(self):
"""Entry point for parsing the input file and generating
parsed output.
"""
df = self.get_dataframe()
parsed_document = self.parsed_document(df)
self.generate_output(parsed_document)
def get_dataframe(self):
"""Use Pandas to read in file and return dataframe object."""
return pd.read_csv(self.input_file, error_bad_lines=False)
def parsed_document(self, df):
"""Splits raw_headline, raw_full_name columns.
Args:
df: pandas dataframe object
Returns:
The list of lists, representing each record, containing the
original data, and the split information for raw_healine and
raw_full_name columns.
"""
final_output = []
for index, row in df.iterrows():
title, company_name = self.split_headline(row)
first_name, last_name = self.split_full_name(row)
final_output.append([
row['id'], row['raw_full_name'], row['raw_headline'],
title, first_name, last_name, company_name
])
return final_output
def split_headline(self, row):
"""Split Headline Column.
Args:
row: raw_headline column for a single record
Returns:
title: string representing position title
column_name: string represting the name of the company
"""
try:
# handle rare occasions of '@' instead of 'at'
if '@' in row['raw_headline']:
splitted_headline = row['raw_headline'].split('@', 1)
else:
splitted_headline = row['raw_headline'].rsplit(' at ', 1)
# check to see if the split event occured
if len(splitted_headline) > 1:
title, company_name = splitted_headline
else:
title = splitted_headline[0]
company_name = 'n/a'
except ValueError as e:
logging.debug(
""">>> title or company_name were missing or improperly parsed for raw_headline => {}
{}""".format(e, row['raw_headline'])
)
title = 'n/a'
company_name = 'n/a'
return (title, company_name)
def split_full_name(self, row):
"""Split Full Name Column.
Args:
row: raw_headline column for a single record
Returns:
first_name: string representing first name of an employee
last_name: string represting last name of an employee
"""
try:
full_name = HumanName(row['raw_full_name'])
first_name = full_name.first
last_name = full_name.last
except ValueError as e:
logging.debug(
""">>> first_name or last_name were missing or improperly properly for raw_full_name => {}
{}""".format(e, row['raw_full_name'])
)
first_name = 'n/a'
last_name = 'n/a'
return (first_name, last_name)
def generate_output(self, parsed_document):
"""Generate the out in the indicated output file.
Args:
parsed_document list: contains all parsed items to be added to the output.
"""
with open(self.output_file, 'w', newline='') as out_f:
writer = csv.writer(out_f)
writer.writerow(['id', 'raw_full_name', 'raw_headline', 'title', 'first_name', 'last_name', 'company_name'])
writer.writerows(parsed_document)
print('>>>> Output has been generated in file: {}'.format(self.output_file))
def _main():
    # Script entry point: parse the challenge input into a dated output CSV.
    parser = FileParser(
        input_file='nwd-code-challenge/nwd-code-challenge-input.csv',
        output_file='my-solution-2018-03-26.csv',
    )
    parser.build()


if __name__ == '__main__':
    _main()
| mit |
yask123/scikit-learn | sklearn/preprocessing/tests/test_data.py | 71 | 38516 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
    """Return ``a`` densified: sparse inputs are converted, others pass through."""
    return a.toarray() if hasattr(a, "toarray") else a
def test_polynomial_features():
    # Test Polynomial Features
    # Degree-3 expansion of one feature: expected columns [1, x, x^2, x^3].
    X1 = np.arange(6)[:, np.newaxis]
    P1 = np.hstack([np.ones_like(X1),
                    X1, X1 ** 2, X1 ** 3])
    deg1 = 3

    # Degree-2 expansion of two features: all monomials x1^i * x2^j, i+j <= 2.
    X2 = np.arange(6).reshape((3, 2))
    x1 = X2[:, :1]
    x2 = X2[:, 1:]
    P2 = np.hstack([x1 ** 0 * x2 ** 0,
                    x1 ** 1 * x2 ** 0,
                    x1 ** 0 * x2 ** 1,
                    x1 ** 2 * x2 ** 0,
                    x1 ** 1 * x2 ** 1,
                    x1 ** 0 * x2 ** 2])
    deg2 = 2

    for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
        P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
        assert_array_almost_equal(P_test, P)

        # without the bias term the constant column is dropped
        P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
        assert_array_almost_equal(P_test, P[:, 1:])

    # interaction_only keeps 1, x1, x2, x1*x2 (columns 0, 1, 2, 4 of P2).
    # NOTE: relies on X still being bound to X2 from the last loop iteration.
    interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
    X_poly = interact.fit_transform(X)
    assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
@ignore_warnings
def test_scaler_1d():
    # Test scaling of dataset along single axis
    rng = np.random.RandomState(0)
    X = rng.randn(5)
    X_orig_copy = X.copy()

    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=False)
    # scaled data must be zero-mean / unit-variance
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)

    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_array_almost_equal(X_scaled_back, X_orig_copy)

    # Test with 1D list
    X = [0., 1., 2, 0.4, 1.]
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)

    # the scale() convenience function must match the estimator
    X_scaled = scale(X)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)

    # constant feature with centering disabled is returned unchanged
    X = np.ones(5)
    assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
    """Test numerical stability of scaling"""
    # np.log(1e-5) is taken because of its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.

    x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    if LooseVersion(np.__version__) >= LooseVersion('1.9'):
        # This does not raise a warning as the number of samples is too low
        # to trigger the problem in recent numpy
        x_scaled = assert_no_warnings(scale, x)
        assert_array_almost_equal(scale(x), np.zeros(8))
    else:
        w = "standard deviation of the data is probably very close to 0"
        x_scaled = assert_warns_message(UserWarning, w, scale, x)
        assert_array_almost_equal(x_scaled, np.zeros(8))

    # with 2 more samples, the std computation run into numerical issues:
    x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    w = "standard deviation of the data is probably very close to 0"
    x_scaled = assert_warns_message(UserWarning, w, scale, x)
    assert_array_almost_equal(x_scaled, np.zeros(10))

    # tiny values: scaling is silent and still yields a centered result
    x = np.ones(10, dtype=np.float64) * 1e-100
    x_small_scaled = assert_no_warnings(scale, x)
    assert_array_almost_equal(x_small_scaled, np.zeros(10))

    # Large values can cause (often recoverable) numerical stability issues:
    x_big = np.ones(10, dtype=np.float64) * 1e100
    w = "Dataset may contain too large values"
    x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
    assert_array_almost_equal(x_big_scaled, np.zeros(10))
    assert_array_almost_equal(x_big_scaled, x_small_scaled)

    x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
                                          with_std=False)
    assert_array_almost_equal(x_big_centered, np.zeros(10))
    assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
    """StandardScaler on 2d data: centering, copy semantics, inverse transform."""
    # Test scaling of 2d array along first axis
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero

    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))

    # the zero-variance column stays at std 0; the rest become unit-variance
    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has been copied
    assert_true(X_scaled is not X)

    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    # axis=1 scales each sample (row) instead of each feature
    X_scaled = scale(X, axis=1, with_std=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
    X_scaled = scale(X, axis=1, with_std=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
    # Check that the data hasn't been modified
    assert_true(X_scaled is not X)

    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is X)

    X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non zero feature
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
    """MinMaxScaler maps iris onto the requested feature_range, invertibly."""
    X = iris.data
    scaler = MinMaxScaler()
    # default params: range [0, 1]
    # (the original test asserted the minimum twice; the duplicate is removed)
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 0)
    assert_array_almost_equal(X_trans.max(axis=0), 1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # not default params: min=1, max=2
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 1)
    assert_array_almost_equal(X_trans.max(axis=0), 2)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # min=-.5, max=.6
    scaler = MinMaxScaler(feature_range=(-.5, .6))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), -.5)
    assert_array_almost_equal(X_trans.max(axis=0), .6)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # raises on invalid range
    scaler = MinMaxScaler(feature_range=(2, 1))
    assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
    # Check min max scaler on toy data with zero variance features
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]

    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]

    # default params: zero-variance columns collapse to 0
    scaler = MinMaxScaler()
    X_trans = scaler.fit_transform(X)
    X_expected_0_1 = [[0., 0., 0.5],
                      [0., 0., 0.0],
                      [0., 0., 1.0]]
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # unseen data may fall outside [0, 1] after transform
    X_trans_new = scaler.transform(X_new)
    X_expected_0_1_new = [[+0., 1., 0.500],
                          [-1., 0., 0.083],
                          [+0., 0., 1.333]]
    assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)

    # not default params
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    X_expected_1_2 = [[1., 1., 1.5],
                      [1., 1., 1.0],
                      [1., 1., 2.0]]
    assert_array_almost_equal(X_trans, X_expected_1_2)

    # function interface must mirror the estimator
    X_trans = minmax_scale(X)
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans = minmax_scale(X, feature_range=(1, 2))
    assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
    """minmax_scale with axis=1 rescales every iris sample into [0, 1]."""
    scaled = minmax_scale(iris.data, axis=1)
    assert_array_almost_equal(np.min(scaled, axis=1), 0)
    assert_array_almost_equal(np.max(scaled, axis=1), 1)
@ignore_warnings
def test_min_max_scaler_1d():
    # Test scaling of dataset along single axis
    rng = np.random.RandomState(0)
    X = rng.randn(5)
    X_orig_copy = X.copy()

    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    # scaled data must span exactly [0, 1]
    assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.max(axis=0), 1.0)

    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_array_almost_equal(X_scaled_back, X_orig_copy)

    # Test with 1D list
    X = [0., 1., 2, 0.4, 1.]
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.max(axis=0), 1.0)

    # Constant feature.
    # Only a containment check: the output range for a constant feature is
    # degenerate, so we merely require it to stay within [0, 1].
    X = np.zeros(5)
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_greater_equal(X_scaled.min(), 0.)
    assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
    """StandardScaler(with_mean=False) on dense and sparse inputs."""
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    # centering sparse data is not supported
    assert_raises(ValueError, StandardScaler().fit, X_csr)

    # a no-op transformer must round-trip the data unchanged
    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)

    scaler = StandardScaler(with_mean=False).fit(X)
    X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))

    scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
    X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
    # NOTE(review): transforms X_csc with scaler_csr rather than scaler_csc —
    # harmless here because both scalers learn identical stats (asserted
    # below), but it looks like a typo; confirm intent.
    X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))

    # dense and sparse fits must agree on the learned statistics
    assert_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csr.std_)

    assert_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csc.std_)

    assert_array_almost_equal(
        X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])

    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)

    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)

    X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
    # test that scaler converts integer input to floating
    # for both sparse and dense matrices
    rng = np.random.RandomState(42)
    X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    clean_warning_registry()
    # warnings are suppressed: integer input triggers conversion warnings
    with warnings.catch_warnings(record=True):
        X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler = StandardScaler(with_mean=False).fit(X)
        X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
        X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
        # NOTE(review): uses scaler_csr on X_csc (not scaler_csc) — works
        # because both learn identical stats, but looks like a typo; confirm.
        X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))

    # dense and sparse fits must agree on the learned statistics
    assert_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csr.std_)

    assert_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csc.std_)

    assert_array_almost_equal(
        X_scaled.mean(axis=0),
        [0., 1.109, 1.856, 21., 1.559], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])

    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
        X_csr_scaled.astype(np.float), 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)

    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)

    X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
    """StandardScaler.fit must never mutate its input, even with copy=False."""
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # constant all-zero first feature
    X_csr = sparse.csr_matrix(X)

    dense_snapshot = X.copy()
    StandardScaler(copy=False).fit(X)
    assert_array_equal(X, dense_snapshot)

    sparse_snapshot = X_csr.copy()
    StandardScaler(with_mean=False, copy=False).fit(X_csr)
    assert_array_equal(X_csr.toarray(), sparse_snapshot.toarray())
def test_scale_sparse_with_mean_raise_exception():
    """Centering sparse input must raise ValueError in every code path."""
    rng = np.random.RandomState(42)
    dense = rng.randn(4, 5)
    sparse_input = sparse.csr_matrix(dense)

    # direct calls on sparse data
    assert_raises(ValueError, scale, sparse_input, with_mean=True)
    assert_raises(ValueError, StandardScaler(with_mean=True).fit, sparse_input)

    # transform / inverse_transform after fitting on the dense array
    scaler = StandardScaler(with_mean=True).fit(dense)
    assert_raises(ValueError, scaler.transform, sparse_input)
    transformed_sparse = sparse.csr_matrix(scaler.transform(dense))
    assert_raises(ValueError, scaler.inverse_transform, transformed_sparse)
def test_scale_input_finiteness_validation():
    """scale() rejects NaN and infinite inputs with an explicit ValueError."""
    expected_message = "Input contains NaN, infinity or a value too large"
    for bad_input in ([np.nan, 5, 6, 7, 8], [np.inf, 5, 6, 7, 8]):
        assert_raises_regex(ValueError, expected_message, scale, bad_input)
def test_scale_function_without_centering():
    """scale(with_mean=False) on dense, CSR and CSC inputs."""
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)

    X_scaled = scale(X, with_mean=False)
    assert_false(np.any(np.isnan(X_scaled)))

    X_csr_scaled = scale(X_csr, with_mean=False)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    # test csc has same outcome
    X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
    assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())

    # raises value error on axis != 0
    assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)

    assert_array_almost_equal(X_scaled.mean(axis=0),
                              [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is not X)

    # sparse result must carry the same per-feature statistics as the dense one
    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
    """Robust scaling of a 2d array centers every feature on its median."""
    rng = np.random.RandomState(0)
    data = rng.randn(4, 5)
    data[:, 0] = 0.0  # constant (all-zero) first feature

    scaled = RobustScaler().fit(data).transform(data)

    assert_array_almost_equal(np.median(scaled, axis=0), 5 * [0.0])
    # the constant column keeps zero spread after scaling
    assert_array_almost_equal(scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
    """RobustScaler yields zero median and unit IQR on iris, invertibly."""
    data = iris.data
    scaler = RobustScaler()
    transformed = scaler.fit_transform(data)

    assert_array_almost_equal(np.median(transformed, axis=0), 0)
    assert_array_almost_equal(data, scaler.inverse_transform(transformed))

    quartiles = np.percentile(transformed, q=(25, 75), axis=0)
    interquartile_range = quartiles[1] - quartiles[0]
    assert_array_almost_equal(interquartile_range, 1)
def test_robust_scale_axis1():
    """robust_scale with axis=1 normalizes each sample's median and IQR."""
    transformed = robust_scale(iris.data, axis=1)
    assert_array_almost_equal(np.median(transformed, axis=1), 0)
    quartiles = np.percentile(transformed, q=(25, 75), axis=1)
    assert_array_almost_equal(quartiles[1] - quartiles[0], 1)
def test_robust_scaler_zero_variance_features():
    """Check RobustScaler on toy data with zero variance features"""
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]

    scaler = RobustScaler()
    X_trans = scaler.fit_transform(X)

    # NOTE: for such a small sample size, what we expect in the third column
    # depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produces by np.percentile
    # using numpy 1.9 Calculating quantiles with
    # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
    # would yield very different results!
    X_expected = [[0., 0., +0.0],
                  [0., 0., -1.0],
                  [0., 0., +1.0]]
    assert_array_almost_equal(X_trans, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    # zero-variance columns pass through shifted; the third uses the fit IQR
    X_expected_new = [[+0., 1., +0.],
                      [-1., 0., -0.83333],
                      [+0., 0., +1.66667]]
    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
    """Check MaxAbsScaler on toy data with zero variance features"""
    X = [[0., 1., +0.5],
         [0., 1., -0.3],
         [0., 1., +1.5],
         [0., 0., +0.0]]

    scaler = MaxAbsScaler()
    X_trans = scaler.fit_transform(X)
    # each column is divided by its max absolute value; all-zero stays zero
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]
    assert_array_almost_equal(X_trans, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    X_expected_new = [[+0., 2.0, 1.0 / 3.0],
                      [-1., 1.0, 0.0],
                      [+0., 1.0, 1.0]]

    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)

    # sparse data: same expectations as the dense path
    X_csr = sparse.csr_matrix(X)
    X_trans = scaler.fit_transform(X_csr)
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]

    assert_array_almost_equal(X_trans.A, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
    """MaxAbsScaler divides each column by its largest absolute value,
    even when that extreme is a large negative number."""
    data = [[0., 1., +0.5, -1.0],
            [0., 1., -0.3, -0.5],
            [0., 1., -100.0, 0.0],
            [0., 0., +0.0, -2.0]]
    expected = [[0., 1., 0.005, -0.5],
                [0., 1., -0.003, -0.25],
                [0., 1., -1.0, 0.0],
                [0., 0., 0.0, -1.0]]
    assert_array_almost_equal(MaxAbsScaler().fit_transform(data), expected)
def test_warning_scaling_integers():
    """Scaling integer data must emit a DataConversionWarning."""
    data = np.array([[1, 2, 0],
                     [0, 0, 0]], dtype=np.uint8)
    expected = "Data with input dtype uint8 was converted to float64"

    clean_warning_registry()
    for scaling_call in (scale, StandardScaler().fit, MinMaxScaler().fit):
        assert_warns_message(DataConversionWarning, expected, scaling_call, data)
def test_normalizer_l1():
    """Normalizer(norm='l1'): each non-empty row sums to 1 in absolute value."""
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):

        normalizer = Normalizer(norm='l1', copy=True)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is not X)
        X_norm1 = toarray(X_norm)

        normalizer = Normalizer(norm='l1', copy=False)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is X)
        X_norm2 = toarray(X_norm)

        for X_norm in (X_norm1, X_norm2):
            row_sums = np.abs(X_norm).sum(axis=1)
            for i in range(3):
                assert_almost_equal(row_sums[i], 1.0)
            assert_almost_equal(row_sums[3], 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # Bug fix: this branch previously normalized with norm='l2' and then
        # asserted against the stale `row_sums` left over from the loop above.
        # Use the l1 norm and recompute row sums on the fresh result.
        X_norm = Normalizer(norm='l1', copy=False).transform(X)

        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
    """Normalizer(norm='l2'): each non-empty row ends up with unit L2 norm."""
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):

        normalizer = Normalizer(norm='l2', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)

        normalizer = Normalizer(norm='l2', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)

        for X_norm in (X_norm1, X_norm2):
            for i in range(3):
                assert_almost_equal(la.norm(X_norm[i]), 1.0)
            assert_almost_equal(la.norm(X_norm[3]), 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # NOTE(review): the chained `X_norm = normalizer = ...` assignment
        # looks accidental — only X_norm is used afterwards.
        X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)

        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        for i in range(3):
            assert_almost_equal(la.norm(X_norm[i]), 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
    """Normalizer(norm='max'): each non-empty row's maximum becomes 1."""
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):

        normalizer = Normalizer(norm='max', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)

        normalizer = Normalizer(norm='max', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)

        for X_norm in (X_norm1, X_norm2):
            row_maxs = X_norm.max(axis=1)
            for i in range(3):
                assert_almost_equal(row_maxs[i], 1.0)
            assert_almost_equal(row_maxs[3], 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # Bug fix: this branch previously applied norm='l2' and then asserted
        # against the stale `row_maxs` left over from the loop above. Use the
        # max norm and recompute the row maxima on the fresh result.
        X_norm = Normalizer(norm='max', copy=False).transform(X)

        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(row_maxs[3], 0.0)
def test_normalize():
    """Exercise normalize() paths not already covered via the Normalizer tests."""
    data = np.random.RandomState(37).randn(3, 2)
    # normalizing columns of the transpose must equal normalizing rows
    by_rows = normalize(data, copy=False)
    by_columns_of_transpose = normalize(data.T, axis=0, copy=False).T
    assert_array_equal(by_rows, by_columns_of_transpose)
    # invalid axis / norm arguments are rejected
    assert_raises(ValueError, normalize, [[0]], axis=2)
    assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
    """Binarizer thresholds dense, list and sparse inputs consistently."""
    X_ = np.array([[1, 0, 5], [2, 3, -1]])

    for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):

        X = init(X_.copy())

        # threshold=2: only the two values > 2 become 1
        binarizer = Binarizer(threshold=2.0, copy=True)
        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 4)
        assert_equal(np.sum(X_bin == 1), 2)
        X_bin = binarizer.transform(X)
        # sparseness of the output matches the input
        assert_equal(sparse.issparse(X), sparse.issparse(X_bin))

        # default threshold=0: four positive values become 1
        binarizer = Binarizer(copy=True).fit(X)
        X_bin = toarray(binarizer.transform(X))
        assert_true(X_bin is not X)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

        binarizer = Binarizer(copy=True)
        X_bin = binarizer.transform(X)
        assert_true(X_bin is not X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

        # copy=False returns the same object (except for plain lists)
        binarizer = Binarizer(copy=False)
        X_bin = binarizer.transform(X)
        if init is not list:
            assert_true(X_bin is X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

    binarizer = Binarizer(threshold=-0.5, copy=True)
    for init in (np.array, list):
        X = init(X_.copy())

        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 1)
        assert_equal(np.sum(X_bin == 1), 5)
        X_bin = binarizer.transform(X)

    # Cannot use threshold < 0 for sparse
    assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
    # Test that KernelCenterer is equivalent to StandardScaler
    # in feature space
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    scaler = StandardScaler(with_std=False)
    scaler.fit(X_fit)
    X_fit_centered = scaler.transform(X_fit)
    # linear kernel of the fit data
    K_fit = np.dot(X_fit, X_fit.T)

    # center fit time matrix
    centerer = KernelCenterer()
    K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
    K_fit_centered2 = centerer.fit_transform(K_fit)
    assert_array_almost_equal(K_fit_centered, K_fit_centered2)

    # center predict time matrix
    X_pred = rng.random_sample((2, 4))
    K_pred = np.dot(X_pred, X_fit.T)
    X_pred_centered = scaler.transform(X_pred)
    # centering the kernel must match computing the kernel of centered data
    K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
    K_pred_centered2 = centerer.transform(K_pred)
    assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_fit_transform():
    """fit(X).transform(X) and fit_transform(X) must agree for each estimator."""
    rng = np.random.RandomState(0)
    data = rng.random_sample((5, 4))
    for transformer in (StandardScaler(), Normalizer(), Binarizer()):
        two_step = transformer.fit(data).transform(data)
        one_step = transformer.fit_transform(data)
        assert_array_equal(two_step, one_step)
def test_add_dummy_feature():
    # A bias column of ones is prepended to a dense (list-of-lists) input.
    augmented = add_dummy_feature([[1, 0], [0, 1], [0, 1]])
    assert_array_equal(augmented, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
    # COO input: the sparse format is preserved and the bias column added.
    X_sparse = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
    X_sparse = add_dummy_feature(X_sparse)
    assert_true(sparse.isspmatrix_coo(X_sparse), X_sparse)
    assert_array_equal(X_sparse.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
    # CSC input: the sparse format is preserved and the bias column added.
    X_sparse = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
    X_sparse = add_dummy_feature(X_sparse)
    assert_true(sparse.isspmatrix_csc(X_sparse), X_sparse)
    assert_array_equal(X_sparse.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
    # CSR input: the sparse format is preserved and the bias column added.
    X_sparse = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
    X_sparse = add_dummy_feature(X_sparse)
    assert_true(sparse.isspmatrix_csr(X_sparse), X_sparse)
    assert_array_equal(X_sparse.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
    # Exercise OneHotEncoder's fit and transform in its default sparse mode.
    X = [[3, 2, 1], [0, 1, 1]]
    encoder = OneHotEncoder()

    # Discover the max values automatically.
    transformed = encoder.fit_transform(X).toarray()
    assert_equal(transformed.shape, (2, 5))
    assert_array_equal(encoder.active_features_,
                       np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
    assert_array_equal(encoder.feature_indices_, [0, 4, 7, 9])

    # Check the encoded values themselves.
    assert_array_equal(transformed,
                       [[0., 1., 0., 1., 1.],
                        [1., 0., 1., 0., 1.]])

    # Max value given as 4 for all features.
    encoder = OneHotEncoder(n_values=4)
    transformed = encoder.fit_transform(X)
    assert_equal(transformed.shape, (2, 4 * 3))
    assert_array_equal(encoder.feature_indices_, [0, 4, 8, 12])

    # Max value given per feature.
    encoder = OneHotEncoder(n_values=[3, 2, 2])
    X = [[1, 0, 1], [0, 1, 1]]
    transformed = encoder.fit_transform(X)
    assert_equal(transformed.shape, (2, 3 + 2 + 2))
    assert_array_equal(encoder.n_values_, [3, 2, 2])

    # Transforming with a larger, still in-bounds feature value works.
    X = np.array([[2, 0, 1], [0, 1, 1]])
    encoder.transform(X)

    # An out-of-bounds value raises at transform time...
    X_too_large = [[0, 2, 1], [0, 1, 1]]
    assert_raises(ValueError, encoder.transform, X_too_large)
    assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)

    # ...as does the wrong number of features at transform time...
    assert_raises(ValueError, encoder.transform, X[:, :-1])
    # ...and at fit time when n_values is prespecified.
    assert_raises(ValueError, encoder.fit, X[:, :-1])

    # Wrong type for the n_values init parameter.
    assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)

    encoder = OneHotEncoder()
    # Negative values are rejected at fit time...
    assert_raises(ValueError, encoder.fit, [[0], [-1]])
    # ...and at transform time.
    encoder.fit([[0], [1]])
    assert_raises(ValueError, encoder.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
    # Check the sparse=False variant of OneHotEncoder.
    X = [[3, 2, 1], [0, 1, 1]]
    encoder = OneHotEncoder(sparse=False)

    # Discover the max values automatically; the result is a dense ndarray.
    transformed = encoder.fit_transform(X)
    assert_equal(transformed.shape, (2, 5))
    assert_array_equal(encoder.active_features_,
                       np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
    assert_array_equal(encoder.feature_indices_, [0, 4, 7, 9])

    # Check the encoded values themselves.
    assert_array_equal(transformed,
                       np.array([[0., 1., 0., 1., 1.],
                                 [1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
    # Run both the dense and the CSR code paths through _transform_selected
    # and verify each produces the expected result.
    for container in (X, sparse.csr_matrix(X)):
        transformed = _transform_selected(container, Binarizer().transform, sel)
        assert_array_equal(toarray(transformed), X_expected)
def test_transform_selected():
    X = [[3, 2, 1], [0, 1, 1]]

    # Binarize only the first column; the others pass through untouched.
    X_expected = [[1, 2, 1], [0, 1, 1]]
    _check_transform_selected(X, X_expected, [0])
    _check_transform_selected(X, X_expected, [True, False, False])

    # Binarize every column: by index list, by mask, and via "all".
    X_expected = [[1, 1, 1], [0, 1, 1]]
    _check_transform_selected(X, X_expected, [0, 1, 2])
    _check_transform_selected(X, X_expected, [True, True, True])
    _check_transform_selected(X, X_expected, "all")

    # Selecting nothing leaves the input unchanged.
    _check_transform_selected(X, X, [])
    _check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
    # Fit an encoder on X, then transform both X and X2 with it.
    encoder = OneHotEncoder(categorical_features=cat)
    return encoder.fit_transform(X), encoder.transform(X2)
def _check_one_hot(X, X2, cat, n_features):
    ind = np.where(cat)[0]
    # Encode using a boolean mask...
    A, B = _run_one_hot(X, X2, cat)
    # ...and using the equivalent index array.
    C, D = _run_one_hot(X, X2, ind)

    # Each output has the expected shape...
    for result, n_rows in ((A, 2), (B, 1), (C, 2), (D, 1)):
        assert_equal(result.shape, (n_rows, n_features))

    # ...and mask and indices give the same results.
    assert_array_equal(toarray(A), toarray(C))
    assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
    X = np.array([[3, 2, 1], [0, 1, 1]])
    X2 = np.array([[1, 1, 1]])

    # Only the first column is categorical.
    _check_one_hot(X, X2, [True, False, False], 4)
    # Edge case: no categorical columns at all.
    _check_one_hot(X, X2, [False, False, False], 3)
    # Edge case: every column is categorical.
    _check_one_hot(X, X2, [True, True, True], 5)
def test_one_hot_encoder_unknown_transform():
    X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
    y = np.array([[4, 1, 1]])

    # handle_unknown='error': features unseen at fit time raise on transform.
    oh = OneHotEncoder(handle_unknown='error')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)

    # handle_unknown='ignore': unknown features are silently dropped.
    oh = OneHotEncoder(handle_unknown='ignore')
    oh.fit(X)
    assert_array_equal(
        oh.transform(y).toarray(),
        np.array([[0., 0., 0., 0., 1., 0., 0.]])
    )

    # Any other handle_unknown value is rejected at transform time.
    oh = OneHotEncoder(handle_unknown='42')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
jiajunshen/partsNet | pnet/permutation_mm.py | 1 | 9041 | from __future__ import division, print_function, absolute_import
import numpy as np
import itertools as itr
import amitgroup as ag
from scipy.special import logit
from scipy.misc import logsumexp
from sklearn.base import BaseEstimator
import time
class PermutationMM(BaseEstimator):
    """
    A Bernoulli mixture model with the option of a latent permutation. Each
    sample gets transformed a number of times into a set of blocks. The
    parameter space is similarly divided into blocks, which when trained
    will represent the same transformations. The latent permutation
    dictates what parameter block a sample block should be tested against.

    Parameters
    ----------
    n_components : int, optional
        Number of mixture components. Defaults to 1.
    permutations : int or array, optional
        If integer, the number of permutations should be specified and a cyclic
        permutation will be automatically built. If an array, each row is a
        permutation of the blocks.
    random_state : RandomState or an int seed (0 by default)
        A random number generator instance
    min_probability : float, optional
        Floor (and, mirrored, ceiling) for the Bernoulli mean parameters.
    thresh : float, optional
        Convergence threshold on the relative log-likelihood improvement.
    n_iter : int, optional
        Number of EM iterations to perform
    n_init : int, optional
        Number of random initializations to perform with
        the best kept.

    Attributes
    ----------
    `weights_` : array, shape (`n_components`, `n_permutations`)
        Stores the mixing weights for each component/permutation pair.
    `means_` : array, shape (`n_components`, `n_permutations`, `n_features`)
        Bernoulli mean parameters for each mixture component and block.
    `converged_` : bool
        True when convergence was reached in fit(), False otherwise.
    """

    def __init__(self, n_components=1, permutations=1, n_iter=20, n_init=1,
                 random_state=0, min_probability=0.05, thresh=1e-8):
        if not isinstance(random_state, np.random.RandomState):
            random_state = np.random.RandomState(random_state)
        self.random_state = random_state
        self.n_components = n_components
        if isinstance(permutations, int):
            # Build the cyclic group of shifts.
            # BUGFIX: the table must be integral -- its entries are used as
            # numpy indices (``X[:, p0]``, ``means_[:, self.permutations]``),
            # which rejects the float dtype np.zeros defaults to.
            P = permutations
            self.permutations = np.zeros((P, P), dtype=int)
            for p1, p2 in itr.product(range(P), range(P)):
                self.permutations[p1, p2] = (p1 + p2) % P
        else:
            self.permutations = np.asarray(permutations)
        self.n_iter = n_iter
        self.n_init = n_init
        self.min_probability = min_probability
        self.thresh = thresh

        self.weights_ = None
        self.means_ = None

    def score_block_samples(self, X):
        """
        Score complete samples according to the full model. This means that
        each sample has all its blocks with the different transformations
        for each permutation.

        Parameters
        ----------
        X : ndarray
            Array of samples. Must have shape `(N, P, D)`, where `N` are number
            of samples, `P` number of permutations and `D` number of dimensions
            (flattened if multi-dimensional).

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each full data point in X.
        log_responsibilities : array_like, shape (n_samples, n_components, n_permutations)
            Log posterior probabilities of each mixture component and
            permutation for each observation.
        """
        N = X.shape[0]
        K = self.n_components
        P = len(self.permutations)

        unorm_log_resp = np.empty((N, K, P))
        unorm_log_resp[:] = np.log(self.weights_[np.newaxis])
        # Bernoulli log-likelihood written as x*logit(mu) + log(1-mu):
        # the data-dependent part is a dot product per (block, shift) pair...
        for p in range(P):
            for shift in range(P):
                p0 = self.permutations[shift, p]
                unorm_log_resp[:, :, p] += np.dot(X[:, p0], logit(self.means_[:, shift]).T)
        # ...and the data-independent part is added once per component.
        unorm_log_resp += np.log(1 - self.means_[:, self.permutations]).sum(2).sum(2)

        # Normalize over (component, permutation) jointly; clip to avoid
        # -inf responsibilities downstream.
        logprob = logsumexp(unorm_log_resp.reshape((unorm_log_resp.shape[0], -1)), axis=-1)
        log_resp = (unorm_log_resp - logprob[..., np.newaxis, np.newaxis]).clip(min=-500)
        return logprob, log_resp

    def fit(self, X):
        """
        Estimate model parameters with the expectation-maximization algorithm.
        Parameters are set when constructing the estimator class.

        Parameters
        ----------
        X : array_like, shape (n, n_permutations, n_features)
            Array of samples, where each sample has been transformed
            `n_permutations` times.
        """
        print(X.shape)
        assert X.ndim == 3
        N, P, F = X.shape
        assert P == len(self.permutations)
        K = self.n_components
        eps = self.min_probability

        max_log_prob = -np.inf
        for trial in range(self.n_init):
            self.weights_ = np.ones((K, P)) / (K * P)
            # Initialize by picking K components at random.
            repr_samples = X[self.random_state.choice(N, K, replace=False)]
            self.means_ = repr_samples.clip(eps, 1 - eps)

            loglikelihoods = []
            self.converged_ = False
            for loop in range(self.n_iter):
                # BUGFIX: time.clock() was removed in Python 3.8; use a
                # monotonic high-resolution timer instead.
                start = time.perf_counter()

                # E-step
                logprob, log_resp = self.score_block_samples(X)
                resp = np.exp(log_resp)
                # Total responsibility mass per component (summed over
                # samples and permutations), used as the M-step denominator.
                log_dens = logsumexp(log_resp.transpose((0, 2, 1)).reshape((-1, log_resp.shape[1])), axis=0)[np.newaxis, :, np.newaxis]
                dens = np.exp(log_dens)

                # M-step
                for p in range(P):
                    v = 0.0
                    for shift in range(P):
                        p0 = self.permutations[shift, p]
                        v += np.dot(resp[:, :, shift].T, X[:, p0])
                    self.means_[:, p, :] = v
                self.means_ /= dens.ravel()[:, np.newaxis, np.newaxis]
                self.means_[:] = self.means_.clip(eps, 1 - eps)
                self.weights_[:] = (np.apply_over_axes(np.sum, resp, [0])[0, :, :] / N).clip(0.0001, 1 - 0.0001)

                # Calculate log likelihood
                loglikelihoods.append(logprob.sum())
                ag.info("Trial {trial}/{n_trials} Iteration {iter} Time {time:.2f}s Log-likelihood {llh}".format(trial=trial + 1,
                                                                                                                 n_trials=self.n_init,
                                                                                                                 iter=loop + 1,
                                                                                                                 time=time.perf_counter() - start,
                                                                                                                 llh=loglikelihoods[-1]))
                # BUGFIX: the convergence test was guarded by ``trial > 0``,
                # so trial 0 could never converge and later trials indexed
                # loglikelihoods[-2] on a length-1 list (IndexError). The
                # guard must depend on the EM iteration, not the restart.
                if loop > 0 and abs(loglikelihoods[-1] - loglikelihoods[-2]) / abs(loglikelihoods[-2]) < self.thresh:
                    self.converged_ = True
                    break

            # Keep the parameters of the best restart.
            if loglikelihoods[-1] > max_log_prob:
                ag.info("Updated best log likelihood to {0}".format(loglikelihoods[-1]))
                max_log_prob = loglikelihoods[-1]
                best_params = {'weights': self.weights_,
                               'means': self.means_,
                               'converged': self.converged_}

        self.weights_ = best_params['weights']
        self.means_ = best_params['means']
        self.converged_ = best_params['converged']

    def predict_flat(self, X):
        """
        Returns an array of which mixture component each data entry is
        associate with the most. This is similar to `predict`, except it
        collapses component and permutation to a single index.

        Parameters
        ----------
        X : ndarray
            Data array to predict.

        Returns
        -------
        components : list
            An array of length `num_data` where `components[i]` indicates
            the argmax of the posteriors, flattened as
            ``component * n_permutations + permutation``.
        """
        logprob, log_resp = self.score_block_samples(X)
        ii = log_resp.reshape((log_resp.shape[0], -1)).argmax(-1)
        return ii

    def predict(self, X):
        """
        Returns a 2D array of which mixture component each data entry is
        associate with the most.

        Parameters
        ----------
        X : ndarray
            Data array to predict.

        Returns
        -------
        components : list
            An array of shape `(num_data, 2)` where `components[i]` indicates
            the argmax of the posteriors. For each sample, we have two values,
            the first is the part and the second is the permutation.
        """
        ii = self.predict_flat(X)
        return np.vstack(np.unravel_index(ii, (self.n_components, len(self.permutations)))).T
| bsd-3-clause |
CognitiveRobotics/rpg_svo | svo_analysis/src/svo_analysis/analyse_trajectory.py | 17 | 8764 | #!/usr/bin/python
import os
import yaml
import argparse
import numpy as np
import matplotlib.pyplot as plt
import svo_analysis.tum_benchmark_tools.associate as associate
import vikit_py.transformations as transformations
import vikit_py.align_trajectory as align_trajectory
from matplotlib import rc
# Render all figure text with LaTeX in a serif (Cardo) font.
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
def plot_translation_error(timestamps, translation_error, results_dir):
    """Plot per-axis position drift (in mm) and save it as a PDF."""
    t = timestamps - timestamps[0]
    fig = plt.figure(figsize=(8, 2.5))
    ax = fig.add_subplot(111, xlabel='time [s]', ylabel='position drift [mm]',
                         xlim=[0, timestamps[-1] - timestamps[0] + 4])
    # One curve per axis; drift is converted from metres to millimetres.
    for axis, (label, style) in enumerate((('x', 'r-'), ('y', 'g-'), ('z', 'b-'))):
        ax.plot(t, translation_error[:, axis] * 1000, style, label=label)
    ax.legend()
    fig.tight_layout()
    fig.savefig(results_dir + '/translation_error.pdf')
def plot_rotation_error(timestamps, rotation_error, results_dir):
    """Plot yaw/pitch/roll orientation drift (in rad) and save it as a PDF."""
    t = timestamps - timestamps[0]
    fig = plt.figure(figsize=(8, 2.5))
    ax = fig.add_subplot(111, xlabel='time [s]', ylabel='orientation drift [rad]',
                         xlim=[0, timestamps[-1] - timestamps[0] + 4])
    for axis, (label, style) in enumerate((('yaw', 'r-'), ('pitch', 'g-'), ('roll', 'b-'))):
        ax.plot(t, rotation_error[:, axis], style, label=label)
    ax.legend()
    fig.tight_layout()
    fig.savefig(results_dir + '/orientation_error.pdf')
def analyse_synthetic_trajectory(results_dir):
    """Plot the precomputed error files of a synthetic (Blender) dataset.

    Synthetic datasets ship ``translation_error.txt`` and
    ``orientation_error.txt`` with columns [t, ex, ey, ez]; this just loads
    and plots them.
    """
    for filename, plotter in (('translation_error.txt', plot_translation_error),
                              ('orientation_error.txt', plot_rotation_error)):
        data = np.loadtxt(os.path.join(results_dir, filename))
        plotter(data[:, 0], data[:, 1:4], results_dir)
def analyse_optitrack_trajectory_with_hand_eye_calib(results_dir, params, n_align_frames=200):
    """Evaluate an estimated trajectory against Optitrack groundtruth.

    Uses the hand-eye calibration stored in ``params`` to express the
    estimate in the marker frame, fits a Sim(3) on the first
    ``n_align_frames`` frames to recover scale, then writes translation,
    orientation and scale drift plots plus a top-down trajectory plot into
    ``results_dir``.
    """
    print('loading hand-eye-calib')
    T_cm_quat = np.array([params['hand_eye_calib']['Tcm_qx'],
                          params['hand_eye_calib']['Tcm_qy'],
                          params['hand_eye_calib']['Tcm_qz'],
                          params['hand_eye_calib']['Tcm_qw']])
    T_cm_tran = np.array([params['hand_eye_calib']['Tcm_tx'],
                          params['hand_eye_calib']['Tcm_ty'],
                          params['hand_eye_calib']['Tcm_tz']])
    T_cm = get_rigid_body_trafo(T_cm_quat, T_cm_tran)
    T_mc = transformations.inverse_matrix(T_cm)
    t_es, p_es, q_es, t_gt, p_gt, q_gt = load_dataset(results_dir, params['cam_delay'])

    # Align Sim3 to recover the scale of the monocular estimate.
    print('align Sim3 using ' + str(n_align_frames) + ' first frames.')
    scale, rot, trans = align_trajectory.align_sim3(p_gt[0:n_align_frames, :], p_es[0:n_align_frames, :])
    # BUGFIX: these four prints were Python 2 print *statements* in an
    # otherwise print()-style file; under Python 3 the module would not parse.
    print('scale = ' + str(scale))

    # Get trafo between (v)ision and (o)ptitrack frame.
    print(q_gt[0, :])
    print(p_gt[0, :])
    T_om = get_rigid_body_trafo(q_gt[0, :], p_gt[0, :])
    T_vc = get_rigid_body_trafo(q_es[0, :], scale * p_es[0, :])
    T_cv = transformations.inverse_matrix(T_vc)
    T_ov = np.dot(T_om, np.dot(T_mc, T_cv))
    print('T_ov = ' + str(T_ov))

    # Apply transformation to the estimated trajectory.
    q_es_aligned = np.zeros(np.shape(q_es))
    rpy_es_aligned = np.zeros(np.shape(p_es))
    rpy_gt = np.zeros(np.shape(p_es))
    p_es_aligned = np.zeros(np.shape(p_es))
    for i in range(np.shape(p_es)[0]):
        T_vc = get_rigid_body_trafo(q_es[i, :], p_es[i, :])
        T_vc[0:3, 3] *= scale
        T_om = np.dot(T_ov, np.dot(T_vc, T_cm))
        p_es_aligned[i, :] = T_om[0:3, 3]
        q_es_aligned[i, :] = transformations.quaternion_from_matrix(T_om)
        rpy_es_aligned[i, :] = transformations.euler_from_quaternion(q_es_aligned[i, :], 'rzyx')
        rpy_gt[i, :] = transformations.euler_from_quaternion(q_gt[i, :], 'rzyx')

    # Plot position error (drift).
    translation_error = (p_gt - p_es_aligned)
    plot_translation_error(t_es, translation_error, results_dir)

    # Plot orientation error (drift).
    orientation_error = (rpy_gt - rpy_es_aligned)
    plot_rotation_error(t_es, orientation_error, results_dir)

    # Plot scale drift.
    # BUGFIX: np.diff(x, 0) means "zero differences" and returns x unchanged,
    # so the old code compared positions instead of frame-to-frame motion.
    # Differencing must happen along the time axis (axis=0).
    motion_gt = np.diff(p_gt, axis=0)
    motion_es = np.diff(p_es_aligned, axis=0)
    dist_gt = np.sqrt(np.sum(np.multiply(motion_gt, motion_gt), 1))
    dist_es = np.sqrt(np.sum(np.multiply(motion_es, motion_es), 1))
    fig = plt.figure(figsize=(8, 2.5))
    ax = fig.add_subplot(111, xlabel='time [s]', ylabel='scale change [\%]', xlim=[0, t_es[-1] + 4])
    scale_drift = np.divide(dist_es, dist_gt) * 100 - 100
    # The n-th motion segment ends at timestamp t_es[n+1].
    ax.plot(t_es[1:], scale_drift, 'b-')
    fig.tight_layout()
    fig.savefig(results_dir + '/scale_drift.pdf')

    # Plot the trajectory (top-down view).
    fig = plt.figure()
    ax = fig.add_subplot(111, title='trajectory', aspect='equal', xlabel='x [m]', ylabel='y [m]')
    ax.plot(p_es_aligned[:, 0], p_es_aligned[:, 1], 'b-', label='estimate')
    ax.plot(p_gt[:, 0], p_gt[:, 1], 'r-', label='groundtruth')
    ax.plot(p_es_aligned[0:n_align_frames, 0], p_es_aligned[0:n_align_frames, 1], 'g-', linewidth=2, label='aligned')
    ax.legend()
    fig.tight_layout()
    fig.savefig(results_dir + '/trajectory.pdf')
def analyse_trajectory(results_dir, n_align_frames=200, use_hand_eye_calib=True):
    """Dispatch trajectory analysis based on the dataset parameters.

    Reads ``dataset_params.yaml`` from ``results_dir`` and runs either the
    synthetic (Blender) pipeline, the hand-eye-calibrated Optitrack
    pipeline, or a plain Sim(3)-aligned comparison.
    """
    # BUGFIX: yaml.load without an explicit Loader constructs arbitrary
    # Python objects and is an error on PyYAML >= 6; the params file is
    # plain data, so safe_load suffices. Also close the file handle, which
    # the old code leaked.
    with open(os.path.join(results_dir, 'dataset_params.yaml'), 'r') as f:
        params = yaml.safe_load(f)
    if params['dataset_is_blender']:
        analyse_synthetic_trajectory(results_dir)
    elif use_hand_eye_calib:
        analyse_optitrack_trajectory_with_hand_eye_calib(results_dir, params, n_align_frames)
    else:
        t_es, p_es, q_es, t_gt, p_gt, q_gt = load_dataset(results_dir, params['cam_delay'])
        scale, rot, trans = align_trajectory.align_sim3(p_gt[0:n_align_frames, :], p_es[0:n_align_frames, :])
        p_es_aligned = np.zeros(np.shape(p_es))
        for i in range(np.shape(p_es)[0]):
            p_es_aligned[i, :] = scale * rot.dot(p_es[i, :]) + trans

        # Plot position error (drift).
        translation_error = (p_gt - p_es_aligned)
        plot_translation_error(t_es, translation_error, results_dir)
def get_rigid_body_trafo(quat, trans):
    """Assemble a 4x4 homogeneous transform from a quaternion and translation."""
    trafo = transformations.quaternion_matrix(quat)
    trafo[0:3, 3] = trans
    return trafo
def load_dataset(results_dir, cam_delay):
    """Load and time-associate groundtruth and estimated trajectories.

    Both ``groundtruth.txt`` and ``traj_estimate.txt`` hold rows of
    ``timestamp x y z qx qy qz qw`` (comma, tab or space separated, '#'
    lines are comments). Rows are matched with an offset of ``-cam_delay``
    and a 20 ms window.

    Returns
    -------
    t_es, p_es, q_es, t_gt, p_gt, q_gt :
        Matched timestamps, positions and quaternions of estimate and
        groundtruth, timestamps shifted so the earliest sample is at t = 0.
    """
    print('loading dataset in ' + results_dir)
    print('cam_delay = ' + str(cam_delay))

    def _read_trajectory(filename):
        # Parse one trajectory file into {timestamp: [x, y, z, qx, qy, qz, qw]}.
        # BUGFIX: np.float was removed from numpy (>= 1.24); the builtin
        # float is what it aliased anyway.
        with open(os.path.join(results_dir, filename)) as f:
            text = f.read()
        lines = text.replace(",", " ").replace("\t", " ").split("\n")
        rows = np.array([[float(v.strip()) for v in line.split(" ") if v.strip() != ""]
                         for line in lines if len(line) > 0 and line[0] != "#"])
        return dict((float(row[0]), row[1:]) for row in rows)

    data_gt = _read_trajectory('groundtruth.txt')
    data_es = _read_trajectory('traj_estimate.txt')

    matches = associate.associate(data_gt, data_es, -cam_delay, 0.02)
    p_gt = np.array([[float(value) for value in data_gt[a][0:3]] for a, b in matches])
    q_gt = np.array([[float(value) for value in data_gt[a][3:7]] for a, b in matches])
    p_es = np.array([[float(value) for value in data_es[b][0:3]] for a, b in matches])
    q_es = np.array([[float(value) for value in data_es[b][3:7]] for a, b in matches])
    t_gt = np.array([float(a) for a, b in matches])
    t_es = np.array([float(b) for a, b in matches])

    # Set start time to zero.
    start_time = min(t_es[0], t_gt[0])
    t_es -= start_time
    t_gt -= start_time
    return t_es, p_es, q_es, t_gt, p_gt, q_gt
if __name__ == '__main__':
    # Command-line entry point: analyse one results folder.
    parser = argparse.ArgumentParser(description='''
Analyse trajectory
''')
    parser.add_argument('results_dir', help='folder with the results')
    parser.add_argument('--use_hand_eye_calib', help='', action='store_true')
    parser.add_argument('--n_align_frames', help='', default=200)
    args = parser.parse_args()

    print('analyse trajectory for dataset: ' + str(args.results_dir))
    analyse_trajectory(args.results_dir,
                       n_align_frames=int(args.n_align_frames),
                       use_hand_eye_calib=args.use_hand_eye_calib)
trankmichael/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
    """Dimensionality reduction using truncated SVD (aka LSA).

    This transformer performs linear dimensionality reduction by means of
    truncated singular value decomposition (SVD). It is very similar to PCA,
    but operates on sample vectors directly, instead of on a covariance matrix.
    This means it can work with scipy.sparse matrices efficiently.

    In particular, truncated SVD works on term count/tf-idf matrices as
    returned by the vectorizers in sklearn.feature_extraction.text. In that
    context, it is known as latent semantic analysis (LSA).

    This estimator supports two algorithms: a fast randomized SVD solver, and
    a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
    (X.T * X), whichever is more efficient.

    Read more in the :ref:`User Guide <LSA>`.

    Parameters
    ----------
    n_components : int, default = 2
        Desired dimensionality of output data.
        Must be strictly less than the number of features.
        The default value is useful for visualisation. For LSA, a value of
        100 is recommended.

    algorithm : string, default = "randomized"
        SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
        (scipy.sparse.linalg.svds), or "randomized" for the randomized
        algorithm due to Halko (2009).

    n_iter : int, optional
        Number of iterations for randomized SVD solver. Not used by ARPACK.

    random_state : int or RandomState, optional
        (Seed for) pseudo-random number generator. If not given, the
        numpy.random singleton is used.

    tol : float, optional
        Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
        SVD solver.

    Attributes
    ----------
    components_ : array, shape (n_components, n_features)

    explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.

    explained_variance_ : array, [n_components]
        The variance of the training samples transformed by a projection to
        each component.

    Examples
    --------
    >>> from sklearn.decomposition import TruncatedSVD
    >>> from sklearn.random_projection import sparse_random_matrix
    >>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
    >>> svd = TruncatedSVD(n_components=5, random_state=42)
    >>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
    TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
            random_state=42, tol=0.0)
    >>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
    [ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
    >>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
    0.27930...

    See also
    --------
    PCA
    RandomizedPCA

    References
    ----------
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061

    Notes
    -----
    SVD suffers from a problem called "sign indeterminancy", which means the
    sign of the ``components_`` and the output from transform depend on the
    algorithm and random state. To work around this, fit instances of this
    class to data once, then keep the instance around to do transformations.

    """
    def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
                 random_state=None, tol=0.):
        self.algorithm = algorithm
        self.n_components = n_components
        self.n_iter = n_iter
        self.random_state = random_state
        self.tol = tol

    def fit(self, X, y=None):
        """Fit LSI model on training data X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : object
            Returns the transformer object.
        """
        self.fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Fit LSI model to X and perform dimensionality reduction on X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = as_float_array(X, copy=False)
        random_state = check_random_state(self.random_state)

        # If sparse and not csr or csc, convert to csr
        if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
            X = X.tocsr()

        if self.algorithm == "arpack":
            U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
            # svds doesn't abide by scipy.linalg.svd/randomized_svd
            # conventions, so reverse its outputs.
            Sigma = Sigma[::-1]
            U, VT = svd_flip(U[:, ::-1], VT[::-1])

        elif self.algorithm == "randomized":
            k = self.n_components
            n_features = X.shape[1]
            if k >= n_features:
                raise ValueError("n_components must be < n_features;"
                                 " got %d >= %d" % (k, n_features))
            U, Sigma, VT = randomized_svd(X, self.n_components,
                                          n_iter=self.n_iter,
                                          random_state=random_state)
        else:
            raise ValueError("unknown algorithm %r" % self.algorithm)

        self.components_ = VT

        # Calculate explained variance & explained variance ratio.
        # PERF: broadcasting the singular values across the columns of U is
        # equivalent to np.dot(U, np.diag(Sigma)) but avoids materializing
        # the (k, k) diagonal matrix and the O(n * k^2) matmul.
        X_transformed = U * Sigma
        self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
        if sp.issparse(X):
            _, full_var = mean_variance_axis(X, axis=0)
            full_var = full_var.sum()
        else:
            full_var = np.var(X, axis=0).sum()
        self.explained_variance_ratio_ = exp_var / full_var
        return X_transformed

    def transform(self, X):
        """Perform dimensionality reduction on X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data.

        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = check_array(X, accept_sparse='csr')
        return safe_sparse_dot(X, self.components_.T)

    def inverse_transform(self, X):
        """Transform X back to its original space.

        Returns an array X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data.

        Returns
        -------
        X_original : array, shape (n_samples, n_features)
            Note that this is always a dense array.
        """
        X = check_array(X)
        return np.dot(X, self.components_)
| bsd-3-clause |
maciejkula/scipy | scipy/spatial/_plotutils.py | 18 | 4033 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
    # Run the wrapped plotting function with the axes' hold state forced
    # on, restoring the previous hold state afterwards. Creates a fresh
    # figure when no axes are supplied.
    import matplotlib.pyplot as plt

    if ax is None:
        ax = plt.figure().gca()

    was_held = ax.ishold()
    try:
        ax.hold(True)
        return func(obj, ax=ax, **kw)
    finally:
        ax.hold(was_held)
def _adjust_bounds(ax, points):
    # Pad the axis limits by 10% of the point cloud's extent on each side.
    margin = 0.1 * points.ptp(axis=0)
    lower = points.min(axis=0) - margin
    upper = points.max(axis=0) + margin
    ax.set_xlim(lower[0], upper[0])
    ax.set_ylim(lower[1], upper[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
    """
    Plot the given Delaunay triangulation in 2-D

    Parameters
    ----------
    tri : scipy.spatial.Delaunay instance
        Triangulation to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    Delaunay
    matplotlib.pyplot.triplot

    Notes
    -----
    Requires Matplotlib.

    """
    if tri.points.shape[1] != 2:
        raise ValueError("Delaunay triangulation is not 2-D")

    # Input points as markers, triangulation edges via triplot.
    x, y = tri.points[:, 0], tri.points[:, 1]
    ax.plot(x, y, 'o')
    ax.triplot(x, y, tri.simplices.copy())

    _adjust_bounds(ax, tri.points)
    return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
    """
    Plot the given convex hull diagram in 2-D

    Parameters
    ----------
    hull : scipy.spatial.ConvexHull instance
        Convex hull to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    ConvexHull

    Notes
    -----
    Requires Matplotlib.

    """
    if hull.points.shape[1] != 2:
        raise ValueError("Convex hull is not 2-D")

    ax.plot(hull.points[:, 0], hull.points[:, 1], 'o')
    # Each simplex is one facet (segment) of the hull boundary.
    for vertices in hull.simplices:
        ax.plot(hull.points[vertices, 0], hull.points[vertices, 1], 'k-')

    _adjust_bounds(ax, hull.points)
    return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None):
    """
    Plot the given Voronoi diagram in 2-D

    Parameters
    ----------
    vor : scipy.spatial.Voronoi instance
        Diagram to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    Voronoi

    Notes
    -----
    Requires Matplotlib.

    """
    if vor.points.shape[1] != 2:
        raise ValueError("Voronoi diagram is not 2-D")

    # Input points as dots, finite Voronoi vertices as circles.
    ax.plot(vor.points[:,0], vor.points[:,1], '.')
    ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')

    # Finite ridges: both endpoints are real Voronoi vertices.
    for simplex in vor.ridge_vertices:
        simplex = np.asarray(simplex)
        if np.all(simplex >= 0):
            ax.plot(vor.vertices[simplex,0], vor.vertices[simplex,1], 'k-')

    # Infinite ridges: one endpoint is at infinity (encoded as index -1).
    # Draw each as a dashed ray from its finite vertex, perpendicular to
    # the segment between the two generating input points, oriented away
    # from the centroid of all input points and extended past the bounds.
    ptp_bound = vor.points.ptp(axis=0)
    center = vor.points.mean(axis=0)
    for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
        simplex = np.asarray(simplex)
        if np.any(simplex < 0):
            i = simplex[simplex >= 0][0]  # finite end Voronoi vertex

            t = vor.points[pointidx[1]] - vor.points[pointidx[0]]  # tangent
            t /= np.linalg.norm(t)
            n = np.array([-t[1], t[0]])  # normal

            midpoint = vor.points[pointidx].mean(axis=0)
            direction = np.sign(np.dot(midpoint - center, n)) * n
            far_point = vor.vertices[i] + direction * ptp_bound.max()

            ax.plot([vor.vertices[i,0], far_point[0]],
                    [vor.vertices[i,1], far_point[1]], 'k--')

    _adjust_bounds(ax, vor.points)

    return ax.figure
| bsd-3-clause |
Koheron/zynq-sdk | examples/alpha250-4/fft/python/test_noise_floor.py | 2 | 1169 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Measure the noise floor of the ADC and ADC + DAC
'''
import numpy as np
import os
import time
import matplotlib.pyplot as plt
from fft import FFT
from koheron import connect
# Connect to the Koheron board running the 'fft' instrument; the address
# can be overridden through the HOST environment variable.
host = os.getenv('HOST', '192.168.1.50')
client = connect(host, 'fft', restart=False)
driver = FFT(client)

# Termination resistance used to convert the PSD into a voltage density.
Rload = 50 # Ohm

driver.set_fft_window(0)

# Read the linear PSD of both channels for each of the two input
# selections (four ADC inputs total).
driver.set_input_channel(0)
lpsd00 = np.sqrt(Rload * driver.read_psd(0)) # V/rtHz
lpsd10 = np.sqrt(Rload * driver.read_psd(1)) # V/rtHz

driver.set_input_channel(1)
lpsd01 = np.sqrt(Rload * driver.read_psd(0)) # V/rtHz
lpsd11 = np.sqrt(Rload * driver.read_psd(1)) # V/rtHz

fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
ax.set_xlim([0, 125])
ax.set_ylim([0, 40])
# Frequency axis: n_pts/2 bins spanning DC to Nyquist (250 MS/s sampling).
freqs = np.arange(driver.n_pts / 2) * 250. / driver.n_pts
ax.plot(freqs, lpsd00 * 1e9, label='IN0')
ax.plot(freqs, lpsd01 * 1e9, label='IN1')
ax.plot(freqs, lpsd10 * 1e9, label='IN2')
ax.plot(freqs, lpsd11 * 1e9, label='IN3')
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('Voltage noise density (nV/rtHz)')
ax.legend()
plt.show()

# Persist the measurement for offline analysis.
np.save('alpha250_4_noise_floor.npy', (freqs, lpsd00, lpsd01, lpsd10, lpsd11))
fierval/retina | DiabeticRetinopathy/Refactoring/kobra/dr/image_reader.py | 1 | 1244 | import numpy as np
import pandas as pd
from os import path
import cv2
class ImageReader(object):
    '''
    Reads images and their masks
    '''
    def __init__(self, root, im_file, masks_dir, gray_scale = False):
        self._im_file = path.join(root, im_file)
        self._masks_dir = masks_dir

        assert (path.exists(self._im_file)), "Image does not exist"
        assert (path.exists(self._masks_dir)), "Masks dir does not exist"

        read_flag = cv2.IMREAD_GRAYSCALE if gray_scale else cv2.IMREAD_COLOR
        self._image = cv2.imread(self._im_file, read_flag)
        self._mask = self.get_mask()
        # Black out every pixel outside the masked region of interest.
        self._image[self._mask == 0] = 0

    def get_mask(self):
        # The mask shares the image's base name and is stored as a PNG.
        base_name = path.splitext(path.split(self._im_file)[1])[0]
        mask_file = path.join(self._masks_dir, base_name + ".png")
        assert (path.exists(mask_file)), "Mask does not exist"

        self._mask = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)
        return ImageReader.rescale_mask(self._image, self._mask)

    @staticmethod
    def rescale_mask(image, mask):
        # cv2.resize takes (width, height), i.e. reversed numpy shape order.
        return cv2.resize(mask, (image.shape[1], image.shape[0]))

    @property
    def mask(self):
        return self._mask

    @property
    def image(self):
        return self._image
jni/networkx | examples/graph/napoleon_russian_campaign.py | 44 | 3216 | #!/usr/bin/env python
"""
Minard's data from Napoleon's 1812-1813 Russian Campaign.
http://www.math.yorku.ca/SCS/Gallery/minard/minard.txt
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2006 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import string
import networkx as nx
def minard_graph():
    """Build graphs of Minard's Napoleon 1812-1813 Russian campaign data.

    Returns
    -------
    (g, c) : tuple
        ``g`` is a list of three ``nx.Graph`` objects, one per army group.
        Each graph stores node coordinates in ``G.pos`` and troop counts in
        ``G.pop``; consecutive waypoints are joined by an edge whose
        attribute key is 'A' (advance) or 'R' (retreat) and whose value is
        the group number.  ``c`` maps city name -> (longitude, latitude).
    """
    # Waypoint data: longitude, latitude, troop count, advance/retreat, group.
    data1="""\
24.0,54.9,340000,A,1
24.5,55.0,340000,A,1
25.5,54.5,340000,A,1
26.0,54.7,320000,A,1
27.0,54.8,300000,A,1
28.0,54.9,280000,A,1
28.5,55.0,240000,A,1
29.0,55.1,210000,A,1
30.0,55.2,180000,A,1
30.3,55.3,175000,A,1
32.0,54.8,145000,A,1
33.2,54.9,140000,A,1
34.4,55.5,127100,A,1
35.5,55.4,100000,A,1
36.0,55.5,100000,A,1
37.6,55.8,100000,A,1
37.7,55.7,100000,R,1
37.5,55.7,98000,R,1
37.0,55.0,97000,R,1
36.8,55.0,96000,R,1
35.4,55.3,87000,R,1
34.3,55.2,55000,R,1
33.3,54.8,37000,R,1
32.0,54.6,24000,R,1
30.4,54.4,20000,R,1
29.2,54.3,20000,R,1
28.5,54.2,20000,R,1
28.3,54.3,20000,R,1
27.5,54.5,20000,R,1
26.8,54.3,12000,R,1
26.4,54.4,14000,R,1
25.0,54.4,8000,R,1
24.4,54.4,4000,R,1
24.2,54.4,4000,R,1
24.1,54.4,4000,R,1"""
    data2="""\
24.0,55.1,60000,A,2
24.5,55.2,60000,A,2
25.5,54.7,60000,A,2
26.6,55.7,40000,A,2
27.4,55.6,33000,A,2
28.7,55.5,33000,R,2
29.2,54.2,30000,R,2
28.5,54.1,30000,R,2
28.3,54.2,28000,R,2"""
    data3="""\
24.0,55.2,22000,A,3
24.5,55.3,22000,A,3
24.6,55.8,6000,A,3
24.6,55.8,6000,R,3
24.2,54.4,6000,R,3
24.1,54.4,6000,R,3"""
    # City annotations: longitude, latitude, name.
    cities="""\
24.0,55.0,Kowno
25.3,54.7,Wilna
26.4,54.4,Smorgoni
26.8,54.3,Moiodexno
27.7,55.2,Gloubokoe
27.6,53.9,Minsk
28.5,54.3,Studienska
28.7,55.5,Polotzk
29.2,54.4,Bobr
30.2,55.3,Witebsk
30.4,54.5,Orscha
30.4,53.9,Mohilow
32.0,54.8,Smolensk
33.2,54.9,Dorogobouge
34.3,55.2,Wixma
34.4,55.5,Chjat
36.0,55.5,Mojaisk
37.6,55.8,Moscou
36.6,55.3,Tarantino
36.5,55.0,Malo-Jarosewii"""

    # Map city name -> coordinate pair.
    c={}
    for line in cities.split('\n'):
        x,y,name=line.split(',')
        c[name]=(float(x),float(y))

    g=[]
    for data in [data1,data2,data3]:
        G=nx.Graph()
        i=0
        G.pos={} # location
        G.pop={} # size
        last=None
        for line in data.split('\n'):
            x,y,p,r,n=line.split(',')
            G.pos[i]=(float(x),float(y))
            G.pop[i]=int(p)
            if last is None:
                # First waypoint of this group: nothing to connect yet.
                last=i
            else:
                # BUG FIX: networkx 2.0 removed passing an attribute dict as
                # the third positional argument to add_edge().  Expanding it
                # as keyword attributes works on both networkx 1.x and 2.x.
                G.add_edge(i,last,**{r:int(n)})
                last=i
            i=i+1
        g.append(G)
    return g,c
if __name__ == "__main__":
    (g,city)=minard_graph()
    # Plotting is optional: silently skip when matplotlib is not installed.
    try:
        import matplotlib.pyplot as plt
        plt.figure(1,figsize=(11,5))
        plt.clf()
        # One colour per army group, consumed in order.
        colors=['b','g','r']
        for G in g:
            c=colors.pop(0)
            # Scale node size by troop count so attrition is visible.
            node_size=[int(G.pop[n]/300.0) for n in G]
            nx.draw_networkx_edges(G,G.pos,edge_color=c,width=4,alpha=0.5)
            nx.draw_networkx_nodes(G,G.pos,node_size=node_size,node_color=c,alpha=0.5)
            nx.draw_networkx_nodes(G,G.pos,node_size=5,node_color='k')
        # Label each city slightly above its coordinate.
        for c in city:
            x,y=city[c]
            plt.text(x,y+0.1,c)
        plt.savefig("napoleon_russian_campaign.png")
    except ImportError:
        pass
| bsd-3-clause |
CGATOxford/proj029 | Proj029Pipelines/PipelineMetaomics.py | 1 | 44215 | ####################################################
####################################################
# functions and classes used in conjunction with
# pipeline_metaomics.py
####################################################
####################################################
# import libraries
import sys
import re
import os
import itertools
import sqlite3
import CGAT.IOTools as IOTools
import CGATPipelines.Pipeline as P
from rpy2.robjects import r as R
import pandas
import numpy as np
####################################################
####################################################
####################################################
# SECTION 1
####################################################
####################################################
####################################################
def buildDiffStats(infile, outfile, db, connection):
    '''
    build differential abundance statistics
    at different p-value and Fold change
    thresholds for each comparison

    Args:
        infile (str): file whose basename maps to the results table name
        outfile (str): output tsv of counts per threshold combination
        db (str): path to the sqlite database holding the diff tables
        connection: open sqlite3 connection used to run the queries
    '''
    tablename = P.toTable(os.path.basename(infile))
    # attach the differential-abundance database under the alias "diff"
    statement = "ATTACH '%(db)s' as diff;" % locals()
    connection.execute(statement)
    # build table of results at different thresholds
    ps = [0.01, 0.05, 0.1]
    fcs = [0, 0.5, 1, 1.5, 2]
    # build results for each pair
    pairs = [("HhaIL10R", "WT"), ("WT", "aIL10R"), ("Hh", "WT")]
    outf = open(outfile, "w")
    outf.write("group1\tgroup2\tadj_P_Val\tlogFC\tnumber\n")
    for pair in pairs:
        p1, p2 = pair[0], pair[1]
        # count features passing every p-value x fold-change combination
        for p, fc in itertools.product(ps, fcs):
            statement = """SELECT COUNT(*)
                           FROM diff.%(tablename)s
                           WHERE group1 == "%(p1)s"
                           AND group2 == "%(p2)s"
                           AND adj_P_Val < %(p)f
                           AND abs(logFC) > %(fc)f""" % locals()
            for data in connection.execute(statement).fetchall():
                outf.write("\t".join([p1, p2, str(p), str(fc), str(data[0])]) + "\n")
    outf.close()
####################################################
####################################################
####################################################
# SECTION 2
####################################################
####################################################
####################################################
def buildCommonList(rnadb, dnadb, outfile):
    '''
    Build the list of NOGs/genera that were found in common, after
    filtering, between the RNA (metatranscriptome) and DNA (metagenome)
    data sets.

    Args:
        rnadb (str): path to the RNA sqlite database
        dnadb (str): path to the DNA sqlite database
        outfile (str): output file, one common feature per line

    The table queried depends on the output name: genera results live in
    genus_diamond_aggregated_counts_diff, NOG results in gene_counts_diff.
    '''
    # select appropriate table depending on
    # whether we want genera or NOGs
    if "genera" in outfile:
        tablename = "genus_diamond_aggregated_counts_diff"
    else:
        tablename = "gene_counts_diff"

    # the same comparison is pulled from both databases
    statement = """SELECT taxa
                   FROM %s
                   WHERE group1 == "HhaIL10R"
                   AND group2 == "WT"
                """ % tablename

    def _features(dbfile):
        # helper: fetch the feature set from one database, closing the
        # connection deterministically (previously the connections leaked)
        dbh = sqlite3.connect(dbfile)
        try:
            return set(row[0] for row in dbh.execute(statement).fetchall())
        finally:
            dbh.close()

    rna = _features(rnadb)
    dna = _features(dnadb)

    # BUG FIX: the output handle was never closed, so the file could stay
    # unflushed; use try/finally to guarantee the close.
    outf = open(outfile, "w")
    try:
        for gene in rna.intersection(dna):
            outf.write(gene + "\n")
    finally:
        outf.close()
####################################################
####################################################
####################################################
def buildDiffList(db,
                  commonset,
                  outfile,
                  fdr=0.05,
                  l2fold=1,
                  tablename=None):
    '''
    build a list of differentially expressed
    NOGs between colitis and steady state

    Args:
        db (str): sqlite database holding the differential tables
        commonset (str): file of features common to RNA and DNA analyses
        outfile (str): output file, one feature per line (sorted by logFC)
        fdr (float): adjusted p-value threshold
        l2fold (int): absolute log2 fold-change threshold
        tablename (str): table holding the differential results
    '''
    # list of common NOGs formatted as an SQL "IN (...)" list
    common = set([x[:-1] for x in open(commonset).readlines()])
    common = "(" + ",".join(['"'+x+'"' for x in common]) + ")"
    # connect to database
    dbh = sqlite3.connect(db)
    cc = dbh.cursor()
    # remove any genes that are different between Hh and steady state
    # or between aIL10R and steady state
    hh = set([x[0] for x in cc.execute("""SELECT taxa
                                          FROM %s \
                                          WHERE group1 == "Hh" \
                                          AND group2 == "WT" \
                                          AND adj_P_Val < %f""" % (tablename, fdr)).fetchall()])
    # sql list
    hh = "(" + ",".join(['"'+x+'"' for x in hh]) + ")"
    ail10r = set([x[0] for x in cc.execute("""SELECT taxa
                                              FROM %s
                                              WHERE group1 == "WT"
                                              AND group2 == "aIL10R"
                                              AND adj_P_Val < %f""" % (tablename, fdr)).fetchall()])
    # sql list
    ail10r = "(" + ",".join(['"'+x+'"' for x in ail10r]) + ")"
    # keep features significant in HhaIL10R vs WT, above the fold-change
    # cutoff, present in both data sets and not confounded by the single
    # perturbations collected above
    outf = open(outfile, "w")
    for gene in cc.execute("""SELECT taxa
                              FROM %s
                              WHERE group1 == "HhaIL10R"
                              AND group2 == "WT"
                              AND adj_P_Val < %f
                              AND (logFC > %i OR logFC < -%i)
                              AND taxa IN %s
                              AND taxa NOT IN %s
                              AND taxa NOT IN %s
                              ORDER BY logFC DESC""" % (tablename, fdr, l2fold, l2fold, common, hh, ail10r)).fetchall():
        outf.write(gene[0] + "\n")
    outf.close()
####################################################
####################################################
####################################################
def heatmapDiffFeatures(diff_list,
                        matrix,
                        outfile):
    '''
    draw heatmap of differentially abundant features

    Args:
        diff_list (str): file with one differential feature per line
        matrix (str): tab-separated abundance matrix with a "taxa" column
        outfile (str): output pdf
    '''
    R('''library(gplots)''')
    R('''library(gtools)''')
    # read the feature list and the abundance matrix
    R('''diff <- read.csv("%s", header=F, sep="\t", stringsAsFactors=F)''' % diff_list)
    R('''dat <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % matrix)
    R('''rownames(dat) <- dat$taxa''')
    # drop the taxa column, keep only the differential features
    R('''dat <- dat[, 1:ncol(dat)-1]''')
    R('''dat <- dat[diff[,1],]''')
    R('''dat <- na.omit(dat)''')
    # order samples naturally (R1, R2, ... R10) rather than lexically
    R('''dat <- dat[, mixedsort(colnames(dat))]''')
    R('''samples <- colnames(dat)''')
    # z-score each feature across samples
    R('''dat <- t(apply(dat, 1, scale))''')
    R('''colnames(dat) <- samples''')
    R('''cols <- colorRampPalette(c("blue", "white", "red"))''')
    R('''pdf("%s")''' % outfile)
    # rows and columns are kept in input order (Rowv = F, Colv = F)
    R('''heatmap.2(as.matrix(dat), col = cols, scale = "row", trace = "none", Rowv = F, Colv = F, margins = c(15,15),
                   distfun = function(x) dist(x, method = "manhattan"),
                   hclustfun = function(x) hclust(x, method = "ward.D2"))''')
    R["dev.off"]()
####################################################
####################################################
####################################################
def buildDiffGeneOverlap(dnafile, rnafile, outfile):
    '''
    Overlap differentially abundant NOGs between the RNA and DNA
    data sets.

    Args:
        dnafile (str): file with one DNA feature per line
        rnafile (str): file with one RNA feature per line
        outfile (str): tab-separated output with nDNA, nRNA and overlap counts
    '''
    # BUG FIX: the input handles were never closed and x[:-1] chopped a real
    # character off the last line when the file lacked a trailing newline;
    # use context managers and rstrip the newline only.
    with open(dnafile) as inf:
        dna = set(line.rstrip("\n") for line in inf)
    with open(rnafile) as inf:
        rna = set(line.rstrip("\n") for line in inf)

    ndna = len(dna)
    nrna = len(rna)
    overlap = len(dna.intersection(rna))

    with open(outfile, "w") as outf:
        outf.write("nDNA\tnRNA\tnoverlap\n%(ndna)i\t%(nrna)i\t%(overlap)i\n" % locals())
####################################################
####################################################
####################################################
def testSignificanceOfOverlap(common, overlap, outfile):
    '''
    Test significance of overlapping lists
    bewteen RNA and DNA using hypergeometric test

    Args:
        common (str): file listing the background population of features
        overlap (str): tsv with columns nDNA, nRNA, noverlap
        outfile (str): output tsv with the hypergeometric parameters and p-value
    '''
    R('''pop <- read.csv("%s", header = F, sep = "\t", stringsAsFactors = F)''' % common)
    R('''overlaps <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % overlap)
    # total genes in population
    R('''npop <- nrow(pop)''')
    # x = number of white balls picked = overlap
    R('''x <- overlaps$noverlap''')
    # m = total number of white balls = total diff in RNA analysis
    R('''m <- overlaps$nRNA''')
    # n = total number of black balls = total - diff in RNA analysis
    R('''n <- npop - m''')
    # k = total balls sampled = number of genera different in DNA analysis
    R('''k <- overlaps$nDNA''')
    # hypergeometric test (upper tail: P(overlap > x))
    R('''p <- 1-phyper(x,m,n,k)''')
    # write result as a two-column parameter/value table
    R('''res <- matrix(ncol = 2, nrow = 5)''')
    R('''res[1,1] <- "x"''')
    R('''res[2,1] <- "m"''')
    R('''res[3,1] <- "n"''')
    R('''res[4,1] <- "k"''')
    R('''res[5,1] <- "p-value"''')
    R('''res[1,2] <- x''')
    R('''res[2,2] <- m''')
    R('''res[3,2] <- n''')
    R('''res[4,2] <- k''')
    R('''res[5,2] <- p''')
    R('''print(res)''')
    R('''write.table(as.data.frame(res), file = "%s", quote = F, sep = "\t", row.names = F)''' % outfile)
####################################################
####################################################
####################################################
def scatterplotAbundanceEstimates(dnamatrix,
                                  rnamatrix,
                                  outfile):
    '''
    scatterplot abundance estimates between DNA and RNA
    data sets

    Args:
        dnamatrix (str): tab-separated DNA abundance matrix with "taxa" column
        rnamatrix (str): tab-separated RNA abundance matrix with "taxa" column
        outfile (str): output png
    '''
    R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rnamatrix)
    R('''rownames(rna) <- rna$taxa''')
    R('''rna <- rna[,1:ncol(rna)-1]''')
    R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dnamatrix)
    R('''rownames(dna) <- dna$taxa''')
    R('''dna <- dna[,1:ncol(dna)-1]''')
    # intersection of taxa/NOGs present
    R('''keep <- intersect(rownames(rna), rownames(dna))''')
    # get data where there is rna and dna
    R('''rna <- rna[keep,]''')
    R('''dna <- dna[keep,]''')
    # take averages across samples for each feature
    R('''rna.ave <- data.frame(apply(rna, 1, mean))''')
    R('''dna.ave <- data.frame(apply(dna, 1, mean))''')
    # correlation between mean DNA and RNA abundances (printed, not saved)
    R('''print(cor(dna.ave,rna.ave)[[1]])''')
    R('''png("%s")''' % outfile)
    R('''plot(dna.ave[,1],
              rna.ave[,1],
              pch = 16,
              col = "slateGrey",
              xlab = "Mean DNA abundance",
              ylab = "Mean RNA abundance",
              main = paste("N = ", nrow(dna.ave), sep = ""))
         abline(lm(rna[,1]~dna[,1], na.rm = T))''')
    R["dev.off"]()
####################################################
####################################################
####################################################
def buildDetectionOverlap(rnacounts, dnacounts, outfile):
    '''
    build detection overlaps between RNA and DNA
    data sets

    Args:
        rnacounts (str): tab-separated RNA counts matrix with "taxa" column
        dnacounts (str): tab-separated DNA counts matrix with "taxa" column
        outfile (str): output tsv with nrna, ndna and overlap counts
    '''
    R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rnacounts)
    R('''rownames(rna) <- rna$taxa''')
    R('''rna <- rna[,1:ncol(rna)]''')
    R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dnacounts)
    R('''rownames(dna) <- dna$taxa''')
    R('''dna <- dna[,1:ncol(dna)]''')
    # detected features are simply the row names of each matrix
    R('''taxa.rna <- rownames(rna)''')
    R('''taxa.dna <- rownames(dna)''')
    # union of taxa across samples
    R('''nrna = length(taxa.rna)''')
    R('''ndna = length(taxa.dna)''')
    # get overlapping
    R('''noverlap = length(intersect(taxa.rna, taxa.dna))''')
    R('''result = data.frame(nrna = nrna, ndna = ndna, noverlap = noverlap)''')
    R('''write.table(result, file = "%s", sep = "\t", quote = F, row.names = F)''' % outfile)
####################################################
####################################################
####################################################
def plotAbundanceLevelsOfOverlap(rnacounts,
                                 dnacounts,
                                 outfile,
                                 of=None):
    '''
    plot abundance levels pf taxa/NOGs that do
    and don't overlap between data sets

    Args:
        rnacounts (str): tab-separated RNA counts matrix with "taxa" column
        dnacounts (str): tab-separated DNA counts matrix with "taxa" column
        outfile (str): output plot; a ".sig" file with Wilcoxon p-values is
            written alongside it
        of (str): set to "genes" for the NOG analysis, where no RNA-only
            features exist and a dummy set is substituted
    '''
    R('''library(ggplot2)''')
    # get rna reads per million
    R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rnacounts)
    R('''rownames(rna) <- rna$taxa''')
    R('''rna <- rna[,2:ncol(rna)]''')
    R('''rna <- sweep(rna, 2, colSums(rna)/1000000, "/")''')
    # get dna reads per million
    R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dnacounts)
    R('''rownames(dna) <- dna$taxa''')
    R('''dna <- dna[,2:ncol(dna)]''')
    R('''dna <- sweep(dna, 2, colSums(dna)/1000000, "/")''')
    # common and distinct sets
    R('''common <- intersect(rownames(dna), rownames(rna))''')
    R('''rna.only <- setdiff(rownames(rna), rownames(dna))''')
    R('''dna.only <- setdiff(rownames(dna), rownames(rna))''')
    # boxplot the abundance levels
    R('''rna.common <- apply(rna[common,], 1, mean)''')
    R('''dna.common <- apply(dna[common,], 1, mean)''')
    R('''rna.distinct <- apply(rna[rna.only,], 1, mean)''')
    R('''dna.distinct <- apply(dna[dna.only,], 1, mean)''')
    if of == "genes":
        # this is just so the thing will run
        # genes do not have distinct genes
        # in RNA analysis
        R('''rna.distinct <- rep(0, 20)''')
    else:
        R('''rna.distinct <- rna.distinct''')
    # test sig bewteen groups (pairwise Wilcoxon rank-sum tests)
    R('''wtest1 <- wilcox.test(rna.common, rna.distinct)''')
    R('''wtest2 <- wilcox.test(dna.common, dna.distinct)''')
    R('''wtest3 <- wilcox.test(rna.common, dna.distinct)''')
    R('''wtest4 <- wilcox.test(dna.common, rna.distinct)''')
    R('''wtest5 <- wilcox.test(dna.common, rna.common)''')
    R('''res <- data.frame("rna.common_vs_rna.distinct" = wtest1$p.value,
                           "dna.common_vs_dna.distinct" = wtest2$p.value,
                           "rna.common_vs_dna.distinct" = wtest3$p.value,
                           "dna.common_vs_rna.distinct" = wtest4$p.value,
                           "dna.common_vs_rna.common" = wtest5$p.value)''')
    outname_sig = outfile[:-4] + ".sig"
    R('''write.table(res, file = "%s", row.names = F, sep = "\t", quote = F)''' % outname_sig)
    # create dataframe for plotting
    R('''dat <- data.frame(values = c(dna.distinct, dna.common, rna.common, rna.distinct),
                           status = c(rep("unique.dna", length(dna.distinct)),
                                      rep("common.dna", length(dna.common)),
                                      rep("common.rna", length(rna.common)),
                                      rep("unique.rna", length(rna.distinct))))''')
    R('''plot1 <- ggplot(dat, aes(x = factor(status, levels = status), y = values, stat = "identity"))''')
    R('''plot1 + geom_boxplot() + scale_y_log10()''')
    R('''ggsave("%s")''' % outfile)
####################################################
####################################################
####################################################
# SECTION 3
####################################################
####################################################
####################################################
def runPCA(infile, outfile):
    '''
    run pca analysis - this outputs
    a plot coloured by condition and
    also the loadings

    Args:
        infile (str): tab-separated abundance matrix with a "taxa" column;
            the path determines the output prefix ("rna"/"dna") and the
            axis limits (genes vs genera)
        outfile (str): loadings tsv; the actual file written carries the
            rna_/dna_ prefix, and a companion .pca.pdf plot is saved
    '''
    # output prefix depends on which data set is being analysed
    if "RNA" in infile:
        suffix = "rna"
    else:
        suffix = "dna"
    # wider axis limits for the gene-level analysis
    if "gene" in infile:
        xlim, ylim = 40,40
    else:
        xlim, ylim = 12,7
    outname_plot = P.snip(outfile, ".loadings.tsv").replace("/", "/%s_" % suffix) + ".pca.pdf"
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
    R('''rownames(dat) <- dat$taxa''')
    R('''dat <- dat[, 1:ncol(dat)-1]''')
    # samples are observations: PCA on the transposed matrix
    R('''pc <- prcomp(t(dat))''')
    # parse the condition out of sample names of the form "<x>.<cond>.R<n>"
    R('''conds <- unlist(strsplit(colnames(dat), ".R[0-9]"))[seq(1, ncol(dat)*2, 2)]''')
    R('''conds <- unlist(strsplit(conds, ".", fixed = T))[seq(2, length(conds)*2, 2)]''')
    # plot the principle components
    R('''library(ggplot2)''')
    R('''pcs <- data.frame(pc$x)''')
    R('''pcs$cond <- conds''')
    # get variance explained for the first two components
    R('''imps <- c(summary(pc)$importance[2], summary(pc)$importance[5])''')
    R('''p <- ggplot(pcs, aes(x = PC1, y = PC2, colour = cond, size = 3)) + geom_point()''')
    R('''p2 <- p + xlab(imps[1]) + ylab(imps[2])''')
    R('''p3 <- p2 + scale_colour_manual(values = c("slateGrey", "green", "red", "blue"))''')
    R('''p3 + xlim(c(-%i, %i)) + ylim(c(-%i, %i))''' % (xlim, xlim, ylim, ylim))
    R('''ggsave("%s")''' % outname_plot)
    # get the loadings
    R('''loads <- data.frame(pc$rotation)''')
    R('''loads$taxa <- rownames(loads)''')
    # write out data
    # NOTE(review): the file written gains the rna_/dna_ prefix, so the
    # literal `outfile` path itself is never created -- confirm downstream
    # tasks expect the prefixed name.
    R('''write.table(loads, file = "%s", sep = "\t", row.names = F, quote = F)''' % outfile.replace("/", "/%s_" % suffix))
####################################################
####################################################
####################################################
def plotPCALoadings(infile, outfile):
    '''
    plot PCA loadings

    Args:
        infile (str): loadings tsv produced by runPCA (columns PC1, PC2, taxa)
        outfile (str): output plot
    '''
    R('''library(ggplot2)''')
    R('''library(grid)''')
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
    # label only the 5 most extreme features on each end of PC1 and PC2
    R('''top5pc1 <- dat[order(-dat$PC1),][1:5,]''')
    R('''bottom5pc1 <- dat[order(dat$PC1),][1:5,]''')
    R('''top5pc2 <- dat[order(-dat$PC2),][1:5,]''')
    R('''bottom5pc2 <- dat[order(dat$PC2),][1:5,]''')
    R('''totext <- data.frame(rbind(top5pc1, bottom5pc1, top5pc2, bottom5pc2))''')
    # every loading is drawn as an arrow from the origin
    R('''dat$x <- 0''')
    R('''dat$y <- 0''')
    R('''p <- ggplot(dat, aes(x = x, y = y, xend = PC1, yend = PC2, colour = taxa))''')
    R('''p2 <- p + geom_segment(arrow = arrow(length = unit(0.2, "cm")))''')
    R('''p2 + geom_text(data = totext, aes(x = PC1, y = PC2, label = totext$taxa, size = 6)) + xlim(c(-0.5,0.5)) + ylim(c(-0.5,0.25))''')
    R('''ggsave("%s")''' % outfile)
# rna = [x for x in infiles if "RNA" in x][0]
# dna = [x for x in infiles if "DNA" in x][0]
# R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rna)
# R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dna)
# R('''rna <- rna[rna$group1 == "HhaIL10R" & rna$group2 == "WT",]''')
# R('''dna <- dna[dna$group1 == "HhaIL10R" & dna$group2 == "WT",]''')
# R('''rownames(rna) <- rna$taxa''')
# R('''rownames(dna) <- dna$taxa''')
# R('''rna <- rna[,1:ncol(rna)-1]''')
# R('''dna <- dna[,1:ncol(dna)-1]''')
# # only look at those that are present in both
# R('''keep <- intersect(rownames(rna), rownames(dna))''')
# R('''rna <- rna[keep,]''')
# R('''dna <- dna[keep,]''')
# R('''rna.ratio <- rna$logFC''')
# R('''dna.ratio <- dna$logFC''')
# R('''rna.p <- rna$adj.P.Val''')
# R('''dna.p <- dna$adj.P.Val''')
# R('''ratio <- data.frame(gene = keep, dna = dna.ratio, rna = rna.ratio, pdna = dna.p, prna = rna.p, ratio = rna.ratio - dna.ratio)''')
# R('''write.table(ratio, file = "%s", sep = "\t", row.names = F, quote = F)''' % outfile)
####################################################
####################################################
####################################################
def barchartProportions(infile, outfile):
    '''
    stacked barchart description of percent reads
    mapping to each taxon

    Args:
        infile (str): tab-separated proportions matrix with a "taxa" column
            (values assumed to be fractions -- they are scaled by 100)
        outfile (str): output pdf stub; one "<stub>_<taxon>.pdf" is written
            per candidate genus
    '''
    R('''library(ggplot2)''')
    R('''library(gtools)''')
    R('''library(reshape)''')
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
    R('''rownames(dat) <- dat$taxa''')
    # get rid of taxa colomn
    R('''dat <- dat[,1:ncol(dat)-1]''')
    # convert fractions to percentages
    R('''dat.percent <- data.frame(apply(dat, 2, function(x) x*100))''')
    # candidate genera
    R('''candidates <- c("Peptoniphilus",
                         "Deferribacter",
                         "Escherichia",
                         "Lactobacillus",
                         "Turicibacter",
                         "Akkermansia",
                         "Bifidobacterium",
                         "Methylacidiphilum")''')
    R('''dat.percent <- dat.percent[candidates,]''')
    # natural sample ordering (R1, R2, ... R10)
    R('''dat.percent <- dat.percent[,mixedsort(colnames(dat.percent))]''')
    # add taxa column with "other" = < 5% in any sample
    R('''dat.percent$taxa <- rownames(dat.percent)''')
    # reshape and plot
    outname = P.snip(outfile, ".pdf")
    R('''dat.percent <- melt(dat.percent)''')
    # parse conditions out of sample names of the form "<x>.<cond>.R<n>"
    R('''conds <- unlist(strsplit(as.character(dat.percent$variable), ".R[0-9]"))[seq(1, nrow(dat.percent)*2, 2)]''')
    R('''conds <- unlist(strsplit(conds, ".", fixed = T))[seq(2, length(conds)*2, 2)]''')
    R('''dat.percent$cond <- conds''')
    # one labelled boxplot per candidate genus
    R('''for (taxon in candidates){
             outname <- paste("%s", paste("_", taxon, sep=""), ".pdf", sep="")
             dat.percent.restrict <- dat.percent[dat.percent$taxa==taxon,]
             plot1 <- ggplot(dat.percent.restrict,
                             aes(x=factor(cond, levels=c("WT","aIL10R", "Hh", "HhaIL10R")),
                                 y=value, group=cond, colour=cond, label=variable))
             plot1 + geom_boxplot() + geom_jitter() + geom_text() + scale_colour_manual(values=c("darkGreen", "red", "grey", "blue"))
             ggsave(outname)}''' % outname)
####################################################
####################################################
####################################################
# SECTION 4
####################################################
####################################################
####################################################
def buildRNADNARatio(dnadiff, rnadiff, outfile):
    '''
    build ratio of RNAfold/DNAfold

    Args:
        dnadiff (str): tsv of DNA differential results (taxa, logFC, adj.P.Val, ...)
        rnadiff (str): tsv of RNA differential results, same layout
        outfile (str): output tsv with per-gene fold changes, p-values and
            the RNA - DNA log fold-change difference
    '''
    R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rnadiff)
    R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dnadiff)
    # restrict both tables to the colitis vs steady-state comparison
    R('''rna <- rna[rna$group1 == "HhaIL10R" & rna$group2 == "WT",]''')
    R('''dna <- dna[dna$group1 == "HhaIL10R" & dna$group2 == "WT",]''')
    R('''rownames(rna) <- rna$taxa''')
    R('''rownames(dna) <- dna$taxa''')
    R('''rna <- rna[,1:ncol(rna)-1]''')
    R('''dna <- dna[,1:ncol(dna)-1]''')
    # only look at those that are present in both
    R('''keep <- intersect(rownames(rna), rownames(dna))''')
    R('''rna <- rna[keep,]''')
    R('''dna <- dna[keep,]''')
    R('''rna.ratio <- rna$logFC''')
    R('''dna.ratio <- dna$logFC''')
    R('''rna.p <- rna$adj.P.Val''')
    R('''dna.p <- dna$adj.P.Val''')
    # "ratio" is the difference of log2 fold changes, i.e. log2(RNA FC / DNA FC)
    R('''ratio <- data.frame(gene = keep,
                             dna = dna.ratio,
                             rna = rna.ratio,
                             pdna = dna.p,
                             prna = rna.p,
                             ratio = rna.ratio - dna.ratio)''')
    R('''write.table(ratio,
                     file = "%s",
                     sep = "\t",
                     row.names = F,
                     quote = F)''' % outfile)
####################################################
####################################################
####################################################
def annotateRNADNARatio(RNADNARatio,
                        dnalist,
                        rnalist,
                        outfile):
    '''
    Annotate NOGs as to whether they were differentially regulated in the
    metagenomic (DNA), metatranscriptomic (RNA) or both data sets.

    Args:
        RNADNARatio (str): tsv from buildRNADNARatio
            (gene, dna, rna, pdna, prna, ratio)
        dnalist (str): file of differential DNA features, one per line
        rnalist (str): file of differential RNA features, one per line
        outfile (str): input table with an added "status" column
            (up.both / down.both / up.RNA / down.RNA / up.DNA / down.DNA / NS)
    '''
    # load the differential feature sets; the plain lists are uncompressed,
    # so builtin open with a context manager is sufficient
    with open(rnalist) as inf:
        rna_diff = set(y[:-1] for y in inf)
    with open(dnalist) as inf:
        dna_diff = set(y[:-1] for y in inf)

    inf = IOTools.openFile(RNADNARatio)
    outf = IOTools.openFile(outfile, "w")
    try:
        inf.readline()  # skip the input header
        outf.write("gene\tdna\trna\tpdna\trna\tratio\tstatus\n".replace("\trna\t", "\tprna\t", 1).replace("gene\tdna\trna", "gene\tdna\trna", 1)) if False else outf.write("gene\tdna\trna\tpdna\tprna\tratio\tstatus\n")
        # BUG FIX: stream line-by-line instead of readlines(), and close the
        # handles deterministically (previously both were leaked)
        for line in inf:
            gene, dna, rna, pdna, prna, ratio = line[:-1].split("\t")
            gene = gene.strip('"')
            dna, rna = float(dna), float(rna)
            # classify by which data set(s) the gene was differential in
            # and the direction of the fold change
            if gene in rna_diff and gene in dna_diff and dna > 0 and rna > 0:
                status = "up.both"
            elif gene in rna_diff and gene in dna_diff and dna < 0 and rna < 0:
                status = "down.both"
            elif gene in rna_diff and rna > 0:
                status = "up.RNA"
            elif gene in rna_diff and rna < 0:
                status = "down.RNA"
            elif gene in dna_diff and dna > 0:
                status = "up.DNA"
            elif gene in dna_diff and dna < 0:
                status = "down.DNA"
            else:
                status = "NS"
            outf.write("%(gene)s\t%(dna)s\t%(rna)s\t%(pdna)s\t%(prna)s\t%(ratio)s\t%(status)s\n" % locals())
    finally:
        inf.close()
        outf.close()
####################################################
####################################################
####################################################
def plotSets(infile, outfile):
    '''
    plot the fold changes in RNA and DNA analyses
    and label by how they are regulated in DNA and
    RNA analyses
    MUST HAVE GOI FILE IN WORKING DIR - not ideal

    Args:
        infile (str): annotated ratio tsv from annotateRNADNARatio
        outfile (str): output plot
    '''
    R('''library(ggplot2)''')
    # read in data
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
    # get nog 2 gene map (genes of interest; read from the working directory)
    R('''cog2gene <- read.csv("goi.tsv", header = F, stringsAsFactors = F, sep = "\t", row.names = 1)''')
    # just get those signficant in either DNA or RNA or both
    # ("z" sorts non-significant points last in the colour legend)
    R('''dat$status[dat$status == "NS"] = "z"''')
    R('''genes <- dat$gene''')
    # regression model of RNA fold change on DNA fold change
    R('''mod1 <- lm(dat$rna~dat$dna)''')
    R('''intercept <- mod1[[1]][1]''')
    R('''slope = mod1[[1]][2]''')
    R('''print(summary(mod1))''')
    # prediction intervals
    R('''pred.ints <- predict(mod1, interval = "prediction", level = 0.95)''')
    # add to data.frame
    R('''dat$lwr <- pred.ints[,2]''')
    R('''dat$upr <- pred.ints[,3]''')
    # add labels; genes of interest are drawn larger
    R('''dat$goi <- cog2gene[dat$gene,]''')
    R('''dat$pointsize <- ifelse(!(is.na(dat$goi)), 10, 1)''')
    # plot
    R('''plot1 <- ggplot(dat, aes(x = dna, y = rna, alpha = 1, colour = status))''')
    R('''plot2 <- plot1 + geom_point(shape = 18, aes(size = pointsize))''')
    R('''plot3 <- plot2 + scale_size_area() + xlim(c(-5,5))''')
    R('''plot4 <- plot3 + scale_colour_manual(values = c("blue",
                                                         "brown",
                                                         "darkGreen",
                                                         "orange",
                                                         "purple",
                                                         "red",
                                                         "grey"))''')
    R('''plot5 <- plot4 + geom_abline(intercept = intercept, slope = slope)''')
    # prediction intervals drawn as dashed lines around the regression
    R('''plot6 <- plot5 + geom_line(aes(x = dna, y = lwr), linetype = "dashed", colour = "black")''')
    R('''plot7 <- plot6 + geom_line(aes(x = dna, y = upr), linetype = "dashed", colour = "black")''')
    R('''plot7 + geom_text(aes(label = goi))''')
    R('''ggsave("%s")''' % outfile)
####################################################
####################################################
####################################################
def buildGenesOutsidePredictionInterval(infile, outfile):
    '''
    annotate genes as being outside prediction
    interval - these are the NOGs that we are
    defining as colitis-responsive

    Args:
        infile (str): annotated ratio tsv from annotateRNADNARatio
        outfile (str): input table with an added "pi_status" column for
            genes falling outside the 95% prediction interval
    '''
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
    # just get those signficant in either DNA or RNA or both
    R('''genes <- dat$gene''')
    # regression model of RNA fold change on DNA fold change
    R('''mod1 <- lm(dat$rna~dat$dna)''')
    # prediction intervals
    R('''pred.ints <- predict(mod1, interval = "prediction", level = 0.95)''')
    # add to data.frame
    R('''dat$lwr <- pred.ints[,2]''')
    R('''dat$upr <- pred.ints[,3]''')
    # annotate with whether or not they are above
    # prediction intervals, combining the interval side with the
    # differential status assigned by annotateRNADNARatio
    R('''dat$pi_status[dat$rna > dat$upr & dat$status == "up.RNA"] <- "diff.up.rna"''')
    R('''dat$pi_status[dat$rna > dat$upr & dat$status == "down.DNA"] <- "diff.down.dna"''')
    R('''dat$pi_status[dat$rna > dat$upr & dat$status == "up.both"] <- "diff.up.rna"''')
    R('''dat$pi_status[dat$rna < dat$lwr & dat$status == "down.RNA"] <- "diff.down.rna"''')
    R('''dat$pi_status[dat$rna < dat$lwr & dat$status == "up.DNA"] <- "diff.up.dna"''')
    R('''dat$pi_status[dat$rna < dat$lwr & dat$status == "down.both"] <- "diff.down.rna"''')
    # write results
    R('''write.table(dat, file = "%s", sep = "\t", quote = F, row.names = F)''' % outfile)
####################################################
####################################################
####################################################
# SECTION 6
####################################################
####################################################
####################################################
def buildGenusCogCountsMatrix(infile, outfile):
    '''
    Build a COG x genus matrix of average per-sample values.

    Args:
        infile (str): tab-separated file with columns cog, taxa, then
            per-sample values; rows with taxa == "unassigned" are skipped
        outfile (str): output matrix (COGs as columns, genera as rows),
            missing combinations written as 0

    Note: rows of length 19/20 appear to carry one or two extra leading
    annotation columns which are skipped -- confirm against the upstream
    file format.
    '''
    result = {}
    inf = IOTools.openFile(infile)
    try:
        inf.readline()  # skip header
        # BUG FIX / GENERALISATION: the original built the value list with
        # map(float, ...), which yields an iterator on Python 3 and breaks
        # np.mean; a list comprehension works on both Python 2 and 3.  The
        # original also made two passes over the file (the first only to
        # create empty per-cog dicts); setdefault achieves the same result
        # in a single streaming pass, and the handle is now closed.
        for line in inf:
            data = line[:-1].split("\t")
            cog, taxa = data[0], data[1]
            if taxa == "unassigned":
                continue
            if len(data) == 19:
                values = [float(x) for x in data[3:]]
            elif len(data) == 20:
                values = [float(x) for x in data[4:]]
            else:
                values = [float(x) for x in data[2:]]
            # average value for this taxon across samples
            result.setdefault(cog, {})[taxa] = np.mean(values)
    finally:
        inf.close()
    df = pandas.DataFrame(result)
    df.to_csv(outfile, sep = "\t", na_rep = 0)
####################################################
####################################################
####################################################
def mergePathwaysAndGenusCogCountsMatrix(annotations,
                                         matrix,
                                         outfile):
    '''
    merge cog annotations and per taxa cog counts

    Args:
        annotations (str): tsv of per-COG counts with a trailing pathway
            column (stored in the "taxa" field)
        matrix (str): COG x genus matrix from buildGenusCogCountsMatrix
        outfile (str): unused here; one file per pathway is written into
            candidate_pathways.dir/ by the R loop below
    '''
    # read annotations
    R('''anno <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t", row.names=1)''' % annotations)
    R('''anno.no.pathways <- anno[,1:ncol(anno)-1]''')
    # per-sample proportions, then the per-COG average across samples
    R('''anno.p <- sweep(anno.no.pathways, 2, colSums(anno.no.pathways), "/")''')
    R('''anno.p$average <- rowMeans(anno.p)''')
    R('''anno.p$pathway <- anno$taxa''')
    # read matrix; transpose so COGs become rows
    R('''mat <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t", row.names=1)''' % matrix)
    R('''mat <- data.frame(t(mat))''')
    R('''mat$ref <- rownames(mat)''')
    # split pathway annotations: one output file per pathway, skipping the
    # uninformative "Function unknown" class
    R('''for (pathway in unique(anno.p$pathway)){
             if (pathway == "Function unknown"){next}
             # some weirness with some names
             pw <- gsub("/", "_", pathway)
             outname <- paste("candidate_pathways.dir", paste(pw, "tsv", sep = "."), sep="/")
             outname <- gsub(" ", "_", outname)
             print(outname)
             anno.p2 <- anno.p[anno.p$pathway == pathway,]
             anno.p2 <- anno.p2[order(anno.p2$average, decreasing=T),]
             # top 10
             # anno.p2 <- anno.p2[1:10,]
             # merge with matrix
             mat2 <- mat[rownames(anno.p2),]
             mat2$pathway <- anno.p2$pathway
             write.table(mat2, file=outname, sep="\t", row.names=F)}''')
####################################################
####################################################
####################################################
def plotNumberOfTaxaPerPathway(infiles, outfile):
    '''
    plot the average number of taxa expressing genes
    in each pathway

    Args:
        infiles (list): per-pathway tsv files from
            mergePathwaysAndGenusCogCountsMatrix
        outfile (str): output barchart
    '''
    tmp = P.getTempFilename(".")
    infs = " ".join(infiles)
    # concatenate the per-pathway files, keeping only the first header
    # NOTE: P.run() picks up `statement` (and interpolates %(...)s from
    # this function's locals) by CGAT pipeline convention.
    statement = '''awk 'FNR==1 && NR!=1 { while (/ref/) getline; }1 {print}' %(infs)s > %(tmp)s'''
    P.run()
    R('''library(ggplot2)''')
    R('''library(plyr)''')
    R('''library(reshape)''')
    R('''dat <-read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % tmp)
    R('''t <- ncol(dat)''')
    R('''dat <- na.omit(dat)''')
    R('''pathways <- dat$pathway''')
    # drop the ref and pathway columns, leaving only per-taxon values
    R('''dat2 <- dat[,1:ncol(dat)-1]''')
    R('''dat2 <- dat2[,1:ncol(dat2)-1]''')
    # colsums gives the total number of taxa expressing each NOG
    R('''col.sums <- data.frame(t(sapply(split(dat2, pathways), colSums)))''')
    R('''rownames(col.sums) <- unique(pathways)''')
    # rowsums gives the total number of taxa expressing
    # at least one NOG per pathway
    R('''total.taxa <- data.frame(rowSums(col.sums > 0))''')
    R('''total.taxa$pathway <- rownames(col.sums)''')
    # sort by highest
    R('''total.taxa <- total.taxa[order(total.taxa[,1], decreasing=T), ]''')
    R('''colnames(total.taxa) <- c("value", "pathway")''')
    R('''plot1 <- ggplot(total.taxa, aes(x=factor(pathway,levels=pathway), y=value/t, stat="identity"))''')
    R('''plot1 + geom_bar(stat="identity") + theme(axis.text.x=element_text(angle=90))''')
    R('''ggsave("%s")''' % outfile)
    # remove the concatenated temporary file
    os.unlink(tmp)
####################################################
####################################################
####################################################
def plotTaxaContributionsToCandidatePathways(matrix,
                                             outfile):
    '''
    plot the distribution of maximum genus
    contribution per gene set

    Reads the per-genus contribution matrix from `matrix` (tab-separated,
    NOGs in rows keyed by `ref`) and draws a clustered pheatmap to
    `outfile` (pdf).
    '''
    R('''library(ggplot2)''')
    R('''library(gplots)''')
    R('''library(pheatmap)''')
    R('''mat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % matrix)
    R('''mat <- na.omit(mat)''')
    R('''print(mat$ref)''')
    # just plot top 10
    R('''rownames(mat) <- mat$ref''')
    # mat[,1:ncol(mat)-1] parses as mat[,(1:ncol(mat))-1] in R and drops
    # the LAST column (index 0 is ignored); applied twice to strip the
    # two trailing non-data columns.
    R('''mat2 <- mat[,1:ncol(mat)-1]''')
    R('''mat2 <- mat2[,1:ncol(mat2)-1]''')
    # only keep those genera that contribute > 5% to
    # a NOG
    # NOTE(review): colSums(mat2) > 5 filters on each genus's TOTAL over
    # all NOGs, not a per-NOG 5% threshold - confirm this matches intent.
    R('''mat2 <- mat2[,colSums(mat2) > 5]''')
    R('''cols <- colorRampPalette(c("white", "blue"))(75)''')
    R('''pdf("%s")''' % outfile)
    R('''pheatmap(mat2,
             color=cols,
             cluster_cols=T,
             cluster_rows=T,
             cluster_method="ward.D2")''')
    R["dev.off"]()
####################################################
####################################################
####################################################
def plotMaxTaxaContribution(matrix, annotations, outfile):
    '''
    plot the distribution of maximum genus
    contribution per gene set

    For each gene (column of `matrix`) takes the maximum contribution
    across taxa, merges with `annotations` (tab-separated, keyed by
    `gene`), and draws per-pi_status ECDF curves saved to `outfile`.
    '''
    R('''library(ggplot2)''')
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % matrix)
    R('''annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % annotations)
    # Per-gene (column-wise) maximum contribution across taxa.
    R('''maximums <- apply(dat, 2, max)''')
    R('''dat2 <- data.frame("cog" = colnames(dat), "max" = maximums)''')
    R('''dat3 <- merge(dat2, annotations, by.x = "cog", by.y = "gene")''')
    # Collapse non-significant genes into a single "NS" group; label any
    # remaining unannotated significant genes explicitly.
    R('''dat3$pi_status <- ifelse(dat3$status == "NS", "NS", dat3$pi_status)''')
    R('''dat3$pi_status[is.na(dat3$pi_status)] <- "other_significant"''')
    R('''plot1 <- ggplot(dat3, aes(x = as.numeric(as.character(max)), group = pi_status, colour = pi_status))''')
    R('''plot2 <- plot1 + stat_ecdf(size = 1.1)''')
    # Fixed palette: one colour per pi_status level (assignment follows
    # the factor-level order of pi_status).
    R('''plot2 + scale_colour_manual(values = c("cyan3",
                                                "darkorchid",
                                                "black",
                                                "darkgoldenrod2",
                                                "grey",
                                                "darkBlue"))''')
    R('''ggsave("%s")''' % outfile)
####################################################
####################################################
####################################################
def testSignificanceOfMaxTaxaContribution(matrix, annotations, outfile):
    '''
    Test significance of distribution differences. Compared to NS
    group

    Runs two-sample Kolmogorov-Smirnov tests of each pi_status group's
    per-gene maximum taxa contribution against the NS group and writes a
    one-row table of p-values and D statistics to `outfile`.
    '''
    R('''library(ggplot2)''')
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % matrix)
    R('''annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % annotations)
    # Per-gene maximum contribution, merged with annotations as in
    # plotMaxTaxaContribution.
    R('''maximums <- apply(dat, 2, max)''')
    R('''dat2 <- data.frame("cog" = colnames(dat), "max" = maximums)''')
    R('''dat3 <- merge(dat2, annotations, by.x = "cog", by.y = "gene")''')
    R('''dat3$pi_status <- ifelse(dat3$status == "NS", "NS", dat3$pi_status)''')
    # Extract the maxima for each pi_status group as numeric vectors.
    R('''diff.up.rna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.up.rna"]))''')
    R('''diff.down.rna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.down.rna"]))''')
    R('''diff.up.dna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.up.dna"]))''')
    R('''diff.down.dna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.down.dna"]))''')
    R('''ns <- as.numeric(as.character(dat3$max[dat3$pi_status == "NS"]))''')
    # ks tests
    R('''ks1 <- ks.test(diff.up.rna, ns)''')
    R('''ks2 <- ks.test(diff.down.rna, ns)''')
    R('''ks3 <- ks.test(diff.up.dna, ns)''')
    R('''ks4 <- ks.test(diff.down.dna, ns)''')
    # NOTE(review): the output column names (RNAGreaterThanDNA.*,
    # DNAGreaterThanRNA.*) refer to the pi_status groups tested above
    # (diff.up.rna vs NS, etc.) - confirm the labels match the intended
    # interpretation before relying on them downstream.
    R('''res <- data.frame("RNAGreaterThanDNA.up.pvalue" = ks1$p.value,
                           "RNAGreaterThanDNA.up.D" = ks1$statistic,
                           "RNAGreaterThanDNA.down.pvalue" = ks2$p.value,
                           "RNAGreaterThanDNA.down.D" = ks2$statistic,
                           "DNAGreaterThanRNA.up.pvalue" = ks3$p.value,
                           "DNAGreaterThanRNA.up.D" = ks3$statistic,
                           "DNAGreaterThanRNA.down.pvalue" = ks4$p.value,
                           "DNAGreaterThanRNA.down.D" = ks4$statistic)''')
    R('''write.table(res, file = "%s", sep = "\t", quote = F, row.names = F)''' % outfile)
####################################################
####################################################
####################################################
def heatmapTaxaCogProportionMatrix(matrix, annotations, outfile):
    '''
    plot the taxa associated with each cog on
    a heatmap

    Reads a genus x COG proportion matrix from `matrix` and gene
    annotations from `annotations`, restricts to COGs annotated as
    "diff.up.rna", and writes an annotated pheatmap to `outfile` (pdf).
    '''
    R('''library(gplots)''')
    R('''library(gtools)''')
    R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t", row.names = 1)''' % matrix)
    R('''annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % annotations)
    R('''rownames(annotations) <- annotations$gene''')
    # get genes present in both - not sure why these are different
    # in the first place - need to check
    R('''genes <- intersect(rownames(annotations), colnames(dat))''')
    R('''dat <- dat[, genes]''')
    # Drop rows for reads that could not be assigned to a genus.
    R('''dat <- dat[grep("unassigned", rownames(dat), invert = T),]''')
    R('''genera <- rownames(dat)''')
    R('''rownames(dat) <- genera''')
    R('''colnames(dat) <- genes''')
    R('''annotations <- annotations[genes,]''')
    R('''annotations <- annotations[order(annotations$pi_status),]''')
    # only for the COGs that have RNA fold > DNA fold up-regulated
    R('''annotations <- annotations[annotations$pi_status == "diff.up.rna",]''')
    R('''annotations <- na.omit(annotations)''')
    R('''dat <- dat[,rownames(annotations)]''')
    # Column annotation track for pheatmap (single cluster label).
    R('''annotation <- data.frame(cluster = as.character(annotations$pi_status))''')
    R('''rownames(annotation) <- rownames(annotations)''')
    R('''colors1 <- c("grey")''')
    R('''names(colors1) <- c("diff.up.rna")''')
    R('''anno_colors <- list(cluster = colors1)''')
    R('''cols <- colorRampPalette(c("white", "darkBlue"))(150)''')
    # Keep only COGs with at least one genus > 50 and genera with at
    # least one COG > 10 (presumably percentage units - confirm).
    R('''dat <- dat[,colSums(dat > 50) >= 1]''')
    R('''dat <- dat[rowSums(dat > 10) >= 1,]''')
    # not reading numeric in all instances
    R('''dat2 <- data.frame(t(apply(dat, 1, as.numeric)))''')
    R('''colnames(dat2) <- colnames(dat)''')
    R('''pdf("%s", height = 10, width = 15)''' % outfile)
    R('''library(pheatmap)''')
    R('''pheatmap(dat2,
                  clustering_distance_cols = "manhattan",
                  clustering_method = "ward",
                  annotation = annotation,
                  annotation_colors = anno_colors,
                  cluster_rows = T,
                  cluster_cols = F,
                  color = cols,
                  fontsize = 8)''')
    R["dev.off"]()
####################################################
####################################################
####################################################
def scatterplotPerCogTaxaDNAFoldRNAFold(taxa_cog_rnadiff,
                                        taxa_cog_dnadiff,
                                        cog_rnadiff,
                                        cog_dnadiff):
    '''
    scatterplot fold changes for per genus cog
    differences for NOGs of interest

    For each hard-coded NOG of interest, plots per-genus DNA fold change
    against RNA fold change (point size = log10 abundance) and saves one
    pdf per NOG into scatterplot_genus_cog_fold.dir/.
    '''
    R('''library(ggplot2)''')
    # read in cogs + taxa
    R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % taxa_cog_dnadiff)
    # Restrict to the WT vs HhaIL10R contrast (hard-coded group labels).
    R('''dna <- dna[dna$group2 == "WT" & dna$group1 == "HhaIL10R",]''')
    R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % taxa_cog_rnadiff)
    R('''rna <- rna[rna$group2 == "WT" & rna$group1 == "HhaIL10R",]''')
    # read in cogs alone
    R('''dna.cog <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % cog_dnadiff)
    R('''dna.cog <- dna.cog[dna.cog$group2 == "WT" & dna.cog$group1 == "HhaIL10R",]''')
    R('''rna.cog <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % cog_rnadiff)
    R('''rna.cog <- rna.cog[rna.cog$group2 == "WT" & rna.cog$group1 == "HhaIL10R",]''')
    # merge data for cogs + taxa
    R('''dat <- merge(dna, rna,
                      by.x = "taxa",
                      by.y = "taxa",
                      all.x = T,
                      all.y = T,
                      suffixes = c(".dna.taxa.cog", ".rna.taxa.cog"))''')
    # sub NA for 0
    R('''dat[is.na(dat)] <- 0''')
    # NOTE these are specified and hardcoded
    # here - NOGs of interest
    R('''cogs <- c("COG0783", "COG2837", "COG0435","COG5520", "COG0508", "COG0852")''')
    # iterate over cogs and scatterplot
    # fold changes in DNA and RNA analysis.
    # if not present in one or other then fold change will
    # be 0
    R('''for (cog in cogs){
             dat2 <- dat[grep(cog, dat$taxa),]
             dna.cog2 <- dna.cog[grep(cog, dna.cog$taxa),]
             rna.cog2 <- rna.cog[grep(cog, rna.cog$taxa),]
             # add the data for COG fold changes and abundance
             dat3 <- data.frame("genus" = append(dat2$taxa, cog),
                                "dna.fold" = append(dat2$logFC.dna.taxa.cog, dna.cog2$logFC),
                                "rna.fold" = append(dat2$logFC.rna.taxa.cog, rna.cog2$logFC),
                                "abundance" = append(dat2$AveExpr.rna.taxa.cog, rna.cog2$AveExpr))
             suffix <- paste(cog, "scatters.pdf", sep = ".")
             outname <- paste("scatterplot_genus_cog_fold.dir", suffix, sep = "/")
             plot1 <- ggplot(dat3, aes(x = dna.fold, y = rna.fold, size = log10(abundance), label = genus))
             plot2 <- plot1 + geom_point(shape = 18)
             plot3 <- plot2 + geom_text(hjust = 0.5, vjust = 1) + scale_size(range = c(3,6))
             plot4 <- plot3 + geom_abline(intercept = 0, slope = 1, colour = "blue")
             plot5 <- plot4 + geom_hline(yintercept = c(-1,1), linetype = "dashed")
             plot6 <- plot5 + geom_vline(xintercept = c(-1,1), linetype = "dashed")
             plot7 <- plot6 + geom_hline(yintercept = 0) + geom_vline(xintercept = 0)
             ggsave(outname)
         }''')
| bsd-3-clause |
robbymeals/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)

from sklearn.cluster import AffinityPropagation
from sklearn import metrics
# ``sklearn.datasets.samples_generator`` was deprecated in scikit-learn 0.22
# and removed in 0.24; ``make_blobs`` is importable from ``sklearn.datasets``
# directly in all supported versions.
from sklearn.datasets import make_blobs

##############################################################################
# Generate sample data: three isotropic Gaussian blobs with known labels.
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
                            random_state=0)

##############################################################################
# Compute Affinity Propagation.  ``preference`` controls how readily points
# become exemplars; -50 recovers roughly the three true clusters here.
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_

n_clusters_ = len(cluster_centers_indices)

# External cluster-quality metrics compare predicted labels against the
# known ground truth ``labels_true``; the silhouette score is internal.
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
      % metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
      % metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, labels, metric='sqeuclidean'))

##############################################################################
# Plot result: one colour per cluster, exemplar highlighted, members joined
# to their exemplar by a line.
import matplotlib.pyplot as plt
from itertools import cycle

plt.close('all')
plt.figure(1)
plt.clf()

colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    class_members = labels == k
    cluster_center = X[cluster_centers_indices[k]]
    plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
    for x in X[class_members]:
        plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)

plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
brynpickering/calliope | calliope/test/test_example_models.py | 1 | 15368 | import os
import shutil
import pytest
from pytest import approx
import pandas as pd
import calliope
from calliope.test.common.util import check_error_or_warning
class TestModelPreproccesing:
    """Smoke tests: every bundled example model must build (preprocess)
    without raising.

    NOTE(review): the class name carries a historical typo
    ('Preproccesing'); renaming it would change pytest node IDs.
    """
    def test_preprocess_national_scale(self):
        calliope.examples.national_scale()
    def test_preprocess_time_clustering(self):
        calliope.examples.time_clustering()
    def test_preprocess_time_resampling(self):
        calliope.examples.time_resampling()
    def test_preprocess_urban_scale(self):
        calliope.examples.urban_scale()
    def test_preprocess_milp(self):
        calliope.examples.milp()
    def test_preprocess_operate(self):
        calliope.examples.operate()
    def test_preprocess_time_masking(self):
        calliope.examples.time_masking()
class TestNationalScaleExampleModelSenseChecks:
    """Regression checks for the national-scale example model: optimal
    capacities and costs must match known-good values across solvers."""
    def example_tester(self, solver='glpk', solver_io=None):
        """Run the model on a single day with the given solver and assert
        key result values against pinned reference numbers."""
        override = {
            'model.subset_time': '2005-01-01',
            'run.solver': solver,
        }
        if solver_io:
            override['run.solver_io'] = solver_io
        model = calliope.examples.national_scale(override_dict=override)
        model.run()
        # Pinned capacities and total cost for this subset/solver setup.
        assert model.results.storage_cap.to_pandas()['region1-1::csp'] == approx(45129.950)
        assert model.results.storage_cap.to_pandas()['region2::battery'] == approx(6675.173)
        assert model.results.energy_cap.to_pandas()['region1-1::csp'] == approx(4626.588)
        assert model.results.energy_cap.to_pandas()['region2::battery'] == approx(1000)
        assert model.results.energy_cap.to_pandas()['region1::ccgt'] == approx(30000)
        assert float(model.results.cost.sum()) == approx(38988.7442)
        assert float(
            model.results.systemwide_levelised_cost.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.063543, abs=0.000001)
        assert float(
            model.results.systemwide_capacity_factor.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.2642256, abs=0.000001)
    def test_nationalscale_example_results_glpk(self):
        self.example_tester()
    def test_nationalscale_example_results_gurobi(self):
        # Gurobi availability is detected via its Python bindings here
        # (unlike the command-line checks used for CPLEX/CBC below).
        try:
            import gurobipy
            self.example_tester(solver='gurobi', solver_io='python')
        except ImportError:
            pytest.skip('Gurobi not installed')
    def test_nationalscale_example_results_cplex(self):
        # Check for existence of the `cplex` command
        if shutil.which('cplex'):
            self.example_tester(solver='cplex')
        else:
            pytest.skip('CPLEX not installed')
    def test_nationalscale_example_results_cbc(self):
        # Check for existence of the `cbc` command
        if shutil.which('cbc'):
            self.example_tester(solver='cbc')
        else:
            pytest.skip('CBC not installed')
    def test_fails_gracefully_without_timeseries(self):
        """Replacing all file-based timeseries with scalars must raise a
        ModelError rather than silently building a broken model."""
        override = {
            "locations.region1.techs.demand_power.constraints.resource": -200,
            "locations.region2.techs.demand_power.constraints.resource": -400,
            "techs.csp.constraints.resource": 100
        }
        with pytest.raises(calliope.exceptions.ModelError):
            calliope.examples.national_scale(override_dict=override)
class TestNationalScaleExampleModelInfeasibility:
    """Check the 'check_feasibility' run mode: expected warnings are
    emitted at build time and no cost/capacity results are produced."""
    def example_tester(self):
        with pytest.warns(calliope.exceptions.ModelWarning) as excinfo:
            model = calliope.examples.national_scale(
                scenario='check_feasibility',
                override_dict={'run.cyclic_storage': False}
            )
        # Feasibility mode ignores the cost objective settings and warns
        # about each unused argument.
        expected_warnings = [
            'Objective function argument `cost_class` given but not used by objective function `check_feasibility`',
            'Objective function argument `sense` given but not used by objective function `check_feasibility`'
        ]
        assert check_error_or_warning(excinfo, expected_warnings)
        model.run()
        # 'other' = solver stopped without an optimal objective; the usual
        # systemwide summary variables must be absent from the results.
        assert model.results.attrs['termination_condition'] == 'other'
        assert 'systemwide_levelised_cost' not in model.results.data_vars
        assert 'systemwide_capacity_factor' not in model.results.data_vars
    def test_nationalscale_example_results_glpk(self):
        self.example_tester()
class TestNationalScaleExampleModelOperate:
    """Check the national-scale model in 'operate' (dispatch-only) mode:
    expected warnings fire and the full timestep range is solved."""
    def example_tester(self):
        with pytest.warns(calliope.exceptions.ModelWarning) as excinfo:
            model = calliope.examples.national_scale(
                override_dict={'model.subset_time': ['2005-01-01', '2005-01-03']},
                scenario='operate')
            model.run()
        # Operate mode relaxes some constraints and warns about each one.
        expected_warnings = [
            'Energy capacity constraint removed from region1::demand_power as force_resource is applied',
            'Energy capacity constraint removed from region2::demand_power as force_resource is applied',
            'Resource capacity constraint defined and set to infinity for all supply_plus techs'
        ]
        assert check_error_or_warning(excinfo, expected_warnings)
        # All hourly timesteps of the subset window must appear in results.
        assert all(model.results.timesteps == pd.date_range('2005-01', '2005-01-03 23:00:00', freq='H'))
    def test_nationalscale_example_results_glpk(self):
        self.example_tester()
class TestNationalScaleResampledExampleModelSenseChecks:
    """Regression checks for the time-resampled national-scale example:
    pinned capacities/costs for the resampled (coarser) timeseries."""
    def example_tester(self, solver='glpk', solver_io=None):
        """Run the resampled model on a single day and assert key result
        values against pinned reference numbers."""
        override = {
            'model.subset_time': '2005-01-01',
            'run.solver': solver,
        }
        if solver_io:
            override['run.solver_io'] = solver_io
        model = calliope.examples.time_resampling(override_dict=override)
        model.run()
        assert model.results.storage_cap.to_pandas()['region1-1::csp'] == approx(23563.444)
        assert model.results.storage_cap.to_pandas()['region2::battery'] == approx(6315.78947)
        assert model.results.energy_cap.to_pandas()['region1-1::csp'] == approx(1440.8377)
        assert model.results.energy_cap.to_pandas()['region2::battery'] == approx(1000)
        assert model.results.energy_cap.to_pandas()['region1::ccgt'] == approx(30000)
        assert float(model.results.cost.sum()) == approx(37344.221869)
        assert float(
            model.results.systemwide_levelised_cost.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.063543, abs=0.000001)
        assert float(
            model.results.systemwide_capacity_factor.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.25, abs=0.000001)
    def test_nationalscale_resampled_example_results_glpk(self):
        self.example_tester()
    def test_nationalscale_resampled_example_results_cbc(self):
        # Check for existence of the `cbc` command
        if shutil.which('cbc'):
            self.example_tester(solver='cbc')
        else:
            pytest.skip('CBC not installed')
class TestNationalScaleClusteredExampleModelSenseChecks:
    """Regression checks for the time-clustered national-scale example,
    covering the 'closest'/'mean' clustering methods, inter-cluster
    storage, cyclic storage, and the no-storage edge case.  Reference
    values from the equivalent full 1-hourly runs are noted inline."""
    def model_runner(self, solver='glpk', solver_io=None,
                     how='closest', storage_inter_cluster=False,
                     cyclic=False, storage=True):
        """Build and run the time-clustering example with the given
        clustering method and storage options; return the solved model."""
        override = {
            'model.time.function_options': {
                'how': how, 'storage_inter_cluster': storage_inter_cluster
            },
            'run.solver': solver,
            'run.cyclic_storage': cyclic
        }
        if storage is False:
            # Disable both storage-capable techs to exercise the
            # storage_inter_cluster + no-storage warning path.
            override.update({
                'techs.battery.exists': False,
                'techs.csp.exists': False
            })
        if solver_io:
            override['run.solver_io'] = solver_io
        model = calliope.examples.time_clustering(override_dict=override)
        model.run()
        return model
    def example_tester_closest(self, solver='glpk', solver_io=None):
        model = self.model_runner(solver=solver, solver_io=solver_io, how='closest')
        # Full 1-hourly model run: 22312488.670967
        assert float(model.results.cost.sum()) == approx(51711873.203096)
        # Full 1-hourly model run: 0.296973
        assert float(
            model.results.systemwide_levelised_cost.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.111456, abs=0.000001)
        # Full 1-hourly model run: 0.064362
        assert float(
            model.results.systemwide_capacity_factor.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.074809, abs=0.000001)
    def example_tester_mean(self, solver='glpk', solver_io=None):
        model = self.model_runner(solver=solver, solver_io=solver_io, how='mean')
        # Full 1-hourly model run: 22312488.670967
        assert float(model.results.cost.sum()) == approx(45110415.5627)
        # Full 1-hourly model run: 0.296973
        assert float(
            model.results.systemwide_levelised_cost.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.126099, abs=0.000001)
        # Full 1-hourly model run: 0.064362
        assert float(
            model.results.systemwide_capacity_factor.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.047596, abs=0.000001)
    def example_tester_storage_inter_cluster(self):
        model = self.model_runner(storage_inter_cluster=True)
        # Full 1-hourly model run: 22312488.670967
        assert float(model.results.cost.sum()) == approx(33353390.222036)
        # Full 1-hourly model run: 0.296973
        assert float(
            model.results.systemwide_levelised_cost.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.115866, abs=0.000001)
        # Full 1-hourly model run: 0.064362
        assert float(
            model.results.systemwide_capacity_factor.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.074167, abs=0.000001)
    def test_nationalscale_clustered_example_closest_results_glpk(self):
        self.example_tester_closest()
    def test_nationalscale_clustered_example_closest_results_cbc(self):
        # Check for existence of the `cbc` command
        if shutil.which('cbc'):
            self.example_tester_closest(solver='cbc')
        else:
            pytest.skip('CBC not installed')
    def test_nationalscale_clustered_example_mean_results_glpk(self):
        self.example_tester_mean()
    def test_nationalscale_clustered_example_mean_results_cbc(self):
        # Check for existence of the `cbc` command
        if shutil.which('cbc'):
            self.example_tester_mean(solver='cbc')
        else:
            pytest.skip('CBC not installed')
    def test_nationalscale_clustered_example_storage_inter_cluster(self):
        self.example_tester_storage_inter_cluster()
    def test_storage_inter_cluster_cyclic(self):
        model = self.model_runner(storage_inter_cluster=True, cyclic=True)
        # Full 1-hourly model run: 22312488.670967
        assert float(model.results.cost.sum()) == approx(18838244.087694)
        # Full 1-hourly model run: 0.296973
        assert float(
            model.results.systemwide_levelised_cost.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.133111, abs=0.000001)
        # Full 1-hourly model run: 0.064362
        assert float(
            model.results.systemwide_capacity_factor.loc[dict(carriers='power')].to_pandas().T['battery']
        ) == approx(0.071411, abs=0.000001)
    def test_storage_inter_cluster_no_storage(self):
        # With both storage techs disabled, the build must warn about
        # each removed tech rather than fail.
        with pytest.warns(calliope.exceptions.ModelWarning) as excinfo:
            self.model_runner(storage_inter_cluster=True, storage=False)
        expected_warnings = [
            'Tech battery was removed by setting ``exists: False``',
            'Tech csp was removed by setting ``exists: False``'
        ]
        assert check_error_or_warning(excinfo, expected_warnings)
class TestUrbanScaleExampleModelSenseChecks:
    """Regression checks for the urban-scale example model family
    (plain, MILP, and operate variants), including solver-dependent
    tolerances for GLPK vs commercial solvers."""
    def example_tester(self, resource_unit, solver='glpk'):
        """Run the urban-scale model with the PV resource expressed in
        the given unit ('per_area' or 'per_cap') and assert key results."""
        unit_override = {
            'techs.pv.constraints': {
                'resource': 'file=pv_resource.csv:{}'.format(resource_unit),
                'resource_unit': 'energy_{}'.format(resource_unit)
            },
            'run.solver': solver
        }
        override = {'model.subset_time': '2005-07-01', **unit_override}
        model = calliope.examples.urban_scale(override_dict=override)
        model.run()
        assert model.results.energy_cap.to_pandas()['X1::chp'] == approx(250.090112)
        # GLPK isn't able to get the same answer both times, so we have to account for that here
        if resource_unit == 'per_cap' and solver == 'glpk':
            heat_pipe_approx = 183.45825
        else:
            heat_pipe_approx = 182.19260
        assert model.results.energy_cap.to_pandas()['X2::heat_pipes:N1'] == approx(heat_pipe_approx)
        assert model.results.carrier_prod.sum('timesteps').to_pandas()['X3::boiler::heat'] == approx(0.18720)
        assert model.results.resource_area.to_pandas()['X2::pv'] == approx(830.064659)
        assert float(model.results.carrier_export.sum()) == approx(122.7156)
        # GLPK doesn't agree with commercial solvers, so we have to account for that here
        cost_sum = 430.097399 if solver == 'glpk' else 430.089188
        assert float(model.results.cost.sum()) == approx(cost_sum)
    def test_urban_example_results_area(self):
        self.example_tester('per_area')
    def test_urban_example_results_area_gurobi(self):
        # Check for existence of the `gurobi` solver
        if shutil.which('gurobi'):
            self.example_tester('per_area', 'gurobi')
        else:
            pytest.skip('Gurobi not installed')
    def test_urban_example_results_cap(self):
        self.example_tester('per_cap')
    def test_urban_example_results_cap_gurobi(self):
        # Check for existence of the `gurobi` solver
        if shutil.which('gurobi'):
            self.example_tester('per_cap', 'gurobi')
        else:
            pytest.skip('Gurobi not installed')
    def test_milp_example_results(self):
        """MILP variant: pin integer purchase/unit decisions and cost."""
        model = calliope.examples.milp(
            override_dict={'model.subset_time': '2005-01-01'}
        )
        model.run()
        assert model.results.energy_cap.to_pandas()['X1::chp'] == 300
        assert model.results.energy_cap.to_pandas()['X2::heat_pipes:N1'] == approx(188.363137)
        assert model.results.carrier_prod.sum('timesteps').to_pandas()['X1::supply_gas::gas'] == approx(12363.173036)
        assert float(model.results.carrier_export.sum()) == approx(0)
        assert model.results.purchased.to_pandas()['X2::boiler'] == 1
        assert model.results.units.to_pandas()['X1::chp'] == 1
        assert float(model.results.operating_units.sum()) == 24
        assert float(model.results.cost.sum()) == approx(540.780779)
    def test_operate_example_results(self):
        """Operate (dispatch-only) variant: expected warnings fire and the
        full timestep range is solved."""
        model = calliope.examples.operate(
            override_dict={'model.subset_time': ['2005-07-01', '2005-07-04']}
        )
        with pytest.warns(calliope.exceptions.ModelWarning) as excinfo:
            model.run()
        expected_warnings = [
            'Energy capacity constraint removed',
            'Resource capacity constraint defined and set to infinity for all supply_plus techs'
        ]
        assert check_error_or_warning(excinfo, expected_warnings)
        assert all(model.results.timesteps == pd.date_range('2005-07', '2005-07-04 23:00:00', freq='H'))
| apache-2.0 |
madmax983/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_DEPRECATED_ecologyGBM.py | 3 | 4166 | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import numpy as np
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
def ecologyGBM():
    """Train GBMs on the ecology dataset with both H2O and scikit-learn
    using matched hyperparameters, score both on the held-out eval set,
    and assert that H2O's AUC is at least as good as scikit-learn's."""
    #Log.info("Importing ecology_model.csv data...\n")
    ecology_train = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/ecology_model.csv"))
    #Log.info("Summary of the ecology data from h2o: \n")
    #ecology.summary()
    # Log.info("==============================")
    # Log.info("H2O GBM Params: ")
    # Log.info("x = ecology_train[2:14]")
    # Log.info("y = ecology_train["Angaus"]")
    # Log.info("ntrees = 100")
    # Log.info("max_depth = 5")
    # Log.info("min_rows = 10")
    # Log.info("learn_rate = 0.1")
    # Log.info("==============================")
    # Log.info("==============================")
    # Log.info("scikit GBM Params: ")
    # Log.info("learning_rate=0.1")
    # Log.info("n_estimators=100")
    # Log.info("max_depth=5")
    # Log.info("min_samples_leaf = 10")
    # Log.info("n.minobsinnode = 10")
    # Log.info("max_features=None")
    # Log.info("==============================")
    # Shared hyperparameters, mapped to each library's naming below.
    ntrees = 100
    max_depth = 5
    min_rows = 10
    learn_rate = 0.1
    # Prepare data for scikit use
    trainData = np.genfromtxt(pyunit_utils.locate("smalldata/gbm_test/ecology_model.csv"),
                              delimiter=',',
                              dtype=None,
                              names=("Site","Angaus","SegSumT","SegTSeas","SegLowFlow","DSDist","DSMaxSlope","USAvgT",
                                     "USRainDays","USSlope","USNative","DSDam","Method","LocSed"),
                              skip_header=1,
                              missing_values=('NA'),
                              filling_values=(np.nan))
    trainDataResponse = trainData["Angaus"]
    # Structured-array view of the predictor columns (Site/Angaus dropped).
    trainDataFeatures = trainData[["SegSumT","SegTSeas","SegLowFlow","DSDist","DSMaxSlope","USAvgT",
                                   "USRainDays","USSlope","USNative","DSDam","Method","LocSed"]]
    # Binary classification: response must be a factor for bernoulli GBM.
    ecology_train["Angaus"] = ecology_train["Angaus"].asfactor()
    # Train H2O GBM Model:
    gbm_h2o = h2o.gbm(x=ecology_train[2:], y=ecology_train["Angaus"], ntrees=ntrees, learn_rate=learn_rate,
                      max_depth=max_depth, min_rows=min_rows, distribution="bernoulli")
    # Train scikit GBM Model:
    gbm_sci = ensemble.GradientBoostingClassifier(learning_rate=learn_rate, n_estimators=ntrees, max_depth=max_depth,
                                                  min_samples_leaf=min_rows, max_features=None)
    # NOTE(review): [:,np.newaxis] on a 1-D structured array reshapes it to
    # a column; presumably relied on for sklearn's 2-D input check - confirm.
    gbm_sci.fit(trainDataFeatures[:,np.newaxis],trainDataResponse)
    # Evaluate the trained models on test data
    # Load the test data (h2o)
    ecology_test = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/ecology_eval.csv"))
    # Load the test data (scikit)
    testData = np.genfromtxt(pyunit_utils.locate("smalldata/gbm_test/ecology_eval.csv"),
                             delimiter=',',
                             dtype=None,
                             names=("Angaus","SegSumT","SegTSeas","SegLowFlow","DSDist","DSMaxSlope","USAvgT",
                                    "USRainDays","USSlope","USNative","DSDam","Method","LocSed"),
                             skip_header=1,
                             missing_values=('NA'),
                             filling_values=(np.nan))
    testDataResponse = testData["Angaus"]
    testDataFeatures = testData[["SegSumT","SegTSeas","SegLowFlow","DSDist","DSMaxSlope","USAvgT",
                                 "USRainDays","USSlope","USNative","DSDam","Method","LocSed"]]
    # Score on the test data and compare results
    # scikit
    auc_sci = roc_auc_score(testDataResponse, gbm_sci.predict_proba(testDataFeatures[:,np.newaxis])[:,1])
    # h2o
    gbm_perf = gbm_h2o.model_performance(ecology_test)
    auc_h2o = gbm_perf.auc()
    #Log.info(paste("scikit AUC:", auc_sci, "\tH2O AUC:", auc_h2o))
    assert auc_h2o >= auc_sci, "h2o (auc) performance degradation, with respect to scikit"
# Run under the pyunit harness when executed directly; when imported by
# the test runner, execute immediately.
if __name__ == "__main__":
    pyunit_utils.standalone_test(ecologyGBM)
else:
    ecologyGBM()
| apache-2.0 |
huzq/scikit-learn | examples/model_selection/plot_grid_search_refit_callable.py | 25 | 3648 | """
==================================================
Balance model complexity and cross-validated score
==================================================
This example balances model complexity and cross-validated score by
finding a decent accuracy within 1 standard deviation of the best accuracy
score while minimising the number of PCA components [1].
The figure shows the trade-off between cross-validated score and the number
of PCA components. The balanced case is when n_components=10 and accuracy=0.88,
which falls into the range within 1 standard deviation of the best accuracy
score.
[1] Hastie, T., Tibshirani, R., & Friedman, J. (2001). Model Assessment and
Selection. The Elements of Statistical Learning (pp. 219-260). New York,
NY, USA: Springer New York Inc..
"""
# Author: Wenhao Zhang <wenhaoz@ucla.edu>
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
def lower_bound(cv_results):
    """Return the best mean CV score minus its standard deviation.

    Parameters
    ----------
    cv_results : dict of numpy(masked) ndarrays
        The ``cv_results_`` attribute of a fitted `GridSearchCV`.

    Returns
    -------
    float
        ``mean_test_score`` of the best candidate minus that candidate's
        ``std_test_score`` -- the acceptance threshold used by
        `best_low_complexity`.
    """
    means = cv_results['mean_test_score']
    stds = cv_results['std_test_score']
    winner = np.argmax(means)
    return means[winner] - stds[winner]
def best_low_complexity(cv_results):
    """Pick the simplest model scoring within 1 std of the best.

    Parameters
    ----------
    cv_results : dict of numpy(masked) ndarrays
        The ``cv_results_`` attribute of a fitted `GridSearchCV`.

    Returns
    -------
    int
        Index of the candidate with the fewest PCA components among all
        candidates whose ``mean_test_score`` reaches the best score minus
        one standard deviation.
    """
    means = cv_results['mean_test_score']
    # Acceptance threshold: best mean score minus its own std
    # (the `lower_bound` helper, inlined).
    winner = np.argmax(means)
    threshold = means[winner] - cv_results['std_test_score'][winner]
    # Every candidate that is "as good as" the best one.
    candidate_idx = np.flatnonzero(means >= threshold)
    # Among those, pick the one using the fewest PCA components.
    n_components = cv_results['param_reduce_dim__n_components'][candidate_idx]
    return candidate_idx[n_components.argmin()]
# PCA -> linear SVM pipeline; the grid searches over the number of PCA
# components and, via `refit=best_low_complexity`, refits the simplest
# candidate within one std of the best score rather than the best itself.
pipe = Pipeline([
    ('reduce_dim', PCA(random_state=42)),
    ('classify', LinearSVC(random_state=42, C=0.01)),
])
param_grid = {
    'reduce_dim__n_components': [6, 8, 10, 12, 14]
}
grid = GridSearchCV(pipe, cv=10, n_jobs=1, param_grid=param_grid,
                    scoring='accuracy', refit=best_low_complexity)
X, y = load_digits(return_X_y=True)
grid.fit(X, y)
n_components = grid.cv_results_['param_reduce_dim__n_components']
test_scores = grid.cv_results_['mean_test_score']
# Bar chart of accuracy per component count, with horizontal guides at
# the best score and at the best-score-minus-one-std threshold.
plt.figure()
plt.bar(n_components, test_scores, width=1.3, color='b')
lower = lower_bound(grid.cv_results_)
plt.axhline(np.max(test_scores), linestyle='--', color='y',
            label='Best score')
plt.axhline(lower, linestyle='--', color='.5', label='Best score - 1 std')
plt.title("Balance model complexity and cross-validated score")
plt.xlabel('Number of PCA components used')
plt.ylabel('Digit classification accuracy')
plt.xticks(n_components.tolist())
plt.ylim((0, 1.0))
plt.legend(loc='upper left')
# Report the candidate actually selected by the custom refit callable.
best_index_ = grid.best_index_
print("The best_index_ is %d" % best_index_)
print("The n_components selected is %d" % n_components[best_index_])
print("The corresponding accuracy score is %.2f"
      % grid.cv_results_['mean_test_score'][best_index_])
plt.show()
| bsd-3-clause |
neurospin/pylearn-epac | epac/sklearn_plugins/estimators.py | 1 | 11147 | """
Estimator wraps an ML procedure into an EPAC Node. To be EPAC compatible, one should
inherit from BaseNode and implement the "transform" method.
InternalEstimator and LeafEstimator aim to provide automatic wrappers for objects
that implement fit and predict methods.
@author: edouard.duchesnay@cea.fr
@author: jinpeng.li@cea.fr
"""
## Abbreviations
## tr: train
## te: test
from epac.utils import _func_get_args_names
from epac.utils import train_test_merge
from epac.utils import train_test_split
from epac.utils import _dict_suffix_keys
from epac.utils import _sub_dict, _as_dict
from epac.configuration import conf
from epac.workflow.wrappers import Wrapper
class Estimator(Wrapper):
    """Estimator Wrapper: Automatically connect wrapped_node.fit and
    wrapped_node.transform to BaseNode.transform
    Parameters
    ----------
    wrapped_node: any class with fit and transform or fit and predict functions
        any class implementing fit and transform
        or implementing fit and predict
    in_args_fit: list of strings
        names of input arguments of the fit method. If missing,
        discover it automatically.
    in_args_transform: list of strings
        names of input arguments of the transform method. If missing,
        discover it automatically.
    in_args_predict: list of strings
        names of input arguments of the predict method. If missing,
        discover it automatically
    Example
    -------
    >>> from sklearn.lda import LDA
    >>> from sklearn import datasets
    >>> from sklearn.feature_selection import SelectKBest
    >>> from sklearn.svm import SVC
    >>> from epac import Pipe
    >>> from epac import CV, Methods
    >>> from epac.sklearn_plugins import Estimator
    >>>
    >>> X, y = datasets.make_classification(n_samples=15,
    ...                                     n_features=10,
    ...                                     n_informative=7,
    ...                                     random_state=5)
    >>> Xy = dict(X=X, y=y)
    >>> lda_estimator = Estimator(LDA())
    >>> lda_estimator.transform(**Xy)
    {'y/true': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/pred': array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1])}
    >>> pipe = Pipe(SelectKBest(k=7), lda_estimator)
    >>> pipe.run(**Xy)
    {'y/true': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/pred': array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1])}
    >>> pipe2 = Pipe(lda_estimator, SVC())
    >>> pipe2.run(**Xy)
    {'y/true': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/pred': array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1])}
    >>> cv = CV(Methods(pipe, SVC()), n_folds=3)
    >>> cv.run(**Xy)
    [[{'y/test/pred': array([0, 0, 0, 0, 0, 0]), 'y/train/pred': array([0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/test/true': array([1, 0, 1, 1, 0, 0])}, {'y/test/pred': array([0, 0, 0, 1, 0, 0]), 'y/train/pred': array([0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/test/true': array([1, 0, 1, 1, 0, 0])}], [{'y/test/pred': array([0, 0, 0, 0, 1]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 0, 1, 1])}, {'y/test/pred': array([1, 0, 0, 1, 1]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 0, 1, 1])}], [{'y/test/pred': array([1, 1, 0, 0]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 1, 1])}, {'y/test/pred': array([0, 0, 1, 0]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 1, 1])}]]
    >>> cv.reduce()
    ResultSet(
    [{'key': SelectKBest/LDA/SVC, 'y/test/score_precision': [ 0.5 0.33333333], 'y/test/score_recall': [ 0.75 0.14285714], 'y/test/score_accuracy': 0.466666666667, 'y/test/score_f1': [ 0.6 0.2], 'y/test/score_recall_mean': 0.446428571429},
    {'key': SVC, 'y/test/score_precision': [ 0.7 0.8], 'y/test/score_recall': [ 0.875 0.57142857], 'y/test/score_accuracy': 0.733333333333, 'y/test/score_f1': [ 0.77777778 0.66666667], 'y/test/score_recall_mean': 0.723214285714}])
    >>> cv2 = CV(Methods(pipe2, SVC()), n_folds=3)
    >>> cv2.run(**Xy)
    [[{'y/test/pred': array([0, 0, 0, 0, 0, 0]), 'y/train/pred': array([0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/test/true': array([1, 0, 1, 1, 0, 0])}, {'y/test/pred': array([0, 0, 0, 1, 0, 0]), 'y/train/pred': array([0, 0, 0, 0, 1, 0, 1, 1, 1]), 'y/test/true': array([1, 0, 1, 1, 0, 0])}], [{'y/test/pred': array([0, 0, 0, 0, 0]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 0, 1, 1])}, {'y/test/pred': array([1, 0, 0, 1, 1]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 0, 1, 1])}], [{'y/test/pred': array([1, 1, 0, 0]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 1, 1])}, {'y/test/pred': array([0, 0, 1, 0]), 'y/train/pred': array([1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1]), 'y/test/true': array([0, 0, 1, 1])}]]
    >>> cv2.reduce()
    ResultSet(
    [{'key': LDA/SVC, 'y/test/score_precision': [ 0.46153846 0. ], 'y/test/score_recall': [ 0.75 0. ], 'y/test/score_accuracy': 0.4, 'y/test/score_f1': [ 0.57142857 0. ], 'y/test/score_recall_mean': 0.375},
    {'key': SVC, 'y/test/score_precision': [ 0.7 0.8], 'y/test/score_recall': [ 0.875 0.57142857], 'y/test/score_accuracy': 0.733333333333, 'y/test/score_f1': [ 0.77777778 0.66666667], 'y/test/score_recall_mean': 0.723214285714}])
    """
    def __init__(self,
                 wrapped_node,
                 in_args_fit=None,
                 in_args_transform=None,
                 in_args_predict=None,
                 out_args_predict=None):
        # Accept any object offering fit+transform (internal node) or
        # fit+predict (leaf node).
        is_fit_estimator = False
        if hasattr(wrapped_node, "fit") and hasattr(wrapped_node, "transform"):
            is_fit_estimator = True
        elif hasattr(wrapped_node, "fit") and hasattr(wrapped_node, "predict"):
            is_fit_estimator = True
        if not is_fit_estimator:
            raise ValueError("%s should implement fit and transform or fit "
                             "and predict" % wrapped_node.__class__.__name__)
        super(Estimator, self).__init__(wrapped_node=wrapped_node)
        # Argument names of fit: given explicitly or introspected.
        if in_args_fit:
            self.in_args_fit = in_args_fit
        else:
            self.in_args_fit = _func_get_args_names(self.wrapped_node.fit)
        # Internal Estimator
        if hasattr(wrapped_node, "transform"):
            if in_args_transform:
                self.in_args_transform = in_args_transform
            else:
                self.in_args_transform = \
                    _func_get_args_names(self.wrapped_node.transform)
        # Leaf Estimator
        if hasattr(wrapped_node, "predict"):
            if in_args_predict:
                self.in_args_predict = in_args_predict
            else:
                self.in_args_predict = \
                    _func_get_args_names(self.wrapped_node.predict)
            if out_args_predict is None:
                # Default predict outputs: fit arguments that are not also
                # predict inputs (typically "y"); fall back to the predict
                # inputs when that difference is empty.
                fit_predict_diff = list(set(self.in_args_fit).difference(
                    self.in_args_predict))
                if len(fit_predict_diff) > 0:
                    self.out_args_predict = fit_predict_diff
                else:
                    self.out_args_predict = self.in_args_predict
            else:
                self.out_args_predict = out_args_predict
    def _wrapped_node_transform(self, **Xy):
        # Call the wrapped transform on the subset of Xy it expects and wrap
        # the result back into a dict keyed like its inputs.
        Xy_out = _as_dict(self.wrapped_node.transform(
            **_sub_dict(Xy, self.in_args_transform)),
            keys=self.in_args_transform)
        return Xy_out
    def _wrapped_node_predict(self, **Xy):
        # Call the wrapped predict on the subset of Xy it expects and wrap
        # the result back into a dict keyed by the declared outputs.
        Xy_out = _as_dict(self.wrapped_node.predict(
            **_sub_dict(Xy, self.in_args_predict)),
            keys=self.out_args_predict)
        return Xy_out
    def transform(self, **Xy):
        """
        Parameter
        ---------
        Xy: dictionary
            parameters for fit and transform
        """
        # Decide the role of this node: an estimator with both transform and
        # predict acts as a predictor only when it is a leaf of the workflow.
        is_fit_predict = False
        is_fit_transform = False
        if (hasattr(self.wrapped_node, "transform") and
                hasattr(self.wrapped_node, "predict")):
            if not self.children:
                # leaf node
                is_fit_predict = True
            else:
                # internal node
                is_fit_transform = True
        elif hasattr(self.wrapped_node, "transform"):
            is_fit_transform = True
        elif hasattr(self.wrapped_node, "predict"):
            is_fit_predict = True
        if is_fit_transform:
            Xy_train, Xy_test = train_test_split(Xy)
            # If Xy carries a train/test split, fit on train and transform
            # both parts; otherwise fit and transform the whole dict.
            if Xy_train is not Xy_test:
                res = self.wrapped_node.fit(**_sub_dict(Xy_train,
                                                        self.in_args_fit))
                Xy_out_tr = self._wrapped_node_transform(**Xy_train)
                Xy_out_te = self._wrapped_node_transform(**Xy_test)
                Xy_out = train_test_merge(Xy_out_tr, Xy_out_te)
            else:
                res = self.wrapped_node.fit(**_sub_dict(Xy, self.in_args_fit))
                Xy_out = self._wrapped_node_transform(**Xy)
            # update ds with transformed values
            Xy.update(Xy_out)
            return Xy
        elif is_fit_predict:
            Xy_train, Xy_test = train_test_split(Xy)
            if Xy_train is not Xy_test:
                # Train/test split present: fit on train, predict on both
                # parts, and keep the true test values for later scoring.
                Xy_out = dict()
                res = self.wrapped_node.fit(**_sub_dict(Xy_train,
                                                        self.in_args_fit))
                Xy_out_tr = self._wrapped_node_predict(**Xy_train)
                Xy_out_tr = _dict_suffix_keys(
                    Xy_out_tr,
                    suffix=conf.SEP + conf.TRAIN + conf.SEP + conf.PREDICTION)
                Xy_out.update(Xy_out_tr)
                # Test predict
                Xy_out_te = self._wrapped_node_predict(**Xy_test)
                Xy_out_te = _dict_suffix_keys(
                    Xy_out_te,
                    suffix=conf.SEP + conf.TEST + conf.SEP + conf.PREDICTION)
                Xy_out.update(Xy_out_te)
                ## True test
                Xy_test_true = _sub_dict(Xy_test, self.out_args_predict)
                Xy_out_true = _dict_suffix_keys(
                    Xy_test_true,
                    suffix=conf.SEP + conf.TEST + conf.SEP + conf.TRUE)
                Xy_out.update(Xy_out_true)
            else:
                # No split: fit and predict on the same data.
                res = self.wrapped_node.fit(**_sub_dict(Xy, self.in_args_fit))
                Xy_out = self._wrapped_node_predict(**Xy)
                Xy_out = _dict_suffix_keys(
                    Xy_out,
                    suffix=conf.SEP + conf.PREDICTION)
                ## True test
                Xy_true = _sub_dict(Xy, self.out_args_predict)
                Xy_out_true = _dict_suffix_keys(
                    Xy_true,
                    suffix=conf.SEP + conf.TRUE)
                Xy_out.update(Xy_out_true)
            return Xy_out
        else:
            raise ValueError("%s should implement either transform or predict"
                             % self.wrapped_node.__class__.__name__)
# Run the doctests embedded in the docstrings above when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| bsd-3-clause |
maminian/skewtools | scripts/hist_panels.py | 1 | 1838 | import scripts.skewtools as st
from numpy import linspace,unique,shape,size,abs,argmin,mod
import glob
import matplotlib.pyplot as pyplot
import sys
# NOTE: this is a Python 2 script (print statements and integer '/' below).
# Collect and sort the data files matching the shell glob argument.
fl = glob.glob(sys.argv[1])
fl.sort()
Pes = st.gatherDataFromFiles(fl,'Peclet')
aratios = st.gatherDataFromFiles(fl,'aratio')
hcs = st.gatherDataFromFiles(fl,'Hist_centers')
hhs = st.gatherDataFromFiles(fl,'Hist_heights')
# Assume the arrays share a common time stepping.
t = st.gatherDataFromFiles([fl[0]],'Time')[0]
# m*n histogram panels laid out on an m-by-n grid of subplots.
m,n = 4,4
#tidxs = linspace(size(t)/2.,size(t)-1,m*n).astype('int')
tidxs = linspace(0,size(t)-1,m*n).astype('int')
for i in range(len(tidxs)):
    print 'tau=%.2g'%t[tidxs[i]]
# end for
fig,ax = pyplot.subplots(m,n,figsize=(3*m,3*n))
alist = unique(aratios)
alist.sort()
# One colour per distinct aspect ratio, taken from the gnuplot2 colormap.
mycm=pyplot.cm.gnuplot2(linspace(0,0.7,len(alist)))
# for outlining the time in the figures..
props = dict(boxstyle='round',facecolor='white',alpha=0.8)
for idx in range(m*n):
    # Map the flat panel index to grid coordinates (i, j).
    j = idx%n
    i = (idx-j)/n
    for k in range(len(fl)):
        # Colour each file's histogram by its nearest aspect ratio.
        cidx = argmin(abs(alist-aratios[k]))
        ax[i,j].step(hcs[k][:,tidxs[idx]],hhs[k][:,tidxs[idx]],where='mid',lw=0.5,color=mycm[cidx],alpha=0.4)
    # end for
    ax[i,j].set_yticklabels([])
    ax[i,j].set_xticklabels([])
    ax[i,j].grid()
    ax[i,j].text(0.05,0.8,r'$\tau=%.2g$'%t[tidxs[idx]],transform=ax[i,j].transAxes,
        color='black',fontsize=16,ha='left',va='center',bbox=props)
    # end for
    if (idx==0):
        # Invisible proxy plots so the legend shows one entry per ratio.
        for p in range(len(alist)):
            ax[i,j].plot(0,0,lw=1,color=mycm[p],label=r'$\lambda=%.4f$'%alist[p])
        # end for
    # end if
# end for
ax[0,0].legend(loc='lower right')
fig.suptitle(r'$Pe = %i$'%Pes[0],ha='center',va='center',fontsize=36)
pyplot.tight_layout()
pyplot.subplots_adjust(top=0.95)
pyplot.savefig('hist_panels.png',dpi=120,bbox_inches='tight')
pyplot.show(block=False)
JeffAbrahamson/gtd | cluster_tfidf_example.py | 1 | 6537 | #!/usr/bin/env python3
"""Cluster data by tf-idf.
"""
from __future__ import print_function
from lib_gtd import gtd_load
from sklearn import metrics
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans, MiniBatchKMeans
from optparse import OptionParser
from time import time
import logging
import sys
import numpy as np
def load_data(input_filename):
    """Return the unique task labels stored in *input_filename*.

    Duplicates are dropped; how many times a window was in front is
    irrelevant here.
    """
    tasks = gtd_load(input_filename, 'tasks')
    all_labels = tasks.label
    unique_labels = all_labels.unique()
    print('Got {cnt} text labels, of which {ucnt} unique'.format(
        cnt=len(all_labels), ucnt=len(unique_labels)))
    return unique_labels
def do_cluster(opts, args):
    """Vectorize the task labels and cluster them with (MiniBatch)KMeans.

    ``opts`` comes from the OptionParser built in main(); ``args`` is unused.
    NOTE(review): ``opts.n_components`` is used both as the LSA dimension and
    as the number of clusters below, so --lsa must be given for the KMeans
    construction to receive a valid n_clusters -- confirm intended.
    """
    labels = load_data(opts.input_filename)
    print("Extracting features from the training dataset using a sparse vectorizer")
    t0 = time()
    if opts.use_hashing:
        if opts.use_idf:
            # Perform an IDF normalization on the output of HashingVectorizer
            hasher = HashingVectorizer(#n_features=opts.n_features,
                                       analyzer='word',
                                       #stop_words='english',
                                       non_negative=True,
                                       norm=None, binary=False)
            vectorizer = make_pipeline(hasher, TfidfTransformer())
        else:
            vectorizer = HashingVectorizer(#n_features=opts.n_features,
                                           analyzer='word',
                                           #stop_words='english',
                                           non_negative=False, norm='l2',
                                           binary=False)
    else:
        vectorizer = TfidfVectorizer(max_df=0.5, #max_features=opts.n_features,
                                     analyzer='word',
                                     min_df=2,
                                     #stop_words='english',
                                     use_idf=opts.use_idf)
    X = vectorizer.fit_transform(labels)
    print("done in %fs" % (time() - t0))
    print("n_samples: %d, n_features: %d" % X.shape)
    print()
    if opts.n_components:
        print("Performing dimensionality reduction using LSA")
        t0 = time()
        # Vectorizer results are normalized, which makes KMeans behave as
        # spherical k-means for better results. Since LSA/SVD results are
        # not normalized, we have to redo the normalization.
        svd = TruncatedSVD(opts.n_components)
        normalizer = Normalizer(copy=False)
        lsa = make_pipeline(svd, normalizer)
        X = lsa.fit_transform(X)
        print("done in %fs" % (time() - t0))
        explained_variance = svd.explained_variance_ratio_.sum()
        print("Explained variance of the SVD step: {}%".format(
            int(explained_variance * 100)))
        print()
    ###############################################################################
    # Do the actual clustering
    if opts.minibatch:
        km = MiniBatchKMeans(n_clusters=opts.n_components, init='k-means++', n_init=1,
                             init_size=1000, batch_size=1000, verbose=opts.verbose)
    else:
        km = KMeans(n_clusters=opts.n_components, init='k-means++', max_iter=100, n_init=1,
                    verbose=opts.verbose)
    print("Clustering sparse data with %s" % km)
    t0 = time()
    km.fit(X)
    print("done in %0.3fs" % (time() - t0))
    print()
    # NOTE(review): the "ground truth" passed to these supervised metrics is
    # the raw text labels that were also used to build X, so the scores are
    # of limited diagnostic value here.
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
    print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
    print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
    print("Adjusted Rand-Index: %.3f"
          % metrics.adjusted_rand_score(labels, km.labels_))
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(X, km.labels_, sample_size=1000))
    print()
    if not opts.use_hashing:
        print("Top terms per cluster:")
        # Map centroids back to term space when LSA was applied.
        if opts.n_components:
            original_space_centroids = svd.inverse_transform(km.cluster_centers_)
            order_centroids = original_space_centroids.argsort()[:, ::-1]
        else:
            order_centroids = km.cluster_centers_.argsort()[:, ::-1]
        terms = vectorizer.get_feature_names()
        for i in range(opts.n_components):
            print("Cluster %d:" % i, end='')
            for ind in order_centroids[i, :10]:
                print(' %s' % terms[ind], end='')
            print()
def main():
    """Parse args and then go cluster.
    """
    # Display progress logs on stdout
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')
    op = OptionParser()
    op.add_option("--input-filename", dest="input_filename",
                  help="Name of file for input (without .tasks suffix)",
                  default="/tmp/gtd-data",
                  metavar="FILE")
    # --lsa doubles as the number of clusters in do_cluster().
    op.add_option("--lsa",
                  dest="n_components", type="int",
                  help="Preprocess documents with latent semantic analysis.")
    op.add_option("--no-minibatch",
                  action="store_false", dest="minibatch", default=True,
                  help="Use ordinary k-means algorithm (in batch mode).")
    op.add_option("--no-idf",
                  action="store_false", dest="use_idf", default=True,
                  help="Disable Inverse Document Frequency feature weighting.")
    op.add_option("--use-hashing",
                  action="store_true", default=False,
                  help="Use a hashing feature vectorizer")
    op.add_option("--n-features", type=int, default=10000,
                  help="Maximum number of features (dimensions)"
                  " to extract from text.")
    op.add_option("--verbose",
                  action="store_true", dest="verbose", default=False,
                  help="Print progress reports inside k-means algorithm.")
    (opts, args) = op.parse_args()
    # This script takes options only; any positional argument is an error.
    if len(args) > 0:
        op.error("this script takes no arguments.")
        sys.exit(1)
    do_cluster(opts, args)
if __name__ == '__main__':
    main()
| gpl-2.0 |
heplesser/nest-simulator | pynest/examples/brette_gerstner_fig_3d.py | 8 | 3010 | # -*- coding: utf-8 -*-
#
# brette_gerstner_fig_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Testing the adapting exponential integrate and fire model in NEST (Brette and Gerstner Fig 3D)
----------------------------------------------------------------------------------------------
This example tests the adaptive integrate and fire model (AdEx) according to
Brette and Gerstner [1]_ reproduces Figure 3D of the paper.
Note that Brette and Gerstner give the value for `b` in `nA`.
To be consistent with the other parameters in the equations, `b` must be
converted to `pA` (pico Ampere).
References
~~~~~~~~~~
.. [1] Brette R and Gerstner W (2005). Adaptive exponential integrate-and-fire model as an effective
description of neuronal activity J. Neurophysiology. https://doi.org/10.1152/jn.00686.2005
"""
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# First we make sure that the resolution of the simulation is 0.1 ms. This is
# important, since the slope of the action potential is very steep.
res = 0.1
nest.SetKernelStatus({"resolution": res})
neuron = nest.Create("aeif_cond_exp")
###############################################################################
# Set the parameters of the neuron according to the paper.
# (b is in pA here; the paper quotes nA -- see the module docstring above.)
neuron.set(V_peak=20., E_L=-60.0, a=80.0, b=80.5, tau_w=720.0)
###############################################################################
# Create and configure the stimulus which is a step current.
dc = nest.Create("dc_generator")
dc.set(amplitude=-800.0, start=0.0, stop=400.0)
###############################################################################
# We connect the DC generators.
nest.Connect(dc, neuron, 'all_to_all')
###############################################################################
# And add a ``voltmeter`` to sample the membrane potentials from the neuron
# in intervals of 0.1 ms.
voltmeter = nest.Create("voltmeter", params={'interval': 0.1})
nest.Connect(voltmeter, neuron)
###############################################################################
# Finally, we simulate for 1000 ms and plot a voltage trace to produce the
# figure.
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
plt.axis([0, 1000, -85, 0])
plt.show()
| gpl-2.0 |
phobson/statsmodels | statsmodels/tsa/base/tests/test_datetools.py | 28 | 5620 | from datetime import datetime
import numpy.testing as npt
from statsmodels.tsa.base.datetools import (_date_from_idx,
_idx_from_dates, date_parser, date_range_str, dates_from_str,
dates_from_range, _infer_freq, _freq_to_pandas)
from pandas import DatetimeIndex, PeriodIndex
def test_date_from_idx():
    """_date_from_idx should step 15 periods forward at every frequency."""
    start = datetime(2008, 12, 31)
    offset = 15
    expected = {'Q': datetime(2012, 9, 30),
                'A': datetime(2023, 12, 31),
                'B': datetime(2009, 1, 21),
                'D': datetime(2009, 1, 15),
                'W': datetime(2009, 4, 12),
                'M': datetime(2010, 3, 31)}
    for freq, end in expected.items():
        npt.assert_equal(_date_from_idx(start, offset, freq), end)
def test_idx_from_date():
    """_idx_from_dates should recover the 15-period offset at each freq."""
    start = datetime(2008, 12, 31)
    offset = 15
    cases = [(datetime(2012, 9, 30), 'Q'),
             (datetime(2023, 12, 31), 'A'),
             (datetime(2009, 1, 21), 'B'),
             (datetime(2009, 1, 15), 'D'),
             (datetime(2010, 3, 31), 'M')]
    for end, freq in cases:
        npt.assert_equal(_idx_from_dates(start, end, freq), offset)
    # Weekly: move both endpoints forward to end of week.
    npt.assert_equal(_idx_from_dates(datetime(2009, 1, 4),
                                     datetime(2009, 4, 17), 'W'), offset - 1)
def test_regex_matching_month():
    """date_parser should accept all four month-notation variants."""
    expected = datetime(1999, 4, 30)
    for token in ("1999m4", "1999:m4", "1999:mIV", "1999mIV"):
        npt.assert_equal(date_parser(token), expected)
def test_regex_matching_quarter():
    """date_parser should accept all four quarter-notation variants."""
    expected = datetime(1999, 12, 31)
    for token in ("1999q4", "1999:q4", "1999:qIV", "1999qIV"):
        npt.assert_equal(date_parser(token), expected)
def test_dates_from_range():
    """Check dates_from_range for quarterly and monthly string ranges."""
    results = [datetime(1959, 3, 31, 0, 0),
               datetime(1959, 6, 30, 0, 0),
               datetime(1959, 9, 30, 0, 0),
               datetime(1959, 12, 31, 0, 0),
               datetime(1960, 3, 31, 0, 0),
               datetime(1960, 6, 30, 0, 0),
               datetime(1960, 9, 30, 0, 0),
               datetime(1960, 12, 31, 0, 0),
               datetime(1961, 3, 31, 0, 0),
               datetime(1961, 6, 30, 0, 0),
               datetime(1961, 9, 30, 0, 0),
               datetime(1961, 12, 31, 0, 0),
               datetime(1962, 3, 31, 0, 0),
               datetime(1962, 6, 30, 0, 0)]
    dt_range = dates_from_range('1959q1', '1962q2')
    npt.assert_(results == dt_range)
    # test with starting period not the first with length
    results = results[2:]
    dt_range = dates_from_range('1959q3', length=len(results))
    npt.assert_(results == dt_range)
    # check month
    # BUG FIX: the expected list previously omitted 1960-11-30 and used
    # 1960-02-28 although 1960 is a leap year (month-end is Feb 29), and the
    # generated range was never asserted against, so the monthly branch
    # tested nothing.
    results = [datetime(1959, 3, 31, 0, 0),
               datetime(1959, 4, 30, 0, 0),
               datetime(1959, 5, 31, 0, 0),
               datetime(1959, 6, 30, 0, 0),
               datetime(1959, 7, 31, 0, 0),
               datetime(1959, 8, 31, 0, 0),
               datetime(1959, 9, 30, 0, 0),
               datetime(1959, 10, 31, 0, 0),
               datetime(1959, 11, 30, 0, 0),
               datetime(1959, 12, 31, 0, 0),
               datetime(1960, 1, 31, 0, 0),
               datetime(1960, 2, 29, 0, 0),
               datetime(1960, 3, 31, 0, 0),
               datetime(1960, 4, 30, 0, 0),
               datetime(1960, 5, 31, 0, 0),
               datetime(1960, 6, 30, 0, 0),
               datetime(1960, 7, 31, 0, 0),
               datetime(1960, 8, 31, 0, 0),
               datetime(1960, 9, 30, 0, 0),
               datetime(1960, 10, 31, 0, 0),
               datetime(1960, 11, 30, 0, 0),
               datetime(1960, 12, 31, 0, 0),
               datetime(1961, 1, 31, 0, 0),
               datetime(1961, 2, 28, 0, 0),
               datetime(1961, 3, 31, 0, 0),
               datetime(1961, 4, 30, 0, 0),
               datetime(1961, 5, 31, 0, 0),
               datetime(1961, 6, 30, 0, 0),
               datetime(1961, 7, 31, 0, 0),
               datetime(1961, 8, 31, 0, 0),
               datetime(1961, 9, 30, 0, 0),
               datetime(1961, 10, 31, 0, 0)]
    dt_range = dates_from_range("1959m3", length=len(results))
    npt.assert_(results == dt_range)
def test_infer_freq():
    # Build value arrays at each supported frequency over the same span and
    # check _infer_freq recovers the pandas frequency string, including from
    # short (3-element) prefixes and mid-series slices.
    d1 = datetime(2008, 12, 31)
    d2 = datetime(2012, 9, 30)
    b = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['B']).values
    d = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['D']).values
    w = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['W']).values
    m = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['M']).values
    a = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['A']).values
    q = DatetimeIndex(start=d1, end=d2, freq=_freq_to_pandas['Q']).values
    assert _infer_freq(w) == 'W-SUN'
    assert _infer_freq(a) == 'A-DEC'
    assert _infer_freq(q) == 'Q-DEC'
    assert _infer_freq(w[:3]) == 'W-SUN'
    assert _infer_freq(a[:3]) == 'A-DEC'
    assert _infer_freq(q[:3]) == 'Q-DEC'
    assert _infer_freq(b[2:5]) == 'B'
    # NOTE(review): the first three business days here are presumably also
    # consecutive calendar days, hence 'D' for this prefix -- confirm.
    assert _infer_freq(b[:3]) == 'D'
    assert _infer_freq(b) == 'B'
    assert _infer_freq(d) == 'D'
    assert _infer_freq(m) == 'M'
    assert _infer_freq(d[:3]) == 'D'
    assert _infer_freq(m[:3]) == 'M'
def test_period_index():
    """_infer_freq should read the frequency directly off a PeriodIndex."""
    monthly = PeriodIndex(start="1/1/1990", periods=20, freq="M")
    npt.assert_(_infer_freq(monthly) == "M")
| bsd-3-clause |
ninotoshi/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn.py | 1 | 13706 | """Deep Neural Network estimators."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
from tensorflow.python.ops import nn
# TODO(ipolosukhin): Merge thirdparty DNN with this.
class DNNClassifier(dnn_linear_combined.DNNLinearCombinedClassifier):
  """A classifier for TensorFlow DNN models.
  Example:
  ```
  installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
  impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)
  installed_emb = embedding_column(installed_app_id, dimension=16,
                                   combiner="sum")
  impression_emb = embedding_column(impression_app_id, dimension=16,
                                    combiner="sum")
  estimator = DNNClassifier(
      feature_columns=[installed_emb, impression_emb],
      hidden_units=[1024, 512, 256])
  # Input builders
  def input_fn_train: # returns X, Y
    pass
  estimator.train(input_fn_train)
  def input_fn_eval: # returns X, Y
    pass
  estimator.evaluate(input_fn_eval)
  estimator.predict(x)
  ```
  Input of `fit`, `train`, and `evaluate` should have following features,
    otherwise there will be a `KeyError`:
      if `weight_column_name` is not `None`, a feature with
        `key=weight_column_name` whose value is a `Tensor`.
      for each `column` in `feature_columns`:
      - if `column` is a `SparseColumn`, a feature with `key=column.name`
        whose `value` is a `SparseTensor`.
      - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
        whose `value` is a `Tensor`.
      - if `feature_columns` is None, then `input` must contains only real
        valued `Tensor`.
  Parameters:
    hidden_units: List of hidden units per layer. All layers are fully
      connected. Ex. [64, 32] means first layer has 64 nodes and second one has
      32.
    feature_columns: An iterable containing all the feature columns used by the
      model. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    model_dir: Directory to save model parameters, graph and etc.
    n_classes: number of target classes. Default is binary classification.
      It must be greater than 1.
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    optimizer: An instance of `tf.Optimizer` used to train the model. If `None`,
      will use an Adagrad optimizer.
    activation_fn: Activation function applied to each layer. If `None`, will
      use `tf.nn.relu`.
  """
  def __init__(self,
               hidden_units,
               feature_columns=None,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu):
    # NOTE(review): model_dir is accepted but never forwarded to the
    # superclass constructor, so it appears to be silently ignored -- confirm.
    super(DNNClassifier, self).__init__(n_classes=n_classes,
                                        weight_column_name=weight_column_name,
                                        dnn_feature_columns=feature_columns,
                                        dnn_optimizer=optimizer,
                                        dnn_hidden_units=hidden_units,
                                        dnn_activation_fn=activation_fn)
  def _get_train_ops(self, features, targets):
    """See base class."""
    # Infer real-valued feature columns from the inputs when the caller did
    # not supply any.
    if self._dnn_feature_columns is None:
      self._dnn_feature_columns = layers.infer_real_valued_columns(features)
    return super(DNNClassifier, self)._get_train_ops(features, targets)
class DNNRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
  """A regressor for TensorFlow DNN models.
  Example:
  ```
  installed_app_id = sparse_column_with_hash_bucket("installed_id", 1e6)
  impression_app_id = sparse_column_with_hash_bucket("impression_id", 1e6)
  installed_emb = embedding_column(installed_app_id, dimension=16,
                                   combiner="sum")
  impression_emb = embedding_column(impression_app_id, dimension=16,
                                    combiner="sum")
  estimator = DNNRegressor(
      feature_columns=[installed_emb, impression_emb],
      hidden_units=[1024, 512, 256])
  # Input builders
  def input_fn_train: # returns X, Y
    pass
  estimator.train(input_fn_train)
  def input_fn_eval: # returns X, Y
    pass
  estimator.evaluate(input_fn_eval)
  estimator.predict(x)
  ```
  Input of `fit`, `train`, and `evaluate` should have following features,
    otherwise there will be a `KeyError`:
      if `weight_column_name` is not `None`, a feature with
        `key=weight_column_name` whose value is a `Tensor`.
      for each `column` in `feature_columns`:
      - if `column` is a `SparseColumn`, a feature with `key=column.name`
        whose `value` is a `SparseTensor`.
      - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
        whose `value` is a `Tensor`.
      - if `feature_columns` is None, then `input` must contains only real
        valued `Tensor`.
  Parameters:
    hidden_units: List of hidden units per layer. All layers are fully
      connected. Ex. [64, 32] means first layer has 64 nodes and second one has
      32.
    feature_columns: An iterable containing all the feature columns used by the
      model. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    model_dir: Directory to save model parameters, graph and etc.
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    optimizer: An instance of `tf.Optimizer` used to train the model. If `None`,
      will use an Adagrad optimizer.
    activation_fn: Activation function applied to each layer. If `None`, will
      use `tf.nn.relu`.
  """
  def __init__(self,
               hidden_units,
               feature_columns=None,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu):
    # NOTE(review): model_dir is accepted but never forwarded to the
    # superclass constructor, so it appears to be silently ignored -- confirm.
    super(DNNRegressor, self).__init__(weight_column_name=weight_column_name,
                                       dnn_feature_columns=feature_columns,
                                       dnn_optimizer=optimizer,
                                       dnn_hidden_units=hidden_units,
                                       dnn_activation_fn=activation_fn)
  def _get_train_ops(self, features, targets):
    """See base class."""
    # Infer real-valued feature columns from the inputs when the caller did
    # not supply any.
    if self._dnn_feature_columns is None:
      self._dnn_feature_columns = layers.infer_real_valued_columns(features)
    return super(DNNRegressor, self)._get_train_ops(features, targets)
# TODO(ipolosukhin): Deprecate this class in favor of DNNClassifier.
class TensorFlowDNNClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):
  """TensorFlow DNN Classifier model.
  Parameters:
    hidden_units: List of hidden units per layer.
    n_classes: Number of classes in the target.
    batch_size: Mini batch size.
    steps: Number of steps to run over data.
    optimizer: Optimizer name (or class), for example "SGD", "Adam", "Adagrad".
    learning_rate: If this is constant float value, no decay function is used.
      Instead, a customized decay function can be passed that accepts
      global_step as parameter and returns a Tensor.
      e.g. exponential decay function:
      def exp_decay(global_step):
          return tf.train.exponential_decay(
              learning_rate=0.1, global_step,
              decay_steps=2, decay_rate=0.001)
    class_weight: None or list of n_classes floats. Weight associated with
      classes for loss computation. If not given, all classes are
      supposed to have weight one.
    continue_training: when continue_training is True, once initialized
      model will be continuely trained on every call of fit.
    config: RunConfig object that controls the configurations of the
      session, e.g. num_cores, gpu_memory_fraction, etc.
    dropout: When not None, the probability we will drop out a given coordinate.
  """
  def __init__(self,
               hidden_units,
               n_classes,
               batch_size=32,
               steps=200,
               optimizer='Adagrad',
               learning_rate=0.1,
               class_weight=None,
               clip_gradients=5.0,
               continue_training=False,
               config=None,
               verbose=1,
               dropout=None):
    # hidden_units/dropout are stored on the instance because _model_fn is
    # evaluated lazily by the base class.
    self.hidden_units = hidden_units
    self.dropout = dropout
    super(TensorFlowDNNClassifier, self).__init__(
        model_fn=self._model_fn,
        n_classes=n_classes,
        batch_size=batch_size,
        steps=steps,
        optimizer=optimizer,
        learning_rate=learning_rate,
        class_weight=class_weight,
        clip_gradients=clip_gradients,
        continue_training=continue_training,
        config=config,
        verbose=verbose)
  def _model_fn(self, X, y):
    # DNN feature layers followed by a logistic-regression output layer.
    return models.get_dnn_model(self.hidden_units,
                                models.logistic_regression,
                                dropout=self.dropout)(X, y)
  @property
  def weights_(self):
    """Returns weights of the DNN weight layers."""
    return [self.get_tensor_value(w.name)
            for w in self._graph.get_collection('dnn_weights')
           ] + [self.get_tensor_value('logistic_regression/weights')]
  @property
  def bias_(self):
    """Returns bias of the DNN's bias layers."""
    return [self.get_tensor_value(b.name)
            for b in self._graph.get_collection('dnn_biases')
           ] + [self.get_tensor_value('logistic_regression/bias')]
class TensorFlowDNNRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):
    """Deep neural network regressor.

    Parameters:
        hidden_units: List of hidden units per layer.
        batch_size: Mini batch size.
        steps: Number of steps to run over data.
        optimizer: Optimizer name (or class), for example "SGD", "Adam",
            "Adagrad".
        learning_rate: Either a constant float, or a callable that accepts
            global_step and returns a decayed learning-rate Tensor, e.g.:
            def exp_decay(global_step):
                return tf.train.exponential_decay(
                    learning_rate=0.1, global_step,
                    decay_steps=2, decay_rate=0.001)
        continue_training: If True, an already-initialized model keeps
            training on every call of fit.
        config: RunConfig object controlling session settings such as
            num_cores and gpu_memory_fraction.
        verbose: Verbosity level; 0 mutes output, 1 prints training
            progress, 2 additionally logs device placement.
        dropout: Probability of dropping out a given coordinate, or None
            to disable dropout.
    """

    def __init__(self,
                 hidden_units,
                 n_classes=0,
                 batch_size=32,
                 steps=200,
                 optimizer='Adagrad',
                 learning_rate=0.1,
                 clip_gradients=5.0,
                 continue_training=False,
                 config=None,
                 verbose=1,
                 dropout=None):
        # Layer sizes and dropout are used locally by _model_fn; all other
        # options configure the generic estimator machinery.
        self.hidden_units = hidden_units
        self.dropout = dropout
        estimator_args = dict(model_fn=self._model_fn,
                              n_classes=n_classes,
                              batch_size=batch_size,
                              steps=steps,
                              optimizer=optimizer,
                              learning_rate=learning_rate,
                              clip_gradients=clip_gradients,
                              continue_training=continue_training,
                              config=config,
                              verbose=verbose)
        super(TensorFlowDNNRegressor, self).__init__(**estimator_args)

    def _model_fn(self, X, y):
        """Build the regressor graph: DNN feature stack + linear head."""
        network = models.get_dnn_model(self.hidden_units,
                                       models.linear_regression,
                                       dropout=self.dropout)
        return network(X, y)

    @property
    def weights_(self):
        """Returns weights of the DNN weight layers."""
        # Hidden-layer weights first, linear output layer weights last.
        hidden = [self.get_tensor_value(w.name)
                  for w in self._graph.get_collection('dnn_weights')]
        return hidden + [self.get_tensor_value('linear_regression/weights')]

    @property
    def bias_(self):
        """Returns bias of the DNN's bias layers."""
        # Hidden-layer biases first, linear output layer bias last.
        hidden = [self.get_tensor_value(b.name)
                  for b in self._graph.get_collection('dnn_biases')]
        return hidden + [self.get_tensor_value('linear_regression/bias')]
| apache-2.0 |
xiyuw123/Tax-Calculator | taxcalc/calculate.py | 1 | 9178 | import pandas as pd
from pandas import DataFrame
import math
import numpy as np
from .utils import *
from .functions import *
from .parameters import Parameters
from .records import Records
# Names of every column collected so far across all add_df calls.
# Module-level so repeated calls de-duplicate against each other.
all_cols = set()


def add_df(alldfs, df):
    """Merge the columns of *df* into the accumulator list *alldfs*.

    Parameters
    ----------
    alldfs : list of pandas.Series
        Columns gathered so far; mutated in place.
    df : pandas.DataFrame
        New results whose columns should be folded in.

    Columns whose name has not been seen before (tracked in the
    module-level ``all_cols`` set) are appended.  A column whose name is
    already present replaces the previously stored series, so only the
    most recent values survive.
    """
    for col in df.columns:
        if col not in all_cols:
            all_cols.add(col)
            alldfs.append(df[col])
        else:
            # Locate the existing series with this name and overwrite it.
            # next() stops at the first match instead of materializing a
            # full index list as the old [..][0] idiom did.
            dup_index = next(i for i, series in enumerate(alldfs)
                             if series.name == col)
            alldfs[dup_index] = df[col]
def calculator(parameters, records, mods="", **kwargs):
    """Convenience factory: build a Calculator and overlay modifications.

    *mods* may be a JSON string (whose list values are converted to numpy
    arrays) or a mapping; either way it is folded into ``kwargs`` before
    being written onto the new Calculator.  For every key that starts with
    an underscore, the bare-named attribute is additionally set to the
    first element of the underscored array.
    """
    if mods:
        if not isinstance(mods, str):
            kwargs.update(mods)
        else:
            import json
            decoded = json.loads(mods)
            kwargs.update({k: np.array(v) for k, v in decoded.items()
                           if type(v) == list})
    calc = Calculator(parameters, records)
    if not kwargs:
        return calc
    calc.__dict__.update(kwargs)
    for name in kwargs:
        if name.startswith("_"):
            # Mirror "_attr" arrays onto "attr" scalars (first element).
            setattr(calc, name[1:], getattr(calc, name)[0])
    return calc
class Calculator(object):
    """Pair a Parameters (tax-law) object with a Records (taxpayer data)
    object and drive the tax calculations over them.

    Attribute access is proxied: names not found on the instance are
    looked up first on the wrapped Parameters, then on the wrapped
    Records (see __getattr__/__setattr__), so the calculation functions
    can treat a Calculator as one flat namespace.
    """

    @classmethod
    def from_files(cls, pfname, rfname, **kwargs):
        """
        Create a Calculator object from a Parameters JSON file and a
        Records file

        Parameters
        ----------
        pfname: filename for Parameters
        rfname: filename for Records
        """
        params = Parameters.from_file(pfname, **kwargs)
        recs = Records.from_file(rfname, **kwargs)
        return cls(params, recs)

    def __init__(self, parameters=None, records=None, sync_years=True, **kwargs):
        """Wrap *parameters* and *records*, loading either from a path if
        needed, and optionally extrapolate the records forward so both
        sides agree on the current year.

        Raises
        ------
        ValueError
            If *records* is None.
        """
        # Accept either a ready-made Parameters object or a file path.
        if isinstance(parameters, Parameters):
            self._parameters = parameters
        else:
            self._parameters = Parameters.from_file(parameters, **kwargs)
        if records is None:
            raise ValueError("Must supply tax records path or Records object")
        # Likewise accept a Records object or a file path.
        self._records = (records if not isinstance(records, str) else
                         Records.from_file(records, **kwargs))
        # The distributed records data start at 2008; advance them year by
        # year until they match the Parameters' current year.
        if sync_years and self._records.current_year==2008:
            print("You loaded data for "+str(self._records.current_year)+'.')
            while self._records.current_year < self._parameters.current_year:
                self._records.increment_year()
            print("Your data have beeen extrapolated to "+str(self._records.current_year)+".")
        assert self._parameters.current_year == self._records.current_year

    def __deepcopy__(self, memo):
        # Deep-copy both wrapped objects explicitly; `memo` is not needed
        # because no shared sub-objects are tracked.
        import copy
        params = copy.deepcopy(self._parameters)
        recs = copy.deepcopy(self._records)
        return Calculator(params, recs)

    @property
    def parameters(self):
        # Handle on the wrapped Parameters object.
        return self._parameters

    @property
    def records(self):
        # Handle on the wrapped Records object.
        return self._records

    def __getattr__(self, name):
        """
        Delegate failed attribute lookups to the wrapped Parameters, then
        the wrapped Records; otherwise raise AttributeError.
        """
        if hasattr(self.parameters, name):
            return getattr(self.parameters, name)
        elif hasattr(self.records, name):
            return getattr(self.records, name)
        else:
            try:
                # NOTE(review): missing `return` here — unreachable in
                # practice because __getattr__ only fires when `name` is
                # already absent from self.__dict__, so this lookup always
                # raises KeyError.  Confirm before relying on it.
                self.__dict__[name]
            except KeyError:
                raise AttributeError(name + " not found")

    def __setattr__(self, name, val):
        """
        Route attribute writes to the wrapped Parameters/Records when they
        already define `name`; otherwise store on this instance.
        """
        # The two wrapped objects themselves must be stored directly to
        # avoid infinite recursion through the proxying below.
        if name == "_parameters" or name == "_records":
            self.__dict__[name] = val
            return
        if hasattr(self.parameters, name):
            return setattr(self.parameters, name, val)
        elif hasattr(self.records, name):
            return setattr(self.records, name, val)
        else:
            self.__dict__[name] = val

    def __getitem__(self, val):
        # Dict-style access mirroring the delegation order of __getattr__:
        # instance dict, then Parameters, then Records.
        if val in self.__dict__:
            return self.__dict__[val]
        else:
            try:
                return getattr(self.parameters, val)
            except AttributeError:
                try:
                    return getattr(self.records, val)
                except AttributeError:
                    raise

    def calc_all(self):
        """Run the full sequence of tax-form calculation functions in
        dependency order; each mutates self.records in place."""
        FilingStatus(self.parameters, self.records)
        Adj(self.parameters, self.records)
        CapGains(self.parameters, self.records)
        SSBenefits(self.parameters, self.records)
        AGI(self.parameters, self.records)
        ItemDed(self.parameters, self.records)
        EI_FICA(self.parameters, self.records)
        AMED(self.parameters, self.records)
        StdDed(self.parameters, self.records)
        XYZD(self.parameters, self.records)
        NonGain(self.parameters, self.records)
        TaxGains(self.parameters, self.records)
        MUI(self.parameters, self.records)
        AMTI(self.parameters, self.records)
        F2441(self.parameters, self.records)
        DepCareBen(self.parameters, self.records)
        ExpEarnedInc(self.parameters, self.records)
        RateRed(self.parameters, self.records)
        NumDep(self.parameters, self.records)
        ChildTaxCredit(self.parameters, self.records)
        AmOppCr(self.parameters, self.records)
        LLC(self.parameters, self.records)
        RefAmOpp(self.parameters, self.records)
        NonEdCr(self.parameters, self.records)
        AddCTC(self.parameters, self.records)
        F5405(self.parameters, self.records)
        C1040(self.parameters, self.records)
        DEITC(self.parameters, self.records)
        OSPC_TAX(self.parameters, self.records)

    def calc_all_test(self):
        """Run the same calculation sequence as calc_all, collecting every
        function's output columns into one DataFrame (for testing)."""
        all_dfs = []
        add_df(all_dfs, FilingStatus(self.parameters, self.records))
        add_df(all_dfs, Adj(self.parameters, self.records))
        add_df(all_dfs, CapGains(self.parameters, self.records))
        add_df(all_dfs, SSBenefits(self.parameters, self.records))
        add_df(all_dfs, AGI(self.parameters, self.records))
        add_df(all_dfs, ItemDed(self.parameters, self.records))
        add_df(all_dfs, EI_FICA(self.parameters, self.records))
        add_df(all_dfs, AMED(self.parameters, self.records))
        add_df(all_dfs, StdDed(self.parameters, self.records))
        add_df(all_dfs, XYZD(self.parameters, self.records))
        add_df(all_dfs, NonGain(self.parameters, self.records))
        add_df(all_dfs, TaxGains(self.parameters, self.records))
        add_df(all_dfs, MUI(self.parameters, self.records))
        add_df(all_dfs, AMTI(self.parameters, self.records))
        add_df(all_dfs, F2441(self.parameters, self.records))
        add_df(all_dfs, DepCareBen(self.parameters, self.records))
        add_df(all_dfs, ExpEarnedInc(self.parameters, self.records))
        add_df(all_dfs, RateRed(self.parameters, self.records))
        add_df(all_dfs, NumDep(self.parameters, self.records))
        add_df(all_dfs, ChildTaxCredit(self.parameters, self.records))
        add_df(all_dfs, AmOppCr(self.parameters, self.records))
        add_df(all_dfs, LLC(self.parameters, self.records))
        add_df(all_dfs, RefAmOpp(self.parameters, self.records))
        add_df(all_dfs, NonEdCr(self.parameters, self.records))
        add_df(all_dfs, AddCTC(self.parameters, self.records))
        add_df(all_dfs, F5405(self.parameters, self.records))
        add_df(all_dfs, C1040(self.parameters, self.records))
        add_df(all_dfs, DEITC(self.parameters, self.records))
        add_df(all_dfs, OSPC_TAX(self.parameters, self.records))
        totaldf = pd.concat(all_dfs, axis=1)
        return totaldf

    def increment_year(self):
        # Advance both wrapped objects one year, keeping them in sync.
        self.records.increment_year()
        self.parameters.increment_year()

    @property
    def current_year(self):
        # Parameters and Records years are asserted equal at construction.
        return self.parameters.current_year

    def mtr(self, income_type_string, diff = 100):
        """
        This method calculates the marginal tax rate for every record.
        In order to avoid kinks, we find the marginal rates associated with
        both a tax increase and a tax decrease and use the more modest of
        the two.
        """
        income_type = getattr(self, income_type_string)
        # Calculate the base level of taxes.
        self.calc_all()
        taxes_base = np.copy(self._ospctax)
        # Calculate the tax change with a marginal increase in income.
        setattr(self, income_type_string, income_type + diff)
        self.calc_all()
        delta_taxes_up = self._ospctax - taxes_base
        # Calculate the tax change with a marginal decrease in income.
        setattr(self, income_type_string, income_type - diff)
        self.calc_all()
        delta_taxes_down = taxes_base - self._ospctax
        # Reset the income_type to its starting point to avoid
        # unintended consequences.
        setattr(self, income_type_string, income_type)
        self.calc_all()
        # Choose the more modest effect of either adding or subtracting income.
        delta_taxes = np.where( np.absolute(delta_taxes_up) <=
                                np.absolute(delta_taxes_down),
                                delta_taxes_up , delta_taxes_down)
        # Calculate the marginal tax rate
        mtr = delta_taxes / diff
        return mtr
| mit |
Ensembles/ert | python/tests/ert/enkf/export/test_export_join.py | 2 | 4035 | from ert.enkf.export import DesignMatrixReader, SummaryCollector, GenKwCollector, MisfitCollector
from ert.test import ExtendedTestCase, ErtTestContext
import pandas
import numpy
import os
def dumpDesignMatrix(path):
    """Write the 25-realization design-matrix fixture to *path*.

    Columns: REALIZATION, EXTRA_FLOAT_COLUMN, EXTRA_INT_COLUMN,
    EXTRA_STRING_COLUMN (header row plus realizations 0..24).
    """
    rows = [
        "REALIZATION EXTRA_FLOAT_COLUMN EXTRA_INT_COLUMN EXTRA_STRING_COLUMN\n",
        "0 0.08 125 ON\n",
        "1 0.07 225 OFF\n",
        "2 0.08 325 ON\n",
        "3 0.06 425 ON\n",
        "4 0.08 525 OFF\n",
        "5 0.08 625 ON\n",
        "6 0.09 725 ON\n",
        "7 0.08 825 OFF\n",
        "8 0.02 925 ON\n",
        "9 0.08 125 ON\n",
        "10 0.08 225 ON\n",
        "11 0.05 325 OFF\n",
        "12 0.08 425 ON\n",
        "13 0.07 525 ON\n",
        "14 0.08 625 UNKNOWN\n",
        "15 0.08 725 ON\n",
        "16 0.08 825 ON\n",
        "17 0.08 925 OFF\n",
        "18 0.09 125 ON\n",
        "19 0.08 225 ON\n",
        "20 0.06 325 OFF\n",
        "21 0.08 425 ON\n",
        "22 0.07 525 ON\n",
        "23 0.08 625 OFF\n",
        "24 0.08 725 ON\n",
    ]
    with open(path, "w") as dm:
        dm.writelines(rows)
class ExportJoinTest(ExtendedTestCase):
    """Integration test: join summary, GenKW, misfit and design-matrix
    exports of the snake_oil case into one DataFrame and spot-check the
    merged values."""

    def setUp(self):
        os.environ["TZ"] = "CET"  # The ert_statoil case was generated in CET
        self.config = self.createTestPath("local/snake_oil/snake_oil.ert")

    def test_join(self):
        with ErtTestContext("python/enkf/export/export_join", self.config) as context:
            # Write the design-matrix fixture next to the test case.
            dumpDesignMatrix("DesignMatrix.txt")
            ert = context.getErt()
            # Collect the three exporter outputs plus the design matrix.
            summary_data = SummaryCollector.loadAllSummaryData(ert, "default_1")
            gen_kw_data = GenKwCollector.loadAllGenKwData(ert, "default_1")
            misfit = MisfitCollector.loadAllMisfitData(ert, "default_1")
            dm = DesignMatrixReader.loadDesignMatrix("DesignMatrix.txt")
            # Inner joins keep only index entries present in every source.
            result = summary_data.join(gen_kw_data, how='inner')
            result = result.join(misfit, how='inner')
            result = result.join(dm, how='inner')
            first_date = "2010-01-10"
            last_date = "2015-06-23"
            # GenKW values are constant over time for a given realization.
            self.assertFloatEqual(result["SNAKE_OIL_PARAM:OP1_OCTAVES"][0][first_date], 3.947766)
            self.assertFloatEqual(result["SNAKE_OIL_PARAM:OP1_OCTAVES"][24][first_date], 4.206698)
            self.assertFloatEqual(result["SNAKE_OIL_PARAM:OP1_OCTAVES"][24][last_date], 4.206698)
            # Design-matrix columns are broadcast across all dates.
            self.assertFloatEqual(result["EXTRA_FLOAT_COLUMN"][0][first_date], 0.08)
            self.assertEqual(result["EXTRA_INT_COLUMN"][0][first_date], 125)
            self.assertEqual(result["EXTRA_STRING_COLUMN"][0][first_date], "ON")
            self.assertFloatEqual(result["EXTRA_FLOAT_COLUMN"][0][last_date], 0.08)
            self.assertEqual(result["EXTRA_INT_COLUMN"][0][last_date], 125)
            self.assertEqual(result["EXTRA_STRING_COLUMN"][0][last_date], "ON")
            self.assertFloatEqual(result["EXTRA_FLOAT_COLUMN"][1][last_date], 0.07)
            self.assertEqual(result["EXTRA_INT_COLUMN"][1][last_date], 225)
            self.assertEqual(result["EXTRA_STRING_COLUMN"][1][last_date], "OFF")
            # Misfit values, likewise constant per realization.
            self.assertFloatEqual(result["MISFIT:FOPR"][0][last_date], 489.191069)
            self.assertFloatEqual(result["MISFIT:FOPR"][24][last_date], 1841.906872)
            self.assertFloatEqual(result["MISFIT:TOTAL"][0][first_date], 500.170035)
            self.assertFloatEqual(result["MISFIT:TOTAL"][0][last_date], 500.170035)
            self.assertFloatEqual(result["MISFIT:TOTAL"][24][last_date], 1925.793865)
            # Realization 60 does not exist, so the lookup must fail.
            with self.assertRaises(KeyError):
                realization_13 = result.loc[60]
            # Column dtypes: floats first, then the design-matrix int and
            # string columns at the end.
            column_count = len(result.columns)
            self.assertEqual(result.dtypes[0], numpy.float64)
            self.assertEqual(result.dtypes[column_count - 1], numpy.object)
            self.assertEqual(result.dtypes[column_count - 2], numpy.int64)
| gpl-3.0 |
xiaoxiaoyao/PythonApplication1 | PythonApplication1/自己的小练习/HLS/RandomStores.py | 2 | 6100 |
# Read the data.  When deploying to the server, change this line to the
# server-side file location.
filePath=r'F:\\OneDrive\\华莱士\\Documents\\门店监控项目\\市场组织架构收集\\城市-营运-督导-门店.csv'
from flask import Flask
# Timestamps
import datetime
import pandas,numpy.random
# Seed with the day-of-year so every draw within one day is reproducible.
numpy.random.seed(int(datetime.datetime.now().strftime("%j")))
# Load all rows as strings and pre-shuffle them; 'label' tracks when a row
# was last drawn ('0' = never).
fl = pandas.read_csv(filePath).astype('str').sample(frac=1,random_state=numpy.random.RandomState())
fl['label'] = '0'
# fl.drop_duplicates(subset=['城市经理'],keep='first',inplace=True)## auto de-duplicate, ensuring each city manager ("城市经理") is hit only once
# Web-server part:
### HTML page templates (header with action buttons / footer with assets)
template1='''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"><html xmlns="http://www.w3.org/1999/xhtml"><body><a href="../chou/30"><button type="button" class="btn btn-warning">点此按钮随机抽30个</button></a><a href="../all"><button type="button" class="btn btn-warning">点此按钮一键全抽</button></a><a href="../all-no-repeat"><button type="button" class="btn btn-warning">不重复全抽城市经理</button></a>
<a href="../3-suzhou-1-shanghai"><button type="button" class="btn btn-warning">按:苏州30上海10抽取</button></a>
<br /><div class="container">'''
template2='''</div><link href="http://apps.bdimg.com/libs/bootstrap/3.3.0/css/bootstrap.min.css" rel="stylesheet" media="screen"><script src="http://apps.bdimg.com/libs/jquery/2.0.0/jquery.min.js"></script><script src="http://apps.bdimg.com/libs/bootstrap/3.3.0/js/bootstrap.min.js"></script></body></html>'''
### Initialization - define the web server
global app
app = Flask(__name__)
@app.route('/chou/<n>', methods=("GET", "POST"))
def 入魂(n): return template1 + "当前时间:" + datetime.datetime.now().strftime("%x %X") + 一发入魂(n=int(n)).to_html(classes='table-striped') + template2
@app.route('/all', methods=("GET", "POST"))  # draw every city manager at once (no de-dup of repeat visits)
def 一键全抽城市经理(): return template1 + "当前时间:" + datetime.datetime.now().strftime("%x %X") + 城市经理一键全抽().to_html(classes='table-striped') + template2
@app.route('/all-no-repeat/reset', methods=("GET", "POST"))  # reset the no-repeat draw counter
def all_no_repeat_reset():
    fl['label'] = '0'
    return template1 +'重置抽取计数器成功' + template2
@app.route('/all-no-repeat', methods=("GET", "POST"))  # draw all city managers without repeats
def 不重复抽城市经理(): return template1 + '<a href="../all-no-repeat/reset"><button type="button" class="btn btn-warning">重置抽取计数器</button></a>'+ "今天是全年的第" + datetime.datetime.now().strftime("%j") + "天,服务器当前时间:" + datetime.datetime.now().strftime("%x %X") + 城市经理不重复抽().to_html(classes='table-striped') + template2
# Temporary feature: draw 10 Shanghai and 30 Suzhou stores (no de-dup)
@app.route('/3-suzhou-1-shanghai', methods=("GET", "POST"))
def by区域(): return template1 + "当前时间:" + datetime.datetime.now().strftime("%x %X") + 按区域抽(区域='江苏苏州服务区',n=30).to_html(classes='table-striped') + 按区域抽(区域='上海',n=10).to_html(classes='table-dark') + template2
# Home page
@app.route('/', methods=("GET", "POST"))
def index(): return template1 + "当前时间:" + datetime.datetime.now().strftime("%x %X") + template2
# Drawing (sampling) helpers
# The .drop_duplicates(subset=['城市经理'],keep='first') in the middle
# guarantees each city manager is drawn at most once per call.
def 一发入魂(fl=fl,n=None, frac=None, replace=False): return fl.sample(n=n,frac=frac,replace=replace).drop_duplicates(subset=['城市经理'],keep='first')
def 抽个区域(fl=fl,n=None, frac=None, replace=False): return fl['区域'].sample(n=n,frac=frac,replace=replace).drop_duplicates(subset=['城市经理'],keep='first')
def 按区域抽(区域,fl=fl,n=None, frac=None, replace=False): return fl[fl['区域']==区域].sample(n=n,frac=frac,replace=replace)# no de-dup
def 按城市经理抽(城市经理,fl=fl,n=None, frac=None, replace=False): return fl[fl['城市经理']==城市经理].sample(n=n,frac=frac,replace=replace).drop_duplicates(subset=['城市经理'],keep='first')
def 按督导抽(督导,n=None, fl=fl,frac=None, replace=False): return fl[fl['督导']==督导].sample(n=n,frac=frac,replace=replace).drop_duplicates(subset=['城市经理'],keep='first')
def 城市经理一键全抽(fl=fl): return fl.sample(frac=1,random_state=numpy.random.RandomState()).drop_duplicates(subset=['城市经理'],keep='first')
# No-repeat draw: only rows with label '0' are candidates; drawn rows are
# stamped with the draw time so they are skipped next time.  (Could use a
# refactor.)
def 城市经理不重复抽():
    tmp=fl[fl.label=='0'].sample(frac=1,random_state=numpy.random.RandomState()).drop_duplicates(subset=['城市经理'],keep='first')
    fl.loc[tmp.index,'label']= datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return tmp
# Direct selection helpers (shuffled distinct values, no draw bookkeeping)
def 选个区域(): return fl['区域'].drop_duplicates().sample(frac=1,random_state=numpy.random.RandomState())
def 选个城市经理(区域): return fl[fl['区域']==区域]['城市经理'].sample(frac=1,random_state=numpy.random.RandomState()).drop_duplicates()
def 选个督导(城市经理): return fl[fl['城市经理']==城市经理]['督导'].sample(frac=1,random_state=numpy.random.RandomState()).drop_duplicates()
# For manually testing the random draws
if __name__=='__main__':
    print(
        "按区域抽(区域='上海')",按区域抽(区域='上海'),
        "按城市经理抽(城市经理='蔡欢')",按城市经理抽(城市经理='蔡欢'),
        "选个区域()",选个区域(),
        "选个城市经理(区域='福州')",选个城市经理(区域='福州'),
        "选个督导(城市经理='蔡欢')",选个督导(城市经理='蔡欢'),
        )
# "按督导抽(督导='刘立福')",按督导抽(督导='王冰'),
### Start the main program - the web server
if __name__ == '__main__':
    print('启动主程序——网页服务器',datetime.datetime.now().strftime("%x %X"))
    app.run()  # when deploying, set the correct port (see the note at the same place in the previous file)
# End of program
print('Thank you',datetime.datetime.now().strftime("%x %X"))
| unlicense |
laszlocsomor/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 79 | 2464 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
| apache-2.0 |
TomAugspurger/pandas | pandas/tests/arrays/sparse/test_combine_concat.py | 8 | 2651 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray
class TestSparseArrayConcat:
    """Behaviour of SparseArray._concat_same_type."""

    @pytest.mark.parametrize("kind", ["integer", "block"])
    def test_basic(self, kind):
        left = SparseArray([1, 0, 0, 2], kind=kind)
        right = SparseArray([1, 0, 2, 2], kind=kind)
        combined = SparseArray._concat_same_type([left, right])
        # The sparse index layout itself is unspecified (sparse blocks are
        # not merged across the concatenated arrays), so only the stored
        # values and the kind are checked.
        expected = np.array([1, 2, 1, 2, 2], dtype="int64")
        tm.assert_numpy_array_equal(combined.sp_values, expected)
        assert combined.kind == kind

    @pytest.mark.parametrize("kind", ["integer", "block"])
    def test_uses_first_kind(self, kind):
        other = "integer" if kind == "block" else "block"
        first = SparseArray([1, 0, 0, 2], kind=kind)
        second = SparseArray([1, 0, 2, 2], kind=other)
        combined = SparseArray._concat_same_type([first, second])
        # The result adopts the kind of the first input array.
        expected = np.array([1, 2, 1, 2, 2], dtype="int64")
        tm.assert_numpy_array_equal(combined.sp_values, expected)
        assert combined.kind == kind
@pytest.mark.parametrize(
    "other, expected_dtype",
    [
        # compatible dtype -> preserve sparse
        (pd.Series([3, 4, 5], dtype="int64"), pd.SparseDtype("int64", 0)),
        # (pd.Series([3, 4, 5], dtype="Int64"), pd.SparseDtype("int64", 0)),
        # incompatible dtype -> Sparse[common dtype]
        (pd.Series([1.5, 2.5, 3.5], dtype="float64"), pd.SparseDtype("float64", 0)),
        # incompatible dtype -> Sparse[object] dtype
        (pd.Series(["a", "b", "c"], dtype=object), pd.SparseDtype(object, 0)),
        # categorical with compatible categories -> dtype of the categories
        (pd.Series([3, 4, 5], dtype="category"), np.dtype("int64")),
        (pd.Series([1.5, 2.5, 3.5], dtype="category"), np.dtype("float64")),
        # categorical with incompatible categories -> object dtype
        (pd.Series(["a", "b", "c"], dtype="category"), np.dtype(object)),
    ],
)
def test_concat_with_non_sparse(other, expected_dtype):
    # https://github.com/pandas-dev/pandas/issues/34336
    sparse_ser = pd.Series([1, 0, 2], dtype=pd.SparseDtype("int64", 0))
    # The resulting dtype must follow the same rules regardless of which
    # operand comes first, so check both concatenation orders.
    for parts in ([sparse_ser, other], [other, sparse_ser]):
        result = pd.concat(parts, ignore_index=True)
        expected = pd.Series(list(parts[0]) + list(parts[1])).astype(expected_dtype)
        tm.assert_series_equal(result, expected)
| bsd-3-clause |
Minhmo/tardis | tardis/gui/widgets.py | 5 | 52003 | import os
if os.environ.get('QT_API', None)=='pyqt':
from PyQt4 import QtGui, QtCore
elif os.environ.get('QT_API', None)=='pyside':
from PySide import QtGui, QtCore
else:
raise ImportError('QT_API was not set! Please exit the IPython console\n'
' and at the bash prompt use : \n\n export QT_API=pyside \n or\n'
' export QT_API=pyqt \n\n For more information refer to user guide.')
import matplotlib
from matplotlib.figure import *
import matplotlib.gridspec as gridspec
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as NavigationToolbar
from matplotlib import colors
from matplotlib.patches import Circle
import matplotlib.pylab as plt
from astropy import units as u
import tardis
from tardis import analysis, util
class MatplotlibWidget(FigureCanvas):
    """Canvas to draw graphs on.

    Serves two layouts selected by the ``fig`` constructor argument:
    the spectrum view (single axes + toolbar + wavelength-range span
    selector) and the 'model' view (two stacked axes + pickable shells).
    """

    def __init__(self, tablecreator, parent, fig=None):
        """Create the canvas. Add toolbar depending on the parent."""
        self.tablecreator = tablecreator
        self.parent = parent
        self.figure = Figure()#(frameon=False,facecolor=(1,1,1))
        # Registry of matplotlib event-connection ids, so handlers can be
        # disconnected/reconnected while dragging the span selector.
        self.cid = {}
        if fig != 'model':
            # Spectrum view: a single axes.
            self.ax = self.figure.add_subplot(111)
        else:
            # Model view: small top panel over a larger bottom panel.
            self.gs = gridspec.GridSpec(2, 1, height_ratios=[1, 3])
            self.ax1 = self.figure.add_subplot(self.gs[0])
            self.ax2 = self.figure.add_subplot(self.gs[1])#, aspect='equal')
        self.cb = None    # colorbar handle, assigned later by callers
        self.span = None  # wavelength-range selector patch (lazy-created)
        super(MatplotlibWidget, self).__init__(self.figure)
        super(MatplotlibWidget, self).setSizePolicy(QtGui.QSizePolicy.Expanding,
            QtGui.QSizePolicy.Expanding)
        super(MatplotlibWidget, self).updateGeometry()
        if fig != 'model':
            # Spectrum view gets a navigation toolbar and span picking.
            self.toolbar = NavigationToolbar(self, parent)
            self.cid[0] = self.figure.canvas.mpl_connect('pick_event',
                self.on_span_pick)
        else:
            # Model view picks shells instead.
            self.cid[0] = self.figure.canvas.mpl_connect('pick_event',
                self.on_shell_pick)

    def show_line_info(self):
        """Show line info for span selected region."""
        # span.xy[0][0] / span.xy[2][0] hold the left/right wavelength
        # edges of the selector rectangle.
        self.parent.line_info.append(LineInfo(self.parent, self.span.xy[0][0],
            self.span.xy[2][0], self.tablecreator))

    def show_span(self, garbage=0, left=5000, right=10000):
        """Hide/Show/Change the buttons that show line info
        in spectrum plot widget.
        """
        # `garbage` swallows the argument Qt passes to this slot.
        if self.parent.spectrum_span_button.text() == 'Show Wavelength Range':
            if not self.span:
                # Lazily create the selector patch on first use.
                self.span = self.ax.axvspan(left, right, color='r', alpha=0.3,
                    picker=self.span_picker)
            else:
                self.span.set_visible(True)
            self.parent.spectrum_line_info_button.show()
            self.parent.spectrum_span_button.setText('Hide Wavelength Range')
        else:
            self.span.set_visible(False)
            self.parent.spectrum_line_info_button.hide()
            self.parent.spectrum_span_button.setText('Show Wavelength Range')
        self.draw()

    def on_span_pick(self, event):
        """Callback to 'pick'(grab with mouse) the span selector tool."""
        # While dragging, suspend further pick events and highlight the
        # span; motion events then move the grabbed edge.
        self.figure.canvas.mpl_disconnect(self.cid[0])
        self.span.set_edgecolor('m')
        self.span.set_linewidth(5)
        self.draw()
        if event.edge == 'left':
            self.cid[1] = self.figure.canvas.mpl_connect('motion_notify_event',
                self.on_span_left_motion)
        elif event.edge == 'right':
            self.cid[1] = self.figure.canvas.mpl_connect('motion_notify_event',
                self.on_span_right_motion)
        # The next button press finalizes the resize.
        self.cid[2] = self.figure.canvas.mpl_connect('button_press_event',
            self.on_span_resized)

    def on_span_left_motion(self, mouseevent):
        """Update data of span selector tool on left movement of mouse and
        redraw.
        """
        # Vertices 0, 1 and 4 of the polygon share the left x-coordinate;
        # only move while staying left of the right edge (vertex 2).
        if mouseevent.xdata < self.span.xy[2][0]:
            self.span.xy[0][0] = mouseevent.xdata
            self.span.xy[1][0] = mouseevent.xdata
            self.span.xy[4][0] = mouseevent.xdata
            self.draw()

    def on_span_right_motion(self, mouseevent):
        """Update data of span selector tool on right movement of mouse and
        redraw.
        """
        # Vertices 2 and 3 share the right x-coordinate; only move while
        # staying right of the left edge (vertex 0).
        if mouseevent.xdata > self.span.xy[0][0]:
            self.span.xy[2][0] = mouseevent.xdata
            self.span.xy[3][0] = mouseevent.xdata
            self.draw()

    def on_span_resized(self, mouseevent):
        """Redraw the red rectangle to currently selected span."""
        # Drop the drag handlers, restore pick handling and the normal
        # span appearance.
        self.figure.canvas.mpl_disconnect(self.cid[1])
        self.figure.canvas.mpl_disconnect(self.cid[2])
        self.cid[0] = self.figure.canvas.mpl_connect('pick_event',
            self.on_span_pick)
        self.span.set_edgecolor('r')
        self.span.set_linewidth(1)
        self.draw()

    def on_shell_pick(self, event):
        """Highlight the shell that was picked."""
        self.highlight_shell(event.artist.index)

    def highlight_shell(self, index):
        """Change edgecolor of highlighted shell."""
        self.parent.tableview.selectRow(index)
        for i in range(len(self.parent.shells)):
            if i != index and i != index + 1:
                self.parent.shells[i].set_edgecolor('k')
            else:
                # Highlight the picked shell and its outer neighbour so
                # both boundaries of the selected zone stand out.
                self.parent.shells[i].set_edgecolor('w')
        self.draw()

    def shell_picker(self, shell, mouseevent):
        """Enable picking shells in the shell plot."""
        if mouseevent.xdata is None:
            return False, dict()
        # Compare squared radii to avoid a square root.
        mouse_r2 = mouseevent.xdata ** 2 + mouseevent.ydata ** 2
        if shell.r_inner ** 2 < mouse_r2 < shell.r_outer ** 2:
            return True, dict()
        return False, dict()

    def span_picker(self, span, mouseevent, tolerance=5):
        """Detect mouseclicks inside tolerance region of the span selector
        tool and pick it.
        """
        left = float(span.xy[0][0])
        right = float(span.xy[2][0])
        # Convert the pixel tolerance into data coordinates.
        tolerance = span.axes.transData.inverted().transform((tolerance, 0)
            )[0] - span.axes.transData.inverted().transform((0, 0))[0]
        event_attributes = {'edge': None}
        if mouseevent.xdata is None:
            return False, event_attributes
        # Report which edge was grabbed so on_span_pick can attach the
        # matching motion handler.
        if left - tolerance <= mouseevent.xdata <= left + tolerance:
            event_attributes['edge'] = 'left'
            return True, event_attributes
        elif right - tolerance <= mouseevent.xdata <= right + tolerance:
            event_attributes['edge'] = 'right'
            return True, event_attributes
        return False, event_attributes
class Shell(matplotlib.patches.Wedge):
    """A 90-degree annular wedge representing one model shell in the plot.

    Keeps the shell index and inner/outer radii alongside the Wedge
    geometry so pick handlers can identify which shell was clicked.
    """

    def __init__(self, index, center, r_inner, r_outer, **kwargs):
        ring_width = r_outer - r_inner
        super(Shell, self).__init__(center, r_outer, 0, 90,
            width=ring_width, **kwargs)
        # Bookkeeping consumed by the pick/highlight logic.
        self.index = index
        self.center = center
        self.r_outer = r_outer
        self.r_inner = r_inner
        self.width = ring_width
class ConfigEditor(QtGui.QWidget):
"""The configuration editor widget.
This widget is added to the stacked widget that is the central widget of
the main top level window created by Tardis.
"""
def __init__(self, yamlconfigfile, parent=None):
"""Create and return the configuration widget.
Parameters
----------
yamlconfigfile: string
File name of the yaml configuration file.
parent: None
Set to None. The parent is changed when the widget is
appended to the layout of its parent.
"""
super(ConfigEditor, self).__init__(parent)
#Configurations from the input and template
configDict = yaml.load(open(yamlconfigfile))
templatedictionary ={'tardis_config_version':[True, 'v1.0'],
'supernova':{ 'luminosity_requested':[True, '1 solLum'],
'time_explosion':[True, None],
'distance':[False, None],
'luminosity_wavelength_start':[False, '0 angstrom'],
'luminosity_wavelength_end':[False, 'inf angstrom'],
},
'atom_data':[True,'File Browser'],
'plasma':{ 'initial_t_inner':[False, '-1K'],
'initial_t_rad':[False,'10000K'],
'disable_electron_scattering':[False, False],
'ionization':[True, None],
'excitation':[True, None],
'radiative_rates_type':[True, None],
'line_interaction_type':[True, None],
'w_epsilon':[False, 1e-10],
'delta_treatment':[False, None],
'nlte':{ 'species':[False, []],
'coronal_approximation':[False, False],
'classical_nebular':[False, False]
}
},
'model':{ 'structure':{'type':[True, ['file|_:_|filename|_:_|'
'filetype|_:_|v_inner_boundary|_:_|v_outer_boundary',
'specific|_:_|velocity|_:_|density']],
'filename':[True, None],
'filetype':[True, None],
'v_inner_boundary':[False, '0 km/s'],
'v_outer_boundary':[False, 'inf km/s'],
'velocity':[True, None],
'density':{ 'type':[True, ['branch85_w7|_:_|w7_time_0'
'|_:_|w7_time_0|_:_|w7_time_0',
'exponential|_:_|time_0|_:_|rho_0|_:_|'
'v_0','power_law|_:_|time_0|_:_|rho_0'
'|_:_|v_0|_:_|exponent','uniform|_:_|value']],
'w7_time_0':[False, '0.000231481 day'],
'w7_rho_0':[False, '3e29 g/cm^3'],
'w7_v_0': [False, '1 km/s'],
'time_0':[True, None],
'rho_0':[True, None],
'v_0': [True, None],
'exponent': [True, None],
'value':[True, None]
}
},
'abundances':{ 'type':[True, ['file|_:_|filetype|_:_|'
'filename', 'uniform']],
'filename':[True, None],
'filetype':[False, None]
}
},
'montecarlo':{'seed':[False, 23111963],
'no_of_packets':[True, None],
'iterations':[True, None],
'black_body_sampling':{
'start': '1 angstrom',
'stop': '1000000 angstrom',
'num': '1.e+6',
},
'last_no_of_packets':[False, -1],
'no_of_virtual_packets':[False, 0],
'enable_reflective_inner_boundary':[False, False],
'inner_boundary_albedo':[False, 0.0],
'convergence_strategy':{ 'type':[True,
['damped|_:_|damping_constant|_:_|t_inner|_:_|'
't_rad|_:_|w|_:_|lock_t_inner_cycles|_:_|'
't_inner_update_exponent','specific|_:_|threshold'
'|_:_|fraction|_:_|hold_iterations|_:_|t_inner'
'|_:_|t_rad|_:_|w|_:_|lock_t_inner_cycles|_:_|'
'damping_constant|_:_|t_inner_update_exponent']],
't_inner_update_exponent':[False, -0.5],
'lock_t_inner_cycles':[False, 1],
'hold_iterations':[True, 3],
'fraction':[True, 0.8],
'damping_constant':[False, 0.5],
'threshold':[True, None],
't_inner':{ 'damping_constant':[False, 0.5],
'threshold': [False, None]
},
't_rad':{'damping_constant':[False, 0.5],
'threshold':[True, None]
},
'w':{'damping_constant': [False, 0.5],
'threshold': [True, None]
}
}
},
'spectrum':[True, None]
}
self.match_dicts(configDict, templatedictionary)
self.layout = QtGui.QVBoxLayout()
#Make tree
self.trmodel = TreeModel(templatedictionary)
self.colView = QtGui.QColumnView()
self.colView.setModel(self.trmodel)
#Five columns of width 256 each can be visible at once
self.colView.setFixedWidth(256*5)
self.colView.setItemDelegate(TreeDelegate(self))
self.layout.addWidget(self.colView)
#Recalculate button
button = QtGui.QPushButton('Recalculate')
button.setFixedWidth(90)
self.layout.addWidget(button)
button.clicked.connect(self.recalculate)
#Finally put them all in
self.setLayout(self.layout)
def match_dicts(self, dict1, dict2):  # dict1 <= dict2
    """Compare and combine two dictionaries.

    If there are new keys in `dict1` then they are appended to `dict2`.
    The `dict2` stores the values for the keys in `dict1` but it
    first modifies them by taking the value and appending to a
    list whose first item is either True or False, indicating if
    that key is mandatory or not. The goal of this method is to
    perform validation by inserting user provided values into the
    template dictionary. After inserting user given values into the
    template dictionary, all the keys have either default or user
    provided values. Then it can be used to build a tree to be
    shown in the ConfigEditor.

    Parameters
    ----------
    dict1: dictionary
        The dictionary of user provided configuration.
    dict2: dictionary
        The template dictionary with all default values set. This
        one may have some keys missing that are present in the
        `dict1`. Such keys will be appended. Modified in place.

    Raises
    ------
    IOError
        If the configuration file has an invalid value for a
        key that can only take values from a predefined list,
        then this error is raised.
    """
    for key in dict1:
        if key in dict2:
            if isinstance(dict2[key], dict):
                # Nested section: merge recursively.
                self.match_dicts(dict1[key], dict2[key])
            elif isinstance(dict2[key], list):
                if isinstance(dict2[key][1], list):
                    # Allowed options are stored as
                    # 'name|_:_|field1|_:_|...' strings; compare on the
                    # name part only.  Build a fresh list so the template
                    # entries are not mutated while scanning.
                    options = [opt.split('|_:_|')[0]
                               for opt in dict2[key][1]]
                    optionselected = dict1[key]
                    if optionselected in options:
                        # Move the user-selected option to the front by
                        # swapping it with the current first entry.
                        indexofselected = options.index(optionselected)
                        dict2[key][1][0], dict2[key][1][indexofselected] = (
                            dict2[key][1][indexofselected],
                            dict2[key][1][0])
                    else:
                        # print() calls (not py2 print statements) so this
                        # module is importable under both Python 2 and 3;
                        # IOError is the builtin (the py2 `exceptions`
                        # module does not exist in py3).
                        print('The selected and available options')
                        print(optionselected)
                        print(options)
                        raise IOError("An invalid option was"
                                      " provided in the input file")
                else:
                    # Plain [mandatory, default] entry: install the user
                    # provided value in place of the whole entry.
                    dict2[key] = dict1[key]
        else:
            # Key unknown to the template: append it, marked optional.
            dict2[key] = [False, dict1[key]]
def recalculate(self):
    """Recompute the model from the edited configuration and refresh
    the display.

    Placeholder: recalculation from the ConfigEditor is not
    implemented yet, so this is intentionally a no-op.
    """
    pass
class ModelViewer(QtGui.QWidget):
    """The widget that holds all the plots and tables that visualize
    the data in the tardis model. This is also appended to the stacked
    widget in the top level window.
    """

    def __init__(self, tablecreator, parent=None):
        """Create all widgets that are children of ModelViewer.

        Parameters
        ----------
        tablecreator: callable
            Factory that builds the table models used by the views.
        parent: QWidget or None
            Standard Qt parent widget.
        """
        QtGui.QWidget.__init__(self, parent)
        #Data structures
        self.model = None
        self.shell_info = {}
        self.line_info = []
        #functions
        self.createTable = tablecreator
        #Shells widget
        self.shellWidget = self.make_shell_widget()
        #Spectrum widget
        self.spectrumWidget = self.make_spectrum_widget()
        #Plot tab widget
        self.plotTabWidget = QtGui.QTabWidget()
        self.plotTabWidget.addTab(self.shellWidget, "&Shells")
        self.plotTabWidget.addTab(self.spectrumWidget, "S&pectrum")
        #Table widget
        self.tablemodel = self.createTable([['Shell: '], ["Rad. temp", "Ws"]],
                                           (1, 0))
        self.tableview = QtGui.QTableView()
        self.tableview.setMinimumWidth(200)
        # Single click highlights the shell; double click opens ShellInfo.
        self.tableview.connect(self.tableview.verticalHeader(),
            QtCore.SIGNAL('sectionClicked(int)'), self.graph.highlight_shell)
        self.tableview.connect(self.tableview.verticalHeader(),
            QtCore.SIGNAL('sectionDoubleClicked(int)'),
            self.on_header_double_clicked)
        #Label for text output
        self.outputLabel = QtGui.QLabel()
        self.outputLabel.setFrameStyle(QtGui.QFrame.StyledPanel |
                                       QtGui.QFrame.Sunken)
        self.outputLabel.setStyleSheet("QLabel{background-color:white;}")
        #Group boxes
        graphsBox = QtGui.QGroupBox("Visualized results")
        textsBox = QtGui.QGroupBox("Model parameters")
        tableBox = QtGui.QGroupBox("Tabulated results")
        #For textbox
        textlayout = QtGui.QHBoxLayout()
        textlayout.addWidget(self.outputLabel)
        tableslayout = QtGui.QVBoxLayout()
        tableslayout.addWidget(self.tableview)
        tableBox.setLayout(tableslayout)
        visualayout = QtGui.QVBoxLayout()
        visualayout.addWidget(self.plotTabWidget)
        graphsBox.setLayout(visualayout)
        self.layout = QtGui.QHBoxLayout()
        self.layout.addWidget(graphsBox)
        textntablelayout = QtGui.QVBoxLayout()
        textsBox.setLayout(textlayout)
        textntablelayout.addWidget(textsBox)
        textntablelayout.addWidget(tableBox)
        self.layout.addLayout(textntablelayout)
        self.setLayout(self.layout)

    def fill_output_label(self):
        """Read some data from tardis model and display on the label for
        quick user access.
        """
        labeltext = 'Iterations requested: {} <br/> Iterations executed: {}<br/>\
            Model converged : {} <br/> Simulation Time : {} s <br/>\
            Inner Temperature : {} K <br/> Number of packets : {}<br/>\
            Inner Luminosity : {}'\
            .format(self.model.iterations_max_requested,
                    self.model.iterations_executed,
                    '<font color="green"><b>True</b></font>' if
                    self.model.converged else
                    '<font color="red"><b>False</b></font>',
                    self.model.time_of_simulation.value,
                    self.model.t_inner.value,
                    self.model.current_no_of_packets,
                    self.model.luminosity_inner)
        self.outputLabel.setText(labeltext)

    def make_shell_widget(self):
        """Create the plot of the the shells and place it inside a
        container widget. Return the container widget.
        """
        #Widgets for plot of shells
        self.graph = MatplotlibWidget(self.createTable, self, 'model')
        self.graph_label = QtGui.QLabel('Select Property:')
        self.graph_button = QtGui.QToolButton()
        self.graph_button.setText('Rad. temp')
        self.graph_button.setPopupMode(QtGui.QToolButton.MenuButtonPopup)
        self.graph_button.setMenu(QtGui.QMenu(self.graph_button))
        self.graph_button.menu().addAction('Rad. temp').triggered.connect(
            self.change_graph_to_t_rads)
        self.graph_button.menu().addAction('Ws').triggered.connect(
            self.change_graph_to_ws)
        #Layouts: bottom up
        self.graph_subsublayout = QtGui.QHBoxLayout()
        self.graph_subsublayout.addWidget(self.graph_label)
        self.graph_subsublayout.addWidget(self.graph_button)
        self.graph_sublayout = QtGui.QVBoxLayout()
        self.graph_sublayout.addLayout(self.graph_subsublayout)
        self.graph_sublayout.addWidget(self.graph)
        containerWidget = QtGui.QWidget()
        containerWidget.setLayout(self.graph_sublayout)
        return containerWidget

    def make_spectrum_widget(self):
        """Create the spectrum plot and associated buttons and append to
        a container widget. Return the container widget.
        """
        self.spectrum = MatplotlibWidget(self.createTable, self)
        self.spectrum_label = QtGui.QLabel('Select Spectrum:')
        self.spectrum_button = QtGui.QToolButton()
        self.spectrum_button.setText('spec_flux_angstrom')
        self.spectrum_button.setPopupMode(QtGui.QToolButton.MenuButtonPopup)
        self.spectrum_button.setMenu(QtGui.QMenu(self.spectrum_button))
        self.spectrum_button.menu().addAction('spec_flux_angstrom'
            ).triggered.connect(self.change_spectrum_to_spec_flux_angstrom)
        self.spectrum_button.menu().addAction('spec_virtual_flux_angstrom'
            ).triggered.connect(self.change_spectrum_to_spec_virtual_flux_angstrom)
        self.spectrum_span_button = QtGui.QPushButton('Show Wavelength Range')
        self.spectrum_span_button.clicked.connect(self.spectrum.show_span)
        self.spectrum_line_info_button = QtGui.QPushButton('Show Line Info')
        self.spectrum_line_info_button.hide()
        self.spectrum_line_info_button.clicked.connect(self.spectrum.show_line_info)
        self.spectrum_subsublayout = QtGui.QHBoxLayout()
        self.spectrum_subsublayout.addWidget(self.spectrum_span_button)
        self.spectrum_subsublayout.addWidget(self.spectrum_label)
        self.spectrum_subsublayout.addWidget(self.spectrum_button)
        self.spectrum_sublayout = QtGui.QVBoxLayout()
        self.spectrum_sublayout.addLayout(self.spectrum_subsublayout)
        self.spectrum_sublayout.addWidget(self.spectrum_line_info_button)
        self.spectrum_sublayout.addWidget(self.spectrum)
        self.spectrum_sublayout.addWidget(self.spectrum.toolbar)
        containerWidget = QtGui.QWidget()
        containerWidget.setLayout(self.spectrum_sublayout)
        return containerWidget

    def update_data(self, model=None):
        """Associate the given model with the GUI and display results."""
        if model:
            self.change_model(model)
        self.tablemodel.update_table()
        for index in self.shell_info.keys():
            self.shell_info[index].update_tables()
        self.plot_model()
        # BUG FIX: QToolButton.text is a bound method; comparing it to a
        # string was always False, so the alternate views were never
        # restored after an update.  Call text() to get the label.
        if self.graph_button.text() == 'Ws':
            self.change_graph_to_ws()
        self.plot_spectrum()
        if self.spectrum_button.text() == 'spec_virtual_flux_angstrom':
            self.change_spectrum_to_spec_virtual_flux_angstrom()
        self.show()

    def change_model(self, model):
        """Reset the model set in the GUI."""
        self.model = model
        self.tablemodel.arraydata = []
        self.tablemodel.add_data(model.t_rads.value.tolist())
        self.tablemodel.add_data(model.ws.tolist())

    def change_spectrum_to_spec_virtual_flux_angstrom(self):
        """Change the spectrum data to the virtual spectrum."""
        if self.model.spectrum_virtual.luminosity_density_lambda is None:
            luminosity_density_lambda = np.zeros_like(
                self.model.spectrum_virtual.wavelength)
        else:
            luminosity_density_lambda = \
                self.model.spectrum_virtual.luminosity_density_lambda.value
        # BUG FIX: the button label previously said 'spec_flux_angstrom'
        # even while the virtual spectrum was displayed.
        self.change_spectrum(luminosity_density_lambda,
                             'spec_virtual_flux_angstrom')

    def change_spectrum_to_spec_flux_angstrom(self):
        """Change spectrum data back from virtual spectrum. (See the
        method above)."""
        if self.model.spectrum.luminosity_density_lambda is None:
            luminosity_density_lambda = np.zeros_like(
                self.model.spectrum.wavelength)
        else:
            luminosity_density_lambda = \
                self.model.spectrum.luminosity_density_lambda.value
        self.change_spectrum(luminosity_density_lambda, 'spec_flux_angstrom')

    def change_spectrum(self, data, name):
        """Replot the spectrum plot using the data provided. Called
        when changing spectrum types. See the two methods above.
        """
        self.spectrum_button.setText(name)
        self.spectrum.dataplot[0].set_ydata(data)
        self.spectrum.ax.relim()
        self.spectrum.ax.autoscale()
        self.spectrum.draw()

    def plot_spectrum(self):
        """Plot the spectrum and add labels to the graph."""
        self.spectrum.ax.clear()
        self.spectrum.ax.set_title('Spectrum')
        self.spectrum.ax.set_xlabel('Wavelength (A)')
        self.spectrum.ax.set_ylabel('Intensity')
        wavelength = self.model.spectrum.wavelength.value
        if self.model.spectrum.luminosity_density_lambda is None:
            luminosity_density_lambda = np.zeros_like(wavelength)
        else:
            luminosity_density_lambda =\
                self.model.spectrum.luminosity_density_lambda.value
        self.spectrum.dataplot = self.spectrum.ax.plot(wavelength,
            luminosity_density_lambda, label='b')
        self.spectrum.draw()

    def change_graph_to_ws(self):
        """Change the shell plot to show dilution factor."""
        self.change_graph(self.model.ws, 'Ws', '')

    def change_graph_to_t_rads(self):
        """Change the graph back to radiation Temperature."""
        self.change_graph(self.model.t_rads.value, 't_rads', '(K)')

    def change_graph(self, data, name, unit):
        """Called to change the shell plot by the two methods above."""
        self.graph_button.setText(name)
        self.graph.dataplot[0].set_ydata(data)
        self.graph.ax1.relim()
        self.graph.ax1.autoscale()
        self.graph.ax1.set_title(name + ' vs Shell')
        self.graph.ax1.set_ylabel(name + ' ' + unit)
        normalizer = colors.Normalize(vmin=data.min(), vmax=data.max())
        color_map = plt.cm.ScalarMappable(norm=normalizer, cmap=plt.cm.jet)
        color_map.set_array(data)
        self.graph.cb.set_clim(vmin=data.min(), vmax=data.max())
        self.graph.cb.update_normal(color_map)
        if unit == '(K)':
            unit = 'T (K)'
        self.graph.cb.set_label(unit)
        # Recolor each shell patch to match the new quantity.
        for i, item in enumerate(data):
            self.shells[i].set_facecolor(color_map.to_rgba(item))
        self.graph.draw()

    def plot_model(self):
        """Plot the two graphs, the shell model and the line plot
        both showing the radiation temperature and set labels.
        """
        self.graph.ax1.clear()
        self.graph.ax1.set_title('Rad. Temp vs Shell')
        self.graph.ax1.set_xlabel('Shell Number')
        self.graph.ax1.set_ylabel('Rad. Temp (K)')
        self.graph.ax1.yaxis.get_major_formatter().set_powerlimits((0, 1))
        self.graph.dataplot = self.graph.ax1.plot(
            range(len(self.model.t_rads.value)), self.model.t_rads.value)
        self.graph.ax2.clear()
        self.graph.ax2.set_title('Shell View')
        self.graph.ax2.set_xticklabels([])
        self.graph.ax2.set_yticklabels([])
        self.graph.ax2.grid = True
        self.shells = []
        t_rad_normalizer = colors.Normalize(vmin=self.model.t_rads.value.min(),
                                            vmax=self.model.t_rads.value.max())
        t_rad_color_map = plt.cm.ScalarMappable(norm=t_rad_normalizer,
                                                cmap=plt.cm.jet)
        t_rad_color_map.set_array(self.model.t_rads.value)
        # Create the colorbar on first plot; afterwards only rescale it.
        if self.graph.cb:
            self.graph.cb.set_clim(vmin=self.model.t_rads.value.min(),
                                   vmax=self.model.t_rads.value.max())
            self.graph.cb.update_normal(t_rad_color_map)
        else:
            self.graph.cb = self.graph.figure.colorbar(t_rad_color_map)
            self.graph.cb.set_label('T (K)')
        # Scale radii so the innermost shell occupies a fixed fraction of
        # the axes regardless of the physical extent of the model.
        self.graph.normalizing_factor = 0.2 * (
            self.model.tardis_config.structure.r_outer.value[-1] -
            self.model.tardis_config.structure.r_inner.value[0]) / (
            self.model.tardis_config.structure.r_inner.value[0])
        #self.graph.normalizing_factor = 8e-16
        for i, t_rad in enumerate(self.model.t_rads.value):
            r_inner = (self.model.tardis_config.structure.r_inner.value[i] *
                       self.graph.normalizing_factor)
            r_outer = (self.model.tardis_config.structure.r_outer.value[i] *
                       self.graph.normalizing_factor)
            self.shells.append(Shell(i, (0,0), r_inner, r_outer,
                facecolor=t_rad_color_map.to_rgba(t_rad),
                picker=self.graph.shell_picker))
            self.graph.ax2.add_patch(self.shells[i])
        self.graph.ax2.set_xlim(0,
            self.model.tardis_config.structure.r_outer.value[-1] *
            self.graph.normalizing_factor)
        self.graph.ax2.set_ylim(0,
            self.model.tardis_config.structure.r_outer.value[-1] *
            self.graph.normalizing_factor)
        self.graph.figure.tight_layout()
        self.graph.draw()

    def on_header_double_clicked(self, index):
        """Callback to get counts for different Z from table."""
        self.shell_info[index] = ShellInfo(index, self.createTable, self)
class ShellInfo(QtGui.QDialog):
    """Dialog to display Shell abundances.

    Shows three cascading tables: element abundances for one shell,
    ion populations for a clicked element, and level populations for
    a clicked ion.  The ion and level tables are created lazily.
    """

    def __init__(self, index, tablecreator, parent=None):
        """Create the widget to display shell info and set data.

        Parameters
        ----------
        index: int
            Zero-based shell index whose abundances are shown.
        tablecreator: callable
            Factory that builds the table models used by the views.
        parent: ModelViewer
            The widget holding the tardis model (accessed via
            ``parent.model``).
        """
        super(ShellInfo, self).__init__(parent)
        self.createTable = tablecreator
        self.parent = parent
        self.shell_index = index
        self.setGeometry(400, 150, 200, 400)
        self.setWindowTitle('Shell %d Abundances' % (self.shell_index + 1))
        self.atomstable = QtGui.QTableView()
        self.ionstable = QtGui.QTableView()
        self.levelstable = QtGui.QTableView()
        # NOTE(review): despite the handler name, this is wired to the
        # single-click signal 'sectionClicked(int)'.
        self.atomstable.connect(self.atomstable.verticalHeader(),
            QtCore.SIGNAL('sectionClicked(int)'),
            self.on_atom_header_double_clicked)
        self.table1_data = self.parent.model.tardis_config.abundances[
            self.shell_index]
        self.atomsdata = self.createTable([['Z = '], ['Count (Shell %d)' % (
            self.shell_index + 1)]], iterate_header=(2, 0),
            index_info=self.table1_data.index.values.tolist())
        # Ion and level tables are filled on demand by the click handlers.
        self.ionsdata = None
        self.levelsdata = None
        self.atomsdata.add_data(self.table1_data.values.tolist())
        self.atomstable.setModel(self.atomsdata)
        self.layout = QtGui.QHBoxLayout()
        self.layout.addWidget(self.atomstable)
        self.layout.addWidget(self.ionstable)
        self.layout.addWidget(self.levelstable)
        self.setLayout(self.layout)
        self.ionstable.hide()
        self.levelstable.hide()
        self.show()

    def on_atom_header_double_clicked(self, index):
        """Called when a header in the first column is clicked to show
        ion populations."""
        # Map the row position back to the atomic number Z.
        self.current_atom_index = self.table1_data.index.values.tolist()[index]
        self.table2_data = self.parent.model.plasma_array.ion_number_density[
            self.shell_index].ix[self.current_atom_index]
        self.ionsdata = self.createTable([['Ion: '],
            ['Count (Z = %d)' % self.current_atom_index]],
            iterate_header=(2, 0),
            index_info=self.table2_data.index.values.tolist())
        # Normalize ion counts by the total number density of the element.
        normalized_data = []
        for item in self.table2_data.values:
            normalized_data.append(float(item /
                self.parent.model.tardis_config.number_densities[self.shell_index]
                .ix[self.current_atom_index]))
        self.ionsdata.add_data(normalized_data)
        self.ionstable.setModel(self.ionsdata)
        self.ionstable.connect(self.ionstable.verticalHeader(), QtCore.SIGNAL(
            'sectionClicked(int)'),self.on_ion_header_double_clicked)
        self.levelstable.hide()
        self.ionstable.setColumnWidth(0, 120)
        self.ionstable.show()
        # Widen the dialog to make room for the second table.
        self.setGeometry(400, 150, 380, 400)
        self.show()

    def on_ion_header_double_clicked(self, index):
        """Called on double click of ion headers to show level populations."""
        self.current_ion_index = self.table2_data.index.values.tolist()[index]
        self.table3_data = self.parent.model.plasma_array.level_number_density[
            self.shell_index].ix[self.current_atom_index, self.current_ion_index]
        self.levelsdata = self.createTable([['Level: '],
            ['Count (Ion %d)' % self.current_ion_index]],
            iterate_header=(2, 0),
            index_info=self.table3_data.index.values.tolist())
        # Normalize level counts by the ion's total number density.
        normalized_data = []
        for item in self.table3_data.values.tolist():
            normalized_data.append(float(item /
                self.table2_data.ix[self.current_ion_index]))
        self.levelsdata.add_data(normalized_data)
        self.levelstable.setModel(self.levelsdata)
        self.levelstable.setColumnWidth(0, 120)
        self.levelstable.show()
        # Widen the dialog again for the third table.
        self.setGeometry(400, 150, 580, 400)
        self.show()

    def update_tables(self):
        """Update table data for shell info viewer."""
        # Re-read element data from the (possibly recalculated) model and
        # collapse the lazily-built ion/level tables.
        self.table1_data = self.parent.model.plasma_array.number_density[
            self.shell_index]
        self.atomsdata.index_info=self.table1_data.index.values.tolist()
        self.atomsdata.arraydata = []
        self.atomsdata.add_data(self.table1_data.values.tolist())
        self.atomsdata.update_table()
        self.ionstable.hide()
        self.levelstable.hide()
        self.setGeometry(400, 150, 200, 400)
        self.show()
class LineInfo(QtGui.QDialog):
    """Dialog to show the line info used by spectrum widget.

    Displays last-line-interaction statistics for a user-selected
    wavelength range, filtered both by packet frequency and by the
    frequency of the line interaction.
    """

    def __init__(self, parent, wavelength_start, wavelength_end, tablecreator):
        """Create the dialog and set data in it from the model.
        Show widget.

        Parameters
        ----------
        parent: ModelViewer
            Widget holding the tardis model (``parent.model``).
        wavelength_start, wavelength_end: float
            Wavelength range in Angstrom.
        tablecreator: callable
            Factory that builds the table models used by the views.
        """
        super(LineInfo, self).__init__(parent)
        self.createTable = tablecreator
        self.parent = parent
        # Offset each new dialog so stacked dialogs stay visible.
        self.setGeometry(180 + len(self.parent.line_info) * 20, 150, 250, 400)
        self.setWindowTitle('Line Interaction: %.2f - %.2f (A) ' % (
            wavelength_start, wavelength_end,))
        self.layout = QtGui.QVBoxLayout()
        # One analysis filtered by packet frequency ...
        packet_nu_line_interaction = analysis.LastLineInteraction.from_model(
            self.parent.model)
        packet_nu_line_interaction.packet_filter_mode = 'packet_nu'
        packet_nu_line_interaction.wavelength_start = wavelength_start * u.angstrom
        packet_nu_line_interaction.wavelength_end = wavelength_end * u.angstrom
        # ... and one filtered by the frequency of the line interaction.
        line_in_nu_line_interaction = analysis.LastLineInteraction.from_model(
            self.parent.model)
        line_in_nu_line_interaction.packet_filter_mode = 'line_in_nu'
        line_in_nu_line_interaction.wavelength_start = wavelength_start * u.angstrom
        line_in_nu_line_interaction.wavelength_end = wavelength_end * u.angstrom
        self.layout.addWidget(LineInteractionTables(packet_nu_line_interaction,
            self.parent.model.atom_data, 'filtered by frequency of packet',
            self.createTable))
        self.layout.addWidget(LineInteractionTables(line_in_nu_line_interaction,
            self.parent.model.atom_data,
            'filtered by frequency of line interaction', self.createTable))
        self.setLayout(self.layout)
        self.show()

    def get_data(self, wavelength_start, wavelength_end):
        """Fetch line info data for the specified wavelength range
        from the model and create ionstable.

        Parameters
        ----------
        wavelength_start, wavelength_end: float
            Wavelength range in Angstrom.
        """
        self.wavelength_start = wavelength_start * u.angstrom
        self.wavelength_end = wavelength_end * u.angstrom
        last_line_in_ids, last_line_out_ids = analysis.get_last_line_interaction(
            self.wavelength_start, self.wavelength_end, self.parent.model)
        self.last_line_in, self.last_line_out = (
            self.parent.model.atom_data.lines.ix[last_line_in_ids],
            self.parent.model.atom_data.lines.ix[last_line_out_ids])
        self.grouped_lines_in, self.grouped_lines_out = (self.last_line_in.groupby(
            ['atomic_number', 'ion_number']),
            self.last_line_out.groupby(['atomic_number', 'ion_number']))
        self.ions_in, self.ions_out = (self.grouped_lines_in.groups.keys(),
            self.grouped_lines_out.groups.keys())
        # NOTE(review): sorting .groups.keys() in place is Python-2 only
        # (dict.keys() returns a view, not a list, under Python 3).
        self.ions_in.sort()
        self.ions_out.sort()
        self.header_list = []
        # Fraction of all interactions contributed by each (Z, ion) pair.
        self.ion_table = (self.grouped_lines_in.wavelength.count().astype(float) /
            self.grouped_lines_in.wavelength.count().sum()).values.tolist()
        for z, ion in self.ions_in:
            self.header_list.append('Z = %d: Ion %d' % (z, ion))

    def get_transition_table(self, lines, atom, ion):
        """Called by the two methods below to get transition table for
        given lines, atom and ions.

        Returns
        -------
        (transitions_parsed, transitions_count): (list of str, list of float)
            Human-readable transition labels and their normalized
            occurrence fractions.
        """
        grouped = lines.groupby(['atomic_number', 'ion_number'])
        transitions_with_duplicates = lines.ix[grouped.groups[(atom, ion)]
            ].groupby(['level_number_lower', 'level_number_upper']).groups
        transitions = lines.ix[grouped.groups[(atom, ion)]
            ].drop_duplicates().groupby(['level_number_lower',
            'level_number_upper']).groups
        transitions_count = []
        transitions_parsed = []
        # Count how often each unique transition occurs (duplicates kept
        # in the first grouping carry the multiplicity).
        for item in transitions.values():
            c = 0
            for ditem in transitions_with_duplicates.values():
                c += ditem.count(item[0])
            transitions_count.append(c)
        # Normalize counts to fractions of the total.
        s = 0
        for item in transitions_count:
            s += item
        for index in range(len(transitions_count)):
            transitions_count[index] /= float(s)
        for key, value in transitions.items():
            transitions_parsed.append("%d-%d (%.2f A)" % (key[0], key[1],
                self.parent.model.atom_data.lines.ix[value[0]]['wavelength']))
        return transitions_parsed, transitions_count

    def on_atom_clicked(self, index):
        """Create and show transition table for the clicked item in the
        dialog created by the spectrum widget.
        """
        self.transitionsin_parsed, self.transitionsin_count = (
            self.get_transition_table(self.last_line_in,
            self.ions_in[index][0], self.ions_in[index][1]))
        self.transitionsout_parsed, self.transitionsout_count = (
            self.get_transition_table(self.last_line_out,
            self.ions_out[index][0], self.ions_out[index][1]))
        self.transitionsindata = self.createTable([self.transitionsin_parsed,
            ['Lines In']])
        self.transitionsoutdata = self.createTable([self.transitionsout_parsed,
            ['Lines Out']])
        self.transitionsindata.add_data(self.transitionsin_count)
        self.transitionsoutdata.add_data(self.transitionsout_count)
        # NOTE(review): transitionsintable/transitionsouttable are assumed
        # to be created elsewhere before this handler fires — not visible
        # in this chunk; verify against the rest of the file.
        self.transitionsintable.setModel(self.transitionsindata)
        self.transitionsouttable.setModel(self.transitionsoutdata)
        self.transitionsintable.show()
        self.transitionsouttable.show()
        self.setGeometry(180 + len(self.parent.line_info) * 20, 150, 750, 400)
        self.show()

    def on_atom_clicked2(self, index):
        """Create and show transition table for the clicked item in the
        dialog created by the spectrum widget.

        Same as :meth:`on_atom_clicked` but targets the second pair of
        transition tables (``transitionsintable2``/``transitionsouttable2``).
        """
        self.transitionsin_parsed, self.transitionsin_count = (
            self.get_transition_table(self.last_line_in, self.ions_in[index][0],
            self.ions_in[index][1]))
        self.transitionsout_parsed, self.transitionsout_count = (
            self.get_transition_table(self.last_line_out,
            self.ions_out[index][0], self.ions_out[index][1]))
        self.transitionsindata = self.createTable([self.transitionsin_parsed,
            ['Lines In']])
        self.transitionsoutdata = self.createTable([self.transitionsout_parsed,
            ['Lines Out']])
        self.transitionsindata.add_data(self.transitionsin_count)
        self.transitionsoutdata.add_data(self.transitionsout_count)
        self.transitionsintable2.setModel(self.transitionsindata)
        self.transitionsouttable2.setModel(self.transitionsoutdata)
        self.transitionsintable2.show()
        self.transitionsouttable2.show()
        self.setGeometry(180 + len(self.parent.line_info) * 20, 150, 750, 400)
        self.show()
class LineInteractionTables(QtGui.QWidget):
    """Widget to hold the line interaction tables used by
    LineInfo which in turn is used by spectrum widget.
    """

    def __init__(self, line_interaction_analysis, atom_data, description,
                 tablecreator):
        """Create the widget and set data.

        Parameters
        ----------
        line_interaction_analysis: analysis.LastLineInteraction
            Pre-filtered last-line-interaction analysis object.
        atom_data: tardis atom data
            Used to translate species tuples into symbols.
        description: str
            Text shown next to the species table.
        tablecreator: callable
            Factory that builds the table models used by the views.
        """
        super(LineInteractionTables, self).__init__()
        self.createTable = tablecreator
        self.text_description = QtGui.QLabel(str(description))
        self.species_table = QtGui.QTableView()
        self.transitions_table = QtGui.QTableView()
        self.layout = QtGui.QHBoxLayout()
        self.line_interaction_analysis = line_interaction_analysis
        self.atom_data = atom_data
        line_interaction_species_group = \
            line_interaction_analysis.last_line_in.groupby(['atomic_number',
                                                            'ion_number'])
        self.species_selected = sorted(
            line_interaction_species_group.groups.keys())
        species_symbols = [util.species_tuple_to_string(item,
            atom_data) for item in self.species_selected]
        species_table_model = self.createTable([species_symbols, ['Species']])
        # Fraction of all last-line interactions attributed to each species.
        species_abundances = (
            line_interaction_species_group.wavelength.count().astype(float) /
            line_interaction_analysis.last_line_in.wavelength.count()).astype(float).tolist()
        # BUG FIX: plain map() returns an iterator on Python 3, but
        # add_data expects a list — materialize it.
        species_abundances = list(map(float, species_abundances))
        species_table_model.add_data(species_abundances)
        self.species_table.setModel(species_table_model)
        # (Removed a stray no-op statement that recomputed
        # line_interaction_species_group.wavelength.count() and discarded
        # the result.)
        self.layout.addWidget(self.text_description)
        self.layout.addWidget(self.species_table)
        self.species_table.connect(self.species_table.verticalHeader(),
            QtCore.SIGNAL('sectionClicked(int)'), self.on_species_clicked)
        self.layout.addWidget(self.transitions_table)
        self.setLayout(self.layout)
        self.show()

    def on_species_clicked(self, index):
        """Populate and show the transitions table for the species whose
        header row was clicked.
        """
        current_species = self.species_selected[index]
        last_line_in = self.line_interaction_analysis.last_line_in
        last_line_out = self.line_interaction_analysis.last_line_out
        # Boolean mask selecting rows of the clicked (Z, ion) pair.
        last_line_in_filter = (last_line_in.atomic_number == current_species[0]).values & \
            (last_line_in.ion_number == current_species[1]).values
        current_last_line_in = last_line_in[last_line_in_filter].reset_index()
        current_last_line_out = last_line_out[last_line_in_filter].reset_index()
        current_last_line_in['line_id_out'] = current_last_line_out['line_id']
        last_line_in_string = []
        last_line_count = []
        grouped_line_interactions = current_last_line_in.groupby(['line_id',
                                                                  'line_id_out'])
        exc_deexc_string = 'exc. %d-%d (%.2f A) de-exc. %d-%d (%.2f A)'
        # Build one human-readable row per (line in, line out) pair.
        for line_id, row in grouped_line_interactions.wavelength.count().iteritems():
            current_line_in = self.atom_data.lines.ix[line_id[0]]
            current_line_out = self.atom_data.lines.ix[line_id[1]]
            last_line_in_string.append(exc_deexc_string % (
                current_line_in['level_number_lower'],
                current_line_in['level_number_upper'],
                current_line_in['wavelength'],
                current_line_out['level_number_upper'],
                current_line_out['level_number_lower'],
                current_line_out['wavelength']))
            last_line_count.append(int(row))
        last_line_in_model = self.createTable([last_line_in_string, [
            'Num. pkts %d' % current_last_line_in.wavelength.count()]])
        last_line_in_model.add_data(last_line_count)
        self.transitions_table.setModel(last_line_in_model)
class Tardis(QtGui.QMainWindow):
    """Create the top level window for the GUI and wait for call to
    display data.
    """

    def __init__(self, tablemodel, config=None, atom_data=None, parent=None):
        """Create the top level window and all widgets it contains.
        When called with no arguments it initializes the GUI in passive
        mode. When a yaml config file and atom data are provided the
        GUI starts in the active mode.

        Parameters
        ---------
        tablemodel: callable
            Factory passed down to ModelViewer for building table models.
        parent: None
            Set to None by default and shouldn't be changed unless
            you are developing something new.
        config: string
            yaml file with configuration information for TARDIS.
        atom_data: string
            hdf file that has the atom data.

        Raises
        ------
        TemporarilyUnavaliable
            Raised when an attempt is made to start the active mode.
            This will be removed when active mode is developed.
        """
        #assumes that qt has already been initialized by starting IPython
        #with the flag "--pylab=qt"
        # app = QtCore.QCoreApplication.instance()
        # if app is None:
        #     app = QtGui.QApplication([])
        # try:
        #     from IPython.lib.guisupport import start_event_loop_qt4
        #     start_event_loop_qt4(app)
        # except ImportError:
        #     app.exec_()
        QtGui.QMainWindow.__init__(self, parent)

        #path to icons folder
        self.path = os.path.join(tardis.__path__[0],'gui','images')

        #Check if configuration file was provided
        self.mode = 'passive'
        if config is not None:
            self.mode = 'active'

        #Statusbar
        # Starts pessimistic; show_model() flips the label on convergence.
        statusbr = self.statusBar()
        lblstr = '<font color="red"><b>Calculation did not converge</b></font>'
        self.successLabel = QtGui.QLabel(lblstr)
        self.successLabel.setFrameStyle(QtGui.QFrame.StyledPanel |
                                        QtGui.QFrame.Sunken)
        statusbr.addPermanentWidget(self.successLabel)
        self.modeLabel = QtGui.QLabel('Passive mode')
        statusbr.addPermanentWidget(self.modeLabel)
        statusbr.showMessage(self.mode, 5000)
        statusbr.showMessage("Ready", 5000)

        #Actions
        quitAction = QtGui.QAction("&Quit", self)
        quitAction.setIcon(QtGui.QIcon(os.path.join(self.path,
            'closeicon.png')))
        quitAction.triggered.connect(self.close)

        self.viewMdv = QtGui.QAction("View &Model", self)
        self.viewMdv.setIcon(QtGui.QIcon(os.path.join(self.path,
            'mdvswitch.png')))
        self.viewMdv.setCheckable(True)
        self.viewMdv.setChecked(True)
        # View-switch actions stay disabled in passive mode.
        self.viewMdv.setEnabled(False)
        self.viewMdv.triggered.connect(self.switch_to_mdv)

        self.viewForm = QtGui.QAction("&Edit Model", self)
        self.viewForm.setIcon(QtGui.QIcon(os.path.join(self.path,
            'formswitch.png')))
        self.viewForm.setCheckable(True)
        self.viewForm.setEnabled(False)
        self.viewForm.triggered.connect(self.switch_to_form)

        #Menubar
        self.fileMenu = self.menuBar().addMenu("&File")
        self.fileMenu.addAction(quitAction)
        self.viewMenu = self.menuBar().addMenu("&View")
        self.viewMenu.addAction(self.viewMdv)
        self.viewMenu.addAction(self.viewForm)
        self.helpMenu = self.menuBar().addMenu("&Help")

        #Toolbar
        fileToolbar = self.addToolBar("File")
        fileToolbar.setObjectName("FileToolBar")
        fileToolbar.addAction(quitAction)

        viewToolbar = self.addToolBar("View")
        viewToolbar.setObjectName("ViewToolBar")
        viewToolbar.addAction(self.viewMdv)
        viewToolbar.addAction(self.viewForm)

        #Central Widget
        self.stackedWidget = QtGui.QStackedWidget()
        self.mdv = ModelViewer(tablemodel)
        self.stackedWidget.addWidget(self.mdv)

        #In case of active mode
        if self.mode == 'active':
            #Disabled currently
            # self.formWidget = ConfigEditor(config)
            # #scrollarea
            # scrollarea = QtGui.QScrollArea()
            # scrollarea.setWidget(self.formWidget)
            # self.stackedWidget.addWidget(scrollarea)
            # self.viewForm.setEnabled(True)
            # self.viewMdv.setEnabled(True)
            # model = run_tardis(config, atom_data)
            # self.show_model(model)
            raise TemporarilyUnavaliable("The active mode is under"
                "development. Please use the passive mode for now.")

        self.setCentralWidget(self.stackedWidget)

    def show_model(self, model=None):
        """Set the provided model into the GUI and show the main window.

        Parameters
        ----------
        model: TARDIS model object
            A keyword argument that takes the tardis model object.
        """
        if model:
            self.mdv.change_model(model)
        if model.converged:
            self.successLabel.setText('<font color="green">converged</font>')
        if self.mode == 'active':
            self.modeLabel.setText('Active Mode')

        self.mdv.fill_output_label()
        self.mdv.tableview.setModel(self.mdv.tablemodel)
        self.mdv.plot_model()
        self.mdv.plot_spectrum()
        self.showMaximized()

    def switch_to_mdv(self):
        """Switch the cental stacked widget to show the modelviewer."""
        self.stackedWidget.setCurrentIndex(0)
        self.viewForm.setChecked(False)

    def switch_to_form(self):
        """Switch the cental stacked widget to show the ConfigEditor."""
        self.stackedWidget.setCurrentIndex(1)
        self.viewMdv.setChecked(False)
class TemporarilyUnavaliable(Exception):
    """Exception raised when creation of active mode of tardis is attempted.

    (Class name keeps the original spelling for caller compatibility.)
    """

    def __init__(self, value):
        # Keep the offending value for display via __str__.
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)
| bsd-3-clause |
lowlevel86/wakefulness-forecaster | display/graph.py | 1 | 1982 | import datetime
import time
import matplotlib.pyplot as plt
from mvtrialanderror import *
# C-style boolean aliases used by the mvtrialanderror helpers.
true = 1
false = 0
# Running fit target; updated on every iteration of the loop below.
valueToTargetSum = 0
# Number of minutes in one day.
minsInDay = 60*24
#find points along a line using x coordinates
def xCutsLine(x1Line, x2Line, y1Line, y2Line, xCutLocs):
    """Return (xs, ys) for the points where the vertical lines at
    *xCutLocs* intersect the line through (x1Line, y1Line) and
    (x2Line, y2Line)."""
    xPts = [loc for loc in xCutLocs]
    # Linear interpolation, evaluated per cut location.
    yPts = [y1Line - float(x1Line - loc) / float(x1Line - x2Line) *
            (y1Line - y2Line) for loc in xCutLocs]
    return xPts, yPts
#get the day and minute timestamps from file
# Each log line looks like "<day> <?> <minute>" (space separated);
# field 1 is unused here.
with open("wakeUpLog.txt") as f:
    wakeUpLogData = f.readlines()
dayTsArray = []
minTsArray = []
for log in wakeUpLogData:
    dayTsArray.append(int(log.split(" ")[0]))
    minTsArray.append(int(log.split(" ")[2]))
#find the amount of time that passes between each consecutive log
# x = days since the first log; y = drift of the wake-up minute versus
# a perfect 24-hour cycle (positive: stretched, negative: contracted).
xPoints = []
yPoints = []
for i in range(1, len(wakeUpLogData)):
    #note that some days will be missing because a sleep-wake cycle
    #could end or begin just when a day transitions to the next
    if dayTsArray[i] - dayTsArray[i-1] == 1:
        xPoints.append(dayTsArray[i] - dayTsArray[0])
        yPoints.append(minTsArray[i] - minTsArray[i-1] - minsInDay)
topMostValue = max(yPoints)
bottomMostValue = min(yPoints)
xValues = [min(xPoints), max(xPoints)]
yValues = []
#find a line that represents the point data pattern
# Iterative trial-and-error fit: guessValues proposes endpoint y-values,
# closerFurther scores the resulting line against the data points.
for iteration in range(1, 200):
    yValues = guessValues(topMostValue, bottomMostValue, len(xValues), valueToTargetSum, iteration)
    xLinePts, yLinePts = xCutsLine(xValues[0], xValues[1], yValues[0], yValues[1], xPoints)
    if iteration == 1:
        closerFurtherIni(yLinePts)
    closerFurtherRetData = closerFurther(yLinePts, yPoints, len(yLinePts), 0.001, true)
    valueToTargetSum = closerFurtherRetData[3]
print("valueToTargetSum: %f" % (valueToTargetSum))
# Plot the raw drift points and the fitted trend line, then save to disk.
plt.xlabel('Days')
plt.ylabel('Minutes')
plt.title('Amount of Time the Sleep-Wake Cycle Stretches or Contracts')
plt.plot(xPoints, yPoints, 'ro')
plt.plot([xValues[0], xValues[1]], [yValues[0], yValues[1]])
plt.draw()
plt.savefig('graph.png')
| mit |
tosolveit/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the ``sklearn.svm`` subpackage.

    Builds the vendored libsvm/liblinear C/C++ sources into the
    ``libsvm``, ``liblinear`` and ``libsvm_sparse`` extension modules.
    """
    from numpy.distutils.misc_util import Configuration

    config = Configuration('svm', parent_package, top_path)

    config.add_subpackage('tests')

    # Section LibSVM

    # we compile both libsvm and libsvm_sparse
    config.add_library('libsvm-skl',
                       sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
                       depends=[join('src', 'libsvm', 'svm.cpp'),
                                join('src', 'libsvm', 'svm.h')],
                       # Force C++ linking in case gcc is picked up instead
                       # of g++ under windows with some versions of MinGW
                       extra_link_args=['-lstdc++'],
                       )

    libsvm_sources = ['libsvm.c']
    libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
                      join('src', 'libsvm', 'libsvm_template.cpp'),
                      join('src', 'libsvm', 'svm.cpp'),
                      join('src', 'libsvm', 'svm.h')]

    config.add_extension('libsvm',
                         sources=libsvm_sources,
                         include_dirs=[numpy.get_include(),
                                       join('src', 'libsvm')],
                         libraries=['libsvm-skl'],
                         depends=libsvm_depends,
                         )

    ### liblinear module
    # liblinear links against CBLAS; on posix it also needs libm.
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        cblas_libs.append('m')

    liblinear_sources = ['liblinear.c',
                         join('src', 'liblinear', '*.cpp')]
    liblinear_depends = [join('src', 'liblinear', '*.h'),
                         join('src', 'liblinear', 'liblinear_helper.c')]

    config.add_extension('liblinear',
                         sources=liblinear_sources,
                         libraries=cblas_libs,
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         depends=liblinear_depends,
                         # extra_compile_args=['-O0 -fno-inline'],
                         ** blas_info)

    ## end liblinear module

    # this should go *after* libsvm-skl
    libsvm_sparse_sources = ['libsvm_sparse.c']
    config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
                         sources=libsvm_sparse_sources,
                         include_dirs=[numpy.get_include(),
                                       join("src", "libsvm")],
                         depends=[join("src", "libsvm", "svm.h"),
                                  join("src", "libsvm",
                                       "libsvm_sparse_helper.c")])

    return config
# Allow building this subpackage standalone via numpy.distutils.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
Leberwurscht/Python-Guitar-Transcription-Aid | Analyze.py | 1 | 1678 | #!/usr/bin/env python
import gtk, numpy, scipy.ndimage
import matplotlib
import matplotlib.backends.backend_gtkcairo as mpl_backend
def get_power(data):
    """Return the power spectrum of *data*: |rfft|**2 of the Hann-windowed signal.

    The input array is left untouched — the window is applied out-of-place.
    (The previous ``data *= window`` mutated the caller's array in place and
    raised a casting error for integer dtypes.)
    """
    # apply window (to a copy; do not clobber the caller's buffer)
    window = numpy.hanning(len(data))
    data = data * window

    # fft
    power = numpy.abs(numpy.fft.rfft(data)) ** 2.
    return power
def smooth(array, window=3):
    """Smooth *array* by convolving it with a Hann kernel of length *window*."""
    kernel = numpy.hanning(window)
    return numpy.convolve(array, kernel, "same")
def find_peaks(frq, power, max_window=3, min_window=3, height=0.0001):
    """Return indices where *power* equals its local maximum and the local
    max-min contrast exceeds *height* (*frq* is accepted but unused here)."""
    local_max = scipy.ndimage.maximum_filter1d(power, size=max_window)
    local_min = scipy.ndimage.minimum_filter1d(power, size=min_window)
    is_peak = (local_max == power) & (local_max - local_min > height)
    return numpy.nonzero(is_peak)[0]
class Analyze(gtk.Window):
    """GTK window embedding a matplotlib canvas + toolbar for spectrum plots."""

    def __init__(self):
        gtk.Window.__init__(self)

        fig = matplotlib.figure.Figure(figsize=(5,4))
        self.ax = fig.add_subplot(111)

        vbox = gtk.VBox()
        self.add(vbox)

        # Embed the matplotlib canvas and its navigation toolbar in the VBox.
        self.figure = mpl_backend.FigureCanvasGTK(fig)
        self.figure.set_size_request(500,400)
        self.navbar = mpl_backend.NavigationToolbar2Cairo(self.figure, self)
        vbox.pack_start(self.figure)
        vbox.pack_start(self.navbar, False, False)

    def simple_plot(self, x, y, **kwargs):
        """Plot *y* against *x* on the embedded axes (kwargs go to plot)."""
        self.ax.plot(x, y, **kwargs)

    def add_line(self, pos, **kwargs):
        """Draw a vertical marker line at x position *pos*."""
        self.ax.axvline(pos, **kwargs)

    def plot_spectrum(self, frq, power):
        """Plot a power spectrum with semitone grid lines and peak markers."""
        self.simple_plot(frq, power, color="g")
        # self.ax.plot(frq, 10*numpy.log10(power), color="r")

        # Red grid lines at equal-tempered semitone frequencies around A440.
        # (xrange: this module is Python 2 — it imports gtk.)
        for semitone in xrange(-29,50):
            f = 440. * ( 2.**(1./12.) )**semitone
            self.ax.axvline(f, color="r")

        # Black lines at detected spectral peaks.
        for maximum in find_peaks(frq, power, 3, 3, 10):
            self.ax.axvline(frq[maximum], color="k")
| gpl-3.0 |
navijo/FlOYBD | DataMining/weather/weatherFunctions.py | 2 | 10114 | import pyspark
import os.path
import numpy as np
import pandas as pd
import time
import json
from cylinders import CylindersKml
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.functions import max, min, col, avg, count
from collections import defaultdict
from cassandra.cluster import Cluster
from datetime import datetime
def initEnvironment():
    """Create the Spark context/SQLContext and the Cassandra session.

    Side effects: binds the module-level globals ``sc``, ``sql``,
    ``cluster`` and ``session``. Host addresses are hard-coded for the
    cluster at 192.168.246.236.
    """
    global sc, sql, cluster, session

    conf = SparkConf()
    conf.setMaster("spark://192.168.246.236:7077")
    conf.setAppName("Spark Weather Functions")
    conf.set("spark.cassandra.connection.host", "192.168.246.236")
    conf.set("spark.executor.memory", "10g")
    conf.set("spark.num.executors", "2")

    # NOTE(review): ``spark_home`` is never used afterwards — confirm it can
    # be removed.
    spark_home = os.environ.get('SPARK_HOME', None)
    sc = SparkContext(conf=conf)
    sql = SQLContext(sc)

    # Cassandra session against the "dev" keyspace.
    cluster = Cluster(['192.168.246.236'])
    session = cluster.connect("dev")
def loadData():
    """Load the three Cassandra tables into module-level Spark dataframes.

    Side effects: binds the globals ``stations``, ``monthly`` and ``daily``.
    Requires ``initEnvironment()`` to have been called first (uses ``sql``).
    """
    global stations, monthly, daily
    stations = sql.read.format("org.apache.spark.sql.cassandra").load(keyspace="dev", table="station")
    monthly = sql.read.format("org.apache.spark.sql.cassandra").load(keyspace="dev", table="monthly_measurement")
    daily = sql.read.format("org.apache.spark.sql.cassandra").load(keyspace="dev", table="clean_daily_measurement")
def getStationValueDate(pStationId, date):
    """Return the daily measurements of one station on one date as JSON.

    Parameters
    ----------
    pStationId : str
        Station identifier (``station_id`` column of the ``daily`` table).
    date : str
        Date in ISO ``YYYY-MM-DD`` form.

    Returns
    -------
    str
        JSON string of the matching rows, or ``""`` when nothing matched.
    """
    # Validate the date format early. ``%m`` is the month directive — the
    # previous pattern '%Y-%M-%d' wrongly used %M (minutes), silently
    # accepting/misparsing dates; the parsed value itself is not needed.
    datetime.strptime(date, '%Y-%m-%d')

    stationData = daily[(daily.station_id == pStationId) & (daily.measure_date == date)]
    jsonData = dataframeToJson(stationData)
    if len(json.loads(jsonData)['measure_date']) > 0:
        return jsonData
    return ""
def _aggregated_values(pStationId, agg_func):
    """Aggregate every measurement column of one station with *agg_func*
    (one of pyspark's ``max``/``min``/``avg``) and join station metadata."""
    aggregated = daily[daily.station_id == pStationId].groupBy('station_id').agg(
        agg_func('max_temp'), agg_func('max_pressure'),
        agg_func('med_temp'), agg_func('min_temp'),
        agg_func('precip'), agg_func('wind_med_vel'),
        agg_func('wind_streak'))
    return aggregated.join(stations, on=['station_id'], how='left_outer')


def getMaxValues(pStationId):
    """Maximum of each measurement column for the given station."""
    return _aggregated_values(pStationId, max)


def getMinValues(pStationId):
    """Minimum of each measurement column for the given station."""
    return _aggregated_values(pStationId, min)


def getAvgValues(pStationId):
    """Average of each measurement column for the given station."""
    return _aggregated_values(pStationId, avg)
def dataframeToJson(dataFrame):
    """Serialise a Spark dataframe to a JSON string via pandas."""
    return dataFrame.toPandas().to_json()
def getAndInsertStationLimits(pStationId):
    """Compute max/avg/min limits of every measurement column for one station
    and insert them into the Cassandra ``Station_limits`` table.

    Replaces the previous hand-written ``withColumnRenamed`` chain
    (value1..value24) with aliased aggregations; the INSERT statement and its
    parameter order are unchanged.
    """
    # Column order mirrors the column list of the INSERT statement below:
    # (max, avg, min) triples per measurement, with min_pressure last.
    measure_cols = ['max_temp', 'max_pressure', 'med_temp', 'min_temp',
                    'precip', 'wind_med_vel', 'wind_streak', 'min_pressure']
    aggregations = []
    for column in measure_cols:
        aggregations.append(max(column).alias('max_' + column))
        aggregations.append(avg(column).alias('avg_' + column))
        aggregations.append(min(column).alias('min_' + column))

    # Single aggregation pass over the station's daily measurements.
    row = daily[daily.station_id == pStationId].groupBy('station_id').agg(*aggregations).collect()[0]

    values = [str(pStationId)]
    for column in measure_cols:
        values.extend([row['max_' + column], row['avg_' + column], row['min_' + column]])

    session.execute("INSERT INTO Station_limits (station_id,\"maxMaxTemp\",\"avgMaxTemp\",\"minMaxTemp\",\"maxMaxPressure\",\
    \"avgMaxPressure\",\"minMaxPressure\",\"maxMedTemp\",\"avgMedTemp\",\"minMedTemp\",\"maxMinTemp\",\"avgMinTemp\",\"minMinTemp\",\"maxPrecip\",\
    \"avgPrecip\",\"minPrecip\",\"maxWindMedVel\",\"avgWindMedVel\",\"minWindMedVel\",\"maxWindStreak\",\"avgWindStreak\",\"minWindStreak\",\"maxMinPressure\",\"avgMinPressure\",\"minMinPressure\") \
    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
                    , values)
def getAndInsertStationsLimits(isDebug):
    """Compute and store limits for every station; in debug mode, only
    exercise the aggregation helpers against a single known station."""
    if isDebug:
        # Debug path: run max/min/avg aggregation once each on station C629X.
        for aggregator in (getMaxValues, getMinValues, getAvgValues):
            dataframeToJson(aggregator("C629X"))
    else:
        for stationCount, station in enumerate(stations.collect()):
            print(str(stationCount) + " : " + station.station_id)
            getAndInsertStationLimits(station.station_id)
def prepareJson(data, pstationId):
    """Merge station metadata with the computed temperatures into one dict."""
    station = json.loads(dataframeToJson(stations[stations.station_id == pstationId]))
    # pandas' to_json keys rows by their stringified index, hence the "0".
    coordinates = {"lat": station["latitude"]["0"],
                   "lng": station["longitude"]["0"]}

    computed = json.loads(data)
    temps = [computed["max_temp"]["0"], computed["min_temp"]["0"]]

    return {"name": station["name"]["0"],
            "description": temps,
            "coordinates": coordinates,
            "extra": ""}
if __name__ == "__main__":
initEnvironment()
loadData()
finalData = []
start_time = time.time()
getAndInsertStationsLimits(False)
print("--- %s seconds in total---" % (time.time() - start_time))
print("END")
| mit |
bsipocz/glue | glue/clients/histogram_client.py | 1 | 10954 | import numpy as np
from ..core.client import Client
from ..core import message as msg
from ..core.data import Data
from ..core.subset import RangeSubsetState
from ..core.exceptions import IncompatibleDataException, IncompatibleAttribute
from ..core.edit_subset_mode import EditSubsetMode
from .layer_artist import HistogramLayerArtist, LayerArtistContainer
from .util import visible_limits, update_ticks
from ..core.callback_property import CallbackProperty, add_callback
from ..core.util import lookup_class
class UpdateProperty(CallbackProperty):
    """Descriptor that calls client's sync_all() method when changed"""

    def __init__(self, default, relim=False):
        # relim: when True, also recompute the client's axis limits after a
        # change (used by properties such as ``xlog`` that move the axes).
        super(UpdateProperty, self).__init__(default)
        self.relim = relim

    def __set__(self, instance, value):
        # Only resync when the value actually changes; the comparison relies
        # on CallbackProperty.__get__ returning the current stored value.
        changed = value != self.__get__(instance)
        super(UpdateProperty, self).__set__(instance, value)
        if not changed:
            return
        instance.sync_all()
        if self.relim:
            instance._relim()
def update_on_true(func):
    """Decorator for client methods: when the wrapped callable returns a
    truthy value, call ``sync_all()`` on the instance (``args[0]``) so the
    display refreshes; the result is passed through unchanged.

    ``functools.wraps`` preserves the wrapped function's name/docstring,
    which the previous version lost (hurting debugging and introspection).
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        if result:
            args[0].sync_all()
        return result
    return wrapper
class HistogramClient(Client):

    """
    A client class to display histograms
    """
    # Descriptor-backed display options; assigning any of them triggers
    # sync_all() (and, for xlog, a relim) via UpdateProperty.
    normed = UpdateProperty(False)
    cumulative = UpdateProperty(False)
    autoscale = UpdateProperty(True)
    nbins = UpdateProperty(30)
    xlog = UpdateProperty(False, relim=True)
    ylog = UpdateProperty(False)

    def __init__(self, data, figure, artist_container=None):
        super(HistogramClient, self).__init__(data)

        self._artists = artist_container or LayerArtistContainer()
        self._axes = figure.add_subplot(111)
        self._component = None
        self._xlim = {}  # cached x limits, keyed by component

        try:
            self._axes.figure.set_tight_layout(True)
        except AttributeError:  # pragma: nocover (matplotlib < 1.1)
            pass

    @property
    def bins(self):
        """ An array of bin edges for the histogram.

        Returns None if no histogram has been computed yet.
        """
        for art in self._artists:
            if not isinstance(art, HistogramLayerArtist):
                continue
            return art.x

    @property
    def axes(self):
        return self._axes

    @property
    def xlimits(self):
        """(lo, hi) x limits for the current component, computed lazily."""
        try:
            return self._xlim[self.component]
        except KeyError:
            pass

        lo, hi = self._default_limits()
        self._xlim[self.component] = lo, hi
        return lo, hi

    def _default_limits(self):
        """Data-driven (min, max) over all layers for the current component."""
        if self.component is None:
            return 0, 1
        lo, hi = np.inf, -np.inf

        for a in self._artists:
            try:
                data = a.layer[self.component]
            except IncompatibleAttribute:
                continue

            if data.size == 0:
                continue

            lo = min(lo, np.nanmin(data))
            hi = max(hi, np.nanmax(data))
        return lo, hi

    @xlimits.setter
    @update_on_true
    def xlimits(self, value):
        # Either bound may be None, meaning "keep the current value".
        lo, hi = value
        old = self.xlimits
        if lo is None:
            lo = old[0]
        if hi is None:
            hi = old[1]

        self._xlim[self.component] = min(lo, hi), max(lo, hi)
        self._relim()
        return True

    def layer_present(self, layer):
        """True when *layer* already has artists in this client."""
        return layer in self._artists

    def add_layer(self, layer):
        """Add a Data/Subset layer (and its subsets), returning its artist."""
        if layer.data not in self.data:
            raise IncompatibleDataException("Layer not in data collection")

        self._ensure_layer_data_present(layer)
        if self.layer_present(layer):
            return self._artists[layer][0]

        art = HistogramLayerArtist(layer, self._axes)
        self._artists.append(art)

        self._ensure_subsets_present(layer)
        self._sync_layer(layer)
        self._redraw()

        return art

    def _redraw(self):
        self._axes.figure.canvas.draw()

    def _ensure_layer_data_present(self, layer):
        # A subset layer requires its parent Data layer to be added first.
        if layer.data is layer:
            return
        if not self.layer_present(layer.data):
            self.add_layer(layer.data)

    def _ensure_subsets_present(self, layer):
        for subset in layer.data.subsets:
            self.add_layer(subset)

    @update_on_true
    def remove_layer(self, layer):
        """Remove a layer's artists; for Data layers, remove subsets too."""
        if not self.layer_present(layer):
            return

        for a in self._artists.pop(layer):
            a.clear()

        if isinstance(layer, Data):
            for subset in layer.subsets:
                self.remove_layer(subset)

        return True

    @update_on_true
    def set_layer_visible(self, layer, state):
        if not self.layer_present(layer):
            return
        for a in self._artists[layer]:
            a.visible = state
        return True

    def is_layer_visible(self, layer):
        if not self.layer_present(layer):
            return False
        return any(a.visible for a in self._artists[layer])

    def _update_axis_labels(self):
        """Refresh axis labels/ticks; for categorical data, rebin per category."""
        xlabel = self.component.label if self.component is not None else ''
        if self.xlog:
            xlabel = "Log %s" % xlabel
        ylabel = 'N'
        self._axes.set_xlabel(xlabel)
        self._axes.set_ylabel(ylabel)
        components = list(self._get_data_components('x'))
        if components:
            bins = update_ticks(self.axes, 'x',
                                components, False)

            if bins is not None:
                prev_bins = self.nbins
                auto_bins = self._auto_nbin(calculate_only=True)
                if prev_bins == auto_bins:
                    # try to assign a bin to each category,
                    # but only if self.nbins hasn't been overridden
                    # from auto_nbin
                    self.nbins = min(bins, 100)
                    self.xlimits = (-0.5, bins - 0.5)

    def _get_data_components(self, coord):
        """ Returns the components for each dataset for x and y axes.
        """
        if coord == 'x':
            attribute = self.component
        else:
            raise TypeError('coord must be x')

        for data in self._data:
            try:
                yield data.get_component(attribute)
            except IncompatibleAttribute:
                pass

    def _auto_nbin(self, calculate_only=False):
        """Heuristic bin count from mean dataset size, clamped to [5, 100]."""
        data = set(a.layer.data for a in self._artists)
        if len(data) == 0:
            return
        dx = np.mean([d.size for d in data])
        val = min(max(5, (dx / 1000) ** (1. / 3.) * 30), 100)
        if not calculate_only:
            self.nbins = val
        return val

    def _sync_layer(self, layer):
        # Push every display option onto each of the layer's artists.
        for a in self._artists[layer]:
            a.lo, a.hi = self.xlimits
            a.nbins = self.nbins
            a.xlog = self.xlog
            a.ylog = self.ylog
            a.cumulative = self.cumulative
            a.normed = self.normed
            a.att = self._component
            a.update()

    def sync_all(self):
        """Resync every layer, refresh labels and (optionally) autoscale y."""
        layers = set(a.layer for a in self._artists)
        for l in layers:
            self._sync_layer(l)

        self._update_axis_labels()

        if self.autoscale:
            lim = visible_limits(self._artists, 1)
            if lim is not None:
                lo = 1e-5 if self.ylog else 0
                hi = lim[1]
                # pad the top
                if self.ylog:
                    hi = lo * (hi / lo) ** 1.03
                else:
                    hi *= 1.03
                self._axes.set_ylim(lo, hi)

        yscl = 'log' if self.ylog else 'linear'
        self._axes.set_yscale(yscl)

        self._redraw()

    @property
    def component(self):
        return self._component

    def set_component(self, component):
        """
        Redefine which component gets plotted

        Parameters
        ----------
        component: ComponentID
            The new component to plot
        """
        if self._component is component:
            return
        self._component = component
        self._auto_nbin()
        self.sync_all()
        self._relim()

    def _relim(self):
        """Apply the cached x limits to the axes (log10-transformed if xlog)."""
        lim = self.xlimits
        if self.xlog:
            lim = list(np.log10(lim))
            if not np.isfinite(lim[0]):
                lim[0] = 1e-5
            if not np.isfinite(lim[1]):
                lim[1] = 1
        self._axes.set_xlim(lim)
        self._redraw()

    def _update_data(self, message):
        self.sync_all()

    def _update_subset(self, message):
        self._sync_layer(message.subset)
        self._redraw()

    def _add_subset(self, message):
        self.add_layer(message.sender)
        assert self.layer_present(message.sender)
        assert self.is_layer_visible(message.sender)

    def _remove_data(self, message):
        self.remove_layer(message.data)

    def _remove_subset(self, message):
        self.remove_layer(message.subset)

    def apply_roi(self, roi):
        """Turn an x-range ROI into a RangeSubsetState on the current component."""
        x, _ = roi.to_polygon()
        lo = min(x)
        hi = max(x)

        # expand roi to match bin edges
        bins = self.bins

        if lo >= bins.min():
            lo = bins[bins <= lo].max()
        if hi <= bins.max():
            hi = bins[bins >= hi].min()

        if self.xlog:
            lo = 10 ** lo
            hi = 10 ** hi

        state = RangeSubsetState(lo, hi)
        state.att = self.component
        mode = EditSubsetMode()
        visible = [d for d in self.data if self.is_layer_visible(d)]
        focus = visible[0] if len(visible) > 0 else None
        mode.update(self.data, state, focus_data=focus)

    def register_to_hub(self, hub):
        """Subscribe to hub messages so the view tracks data/subset changes."""
        dfilter = lambda x: x.sender.data in self._artists
        dcfilter = lambda x: x.data in self._artists
        subfilter = lambda x: x.subset in self._artists

        hub.subscribe(self,
                      msg.SubsetCreateMessage,
                      handler=self._add_subset,
                      filter=dfilter)
        hub.subscribe(self,
                      msg.SubsetUpdateMessage,
                      handler=self._update_subset,
                      filter=subfilter)
        hub.subscribe(self,
                      msg.SubsetDeleteMessage,
                      handler=self._remove_subset)
        hub.subscribe(self,
                      msg.DataUpdateMessage,
                      handler=self._update_data,
                      filter=dfilter)
        hub.subscribe(self,
                      msg.DataCollectionDeleteMessage,
                      handler=self._remove_data)

    def restore_layers(self, layers, context):
        """Re-create layer artists from their saved (serialised) state."""
        for layer in layers:
            lcls = lookup_class(layer.pop('_type'))
            if lcls != HistogramLayerArtist:
                raise ValueError("Cannot restore layers of type %s" % lcls)
            data_or_subset = context.object(layer.pop('layer'))
            result = self.add_layer(data_or_subset)
            result.properties = layer
| bsd-3-clause |
mne-tools/mne-tools.github.io | stable/_downloads/63cab32016602394f025dbe0ed7f501b/30_filtering_resampling.py | 10 | 13855 | # -*- coding: utf-8 -*-
"""
.. _tut-filter-resample:
Filtering and resampling data
=============================
This tutorial covers filtering and resampling, and gives examples of how
filtering can be used for artifact repair.
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`. We'll also crop the data to 60 seconds
(to save memory on the documentation server):
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(0, 60).load_data() # use just 60 seconds of data, to save memory
###############################################################################
# Background on filtering
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# A filter removes or attenuates parts of a signal. Usually, filters act on
# specific *frequency ranges* of a signal — for example, suppressing all
# frequency components above or below a certain cutoff value. There are *many*
# ways of designing digital filters; see :ref:`disc-filtering` for a longer
# discussion of the various approaches to filtering physiological signals in
# MNE-Python.
#
#
# Repairing artifacts by filtering
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Artifacts that are restricted to a narrow frequency range can sometimes
# be repaired by filtering the data. Two examples of frequency-restricted
# artifacts are slow drifts and power line noise. Here we illustrate how each
# of these can be repaired by filtering.
#
#
# Slow drifts
# ~~~~~~~~~~~
#
# Low-frequency drifts in raw data can usually be spotted by plotting a fairly
# long span of data with the :meth:`~mne.io.Raw.plot` method, though it is
# helpful to disable channel-wise DC shift correction to make slow drifts
# more readily visible. Here we plot 60 seconds, showing all the magnetometer
# channels:
mag_channels = mne.pick_types(raw.info, meg='mag')
raw.plot(duration=60, order=mag_channels, proj=False,
n_channels=len(mag_channels), remove_dc=False)
###############################################################################
# A half-period of this slow drift appears to last around 10 seconds, so a full
# period would be 20 seconds, i.e., :math:`\frac{1}{20} \mathrm{Hz}`. To be
# sure those components are excluded, we want our highpass to be *higher* than
# that, so let's try :math:`\frac{1}{10} \mathrm{Hz}` and :math:`\frac{1}{5}
# \mathrm{Hz}` filters to see which works best:
# Try two candidate high-pass cutoffs and plot each filtered result for
# visual comparison against the slow drifts shown above.
for cutoff in (0.1, 0.2):
    raw_highpass = raw.copy().filter(l_freq=cutoff, h_freq=None)
    fig = raw_highpass.plot(duration=60, order=mag_channels, proj=False,
                            n_channels=len(mag_channels), remove_dc=False)
    fig.subplots_adjust(top=0.9)
    fig.suptitle('High-pass filtered at {} Hz'.format(cutoff), size='xx-large',
                 weight='bold')
###############################################################################
# Looks like 0.1 Hz was not quite high enough to fully remove the slow drifts.
# Notice that the text output summarizes the relevant characteristics of the
# filter that was created. If you want to visualize the filter, you can pass
# the same arguments used in the call to :meth:`raw.filter()
# <mne.io.Raw.filter>` above to the function :func:`mne.filter.create_filter`
# to get the filter parameters, and then pass the filter parameters to
# :func:`mne.viz.plot_filter`. :func:`~mne.filter.create_filter` also requires
# parameters ``data`` (a :class:`NumPy array <numpy.ndarray>`) and ``sfreq``
# (the sampling frequency of the data), so we'll extract those from our
# :class:`~mne.io.Raw` object:
filter_params = mne.filter.create_filter(raw.get_data(), raw.info['sfreq'],
l_freq=0.2, h_freq=None)
###############################################################################
# Notice that the output is the same as when we applied this filter to the data
# using :meth:`raw.filter() <mne.io.Raw.filter>`. You can now pass the filter
# parameters (and the sampling frequency) to :func:`~mne.viz.plot_filter` to
# plot the filter:
mne.viz.plot_filter(filter_params, raw.info['sfreq'], flim=(0.01, 5))
###############################################################################
# .. _tut-section-line-noise:
#
# Power line noise
# ~~~~~~~~~~~~~~~~
#
# Power line noise is an environmental artifact that manifests as persistent
# oscillations centered around the `AC power line frequency`_. Power line
# artifacts are easiest to see on plots of the spectrum, so we'll use
# :meth:`~mne.io.Raw.plot_psd` to illustrate. We'll also write a little
# function that adds arrows to the spectrum plot to highlight the artifacts:
def add_arrows(axes):
    """Annotate each PSD axes with red arrows at 60 Hz and its harmonics."""
    for ax in axes:
        spectrum_line = ax.lines[-1]
        freqs = spectrum_line.get_xdata()
        psds = spectrum_line.get_ydata()
        for harmonic in (60, 120, 180, 240):
            idx = np.searchsorted(freqs, harmonic)
            # peak height in a small window around the frequency of interest
            peak = psds[(idx - 4):(idx + 5)].max()
            ax.arrow(x=freqs[idx], y=peak + 18, dx=0, dy=-12, color='red',
                     width=0.1, head_width=3, length_includes_head=True)
fig = raw.plot_psd(fmax=250, average=True)
add_arrows(fig.axes[:2])
###############################################################################
# It should be evident that MEG channels are more susceptible to this kind of
# interference than EEG that is recorded in the magnetically shielded room.
# Removing power-line noise can be done with a notch filter,
# applied directly to the :class:`~mne.io.Raw` object, specifying an array of
# frequencies to be attenuated. Since the EEG channels are relatively
# unaffected by the power line noise, we'll also specify a ``picks`` argument
# so that only the magnetometers and gradiometers get filtered:
# Notch-filter only the MEG channels at the line frequency and harmonics,
# then plot unfiltered vs. notch-filtered spectra side by side.
meg_picks = mne.pick_types(raw.info, meg=True)
freqs = (60, 120, 180, 240)  # 60 Hz mains plus harmonics
raw_notch = raw.copy().notch_filter(freqs=freqs, picks=meg_picks)
for title, data in zip(['Un', 'Notch '], [raw, raw_notch]):
    fig = data.plot_psd(fmax=250, average=True)
    fig.subplots_adjust(top=0.85)
    fig.suptitle('{}filtered'.format(title), size='xx-large', weight='bold')
    add_arrows(fig.axes[:2])
###############################################################################
# :meth:`~mne.io.Raw.notch_filter` also has parameters to control the notch
# width, transition bandwidth and other aspects of the filter. See the
# docstring for details.
#
# It's also possible to try to use a spectrum fitting routine to notch filter.
# In principle it can automatically detect the frequencies to notch, but our
# implementation generally does not do so reliably, so we specify the
# frequencies to remove instead, and it does a good job of removing the
# line noise at those frequencies:
raw_notch_fit = raw.copy().notch_filter(
freqs=freqs, picks=meg_picks, method='spectrum_fit', filter_length='10s')
for title, data in zip(['Un', 'spectrum_fit '], [raw, raw_notch_fit]):
fig = data.plot_psd(fmax=250, average=True)
fig.subplots_adjust(top=0.85)
fig.suptitle('{}filtered'.format(title), size='xx-large', weight='bold')
add_arrows(fig.axes[:2])
###############################################################################
# Resampling
# ^^^^^^^^^^
#
# EEG and MEG recordings are notable for their high temporal precision, and are
# often recorded with sampling rates around 1000 Hz or higher. This is good
# when precise timing of events is important to the experimental design or
# analysis plan, but also consumes more memory and computational resources when
# processing the data. In cases where high-frequency components of the signal
# are not of interest and precise timing is not needed (e.g., computing EOG or
# ECG projectors on a long recording), downsampling the signal can be a useful
# time-saver.
#
# In MNE-Python, the resampling methods (:meth:`raw.resample()
# <mne.io.Raw.resample>`, :meth:`epochs.resample() <mne.Epochs.resample>` and
# :meth:`evoked.resample() <mne.Evoked.resample>`) apply a low-pass filter to
# the signal to avoid `aliasing`_, so you don't need to explicitly filter it
# yourself first. This built-in filtering that happens when using
# :meth:`raw.resample() <mne.io.Raw.resample>`, :meth:`epochs.resample()
# <mne.Epochs.resample>`, or :meth:`evoked.resample() <mne.Evoked.resample>` is
# a brick-wall filter applied in the frequency domain at the `Nyquist
# frequency`_ of the desired new sampling rate. This can be clearly seen in the
# PSD plot, where a dashed vertical line indicates the filter cutoff; the
# original data had an existing lowpass at around 172 Hz (see
# ``raw.info['lowpass']``), and the data resampled from 600 Hz to 200 Hz gets
# automatically lowpass filtered at 100 Hz (the `Nyquist frequency`_ for a
# target rate of 200 Hz):
raw_downsampled = raw.copy().resample(sfreq=200)
for data, title in zip([raw, raw_downsampled], ['Original', 'Downsampled']):
fig = data.plot_psd(average=True)
fig.subplots_adjust(top=0.9)
fig.suptitle(title)
plt.setp(fig.axes, xlim=(0, 300))
###############################################################################
# Because resampling involves filtering, there are some pitfalls to resampling
# at different points in the analysis stream:
#
# - Performing resampling on :class:`~mne.io.Raw` data (*before* epoching) will
# negatively affect the temporal precision of Event arrays, by causing
# `jitter`_ in the event timing. This reduced temporal precision will
# propagate to subsequent epoching operations.
#
# - Performing resampling *after* epoching can introduce edge artifacts *on
# every epoch*, whereas filtering the :class:`~mne.io.Raw` object will only
# introduce artifacts at the start and end of the recording (which is often
# far enough from the first and last epochs to have no effect on the
# analysis).
#
# The following section suggests best practices to mitigate both of these
# issues.
#
#
# Best practices
# ~~~~~~~~~~~~~~
#
# To avoid the reduction in temporal precision of events that comes with
# resampling a :class:`~mne.io.Raw` object, and also avoid the edge artifacts
# that come with filtering an :class:`~mne.Epochs` or :class:`~mne.Evoked`
# object, the best practice is to:
#
# 1. low-pass filter the :class:`~mne.io.Raw` data at or below
# :math:`\frac{1}{3}` of the desired sample rate, then
#
# 2. decimate the data after epoching, by either passing the ``decim``
# parameter to the :class:`~mne.Epochs` constructor, or using the
# :meth:`~mne.Epochs.decimate` method after the :class:`~mne.Epochs` have
# been created.
#
# .. warning::
# The recommendation for setting the low-pass corner frequency at
# :math:`\frac{1}{3}` of the desired sample rate is a fairly safe rule of
# thumb based on the default settings in :meth:`raw.filter()
# <mne.io.Raw.filter>` (which are different from the filter settings used
# inside the :meth:`raw.resample() <mne.io.Raw.resample>` method). If you
# use a customized lowpass filter (specifically, if your transition
# bandwidth is wider than 0.5× the lowpass cutoff), downsampling to 3× the
# lowpass cutoff may still not be enough to avoid `aliasing`_, and
# MNE-Python will not warn you about it (because the :class:`raw.info
# <mne.Info>` object only keeps track of the lowpass cutoff, not the
# transition bandwidth). Conversely, if you use a steeper filter, the
# warning may be too sensitive. If you are unsure, plot the PSD of your
# filtered data *before decimating* and ensure that there is no content in
# the frequencies above the `Nyquist frequency`_ of the sample rate you'll
# end up with *after* decimation.
#
# Note that this method of manually filtering and decimating is exact only when
# the original sampling frequency is an integer multiple of the desired new
# sampling frequency. Since the sampling frequency of our example data is
# 600.614990234375 Hz, ending up with a specific sampling frequency like (say)
# 90 Hz will not be possible:
current_sfreq = raw.info['sfreq']
desired_sfreq = 90  # Hz
# Integer decimation factor closest to the target rate; the achieved rate is
# current_sfreq / decim, which generally differs from the desired rate.
decim = np.round(current_sfreq / desired_sfreq).astype(int)
obtained_sfreq = current_sfreq / decim
# Low-pass at 1/3 of the effective rate (the rule of thumb discussed above).
lowpass_freq = obtained_sfreq / 3.

raw_filtered = raw.copy().filter(l_freq=None, h_freq=lowpass_freq)
events = mne.find_events(raw_filtered)
epochs = mne.Epochs(raw_filtered, events, decim=decim)

print('desired sampling frequency was {} Hz; decim factor of {} yielded an '
      'actual sampling frequency of {} Hz.'
      .format(desired_sfreq, decim, epochs.info['sfreq']))
###############################################################################
# If for some reason you cannot follow the above-recommended best practices,
# you should at the very least either:
#
# 1. resample the data *after* epoching, and make your epochs long enough that
# edge effects from the filtering do not affect the temporal span of the
# epoch that you hope to analyze / interpret; or
#
# 2. perform resampling on the :class:`~mne.io.Raw` object and its
# corresponding Events array *simultaneously* so that they stay more or less
# in synch. This can be done by passing the Events array as the
# ``events`` parameter to :meth:`raw.resample() <mne.io.Raw.resample>`.
#
#
# .. LINKS
#
# .. _`AC power line frequency`:
# https://en.wikipedia.org/wiki/Mains_electricity
# .. _`aliasing`: https://en.wikipedia.org/wiki/Anti-aliasing_filter
# .. _`jitter`: https://en.wikipedia.org/wiki/Jitter
# .. _`Nyquist frequency`: https://en.wikipedia.org/wiki/Nyquist_frequency
| bsd-3-clause |
kmunve/TSanalysis | Crocus/crocus_forcing_nc.py | 1 | 36359 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from netCDF4 import Dataset, num2date
from string import Template
from datetime import datetime
'''
Create a forcing netcdf file for the snow pack model Crocus.
'''
class CrocusForcing:
def __init__(self, no_points=1, filename=None, opt_param=[], source="Unspecified"):
'''
TODO: add a plotting routine to view all parameters.
:param no_points: the number of points/stations that should be modeled
:param filename: if given an existing file will be opened to append data
:param opt_param: list containing optional parameters that can be set
These are:
- relative humidity (HUMREL)
- nebulosity (NEB)
- wind direction (Wind_DIR)
:param source: Unknown, eklima or arome - TODO: make an Enum
:return: creates FORCING.nc
'''
self._set_crocus_arome_lut()
self._set_crocus_eklima_lut()
if filename is None:
# Set general parameters
self.fill_value = -9999999.0
# create a file (Dataset object, also the root group).
self.rootgrp = Dataset('FORCING.nc', 'w', format='NETCDF3_CLASSIC') # TODO: should be changed to NETCDF4 once Surfex8 is ready
##############
# Dimensions #
##############
self.time_dim = self.rootgrp.createDimension('time', None)
self.number_of_points_dim = self.rootgrp.createDimension('Number_of_points', no_points)
#####################
# Global attributes #
#####################
self.rootgrp.description = "SURFEX/Crocus forcing file"
self.rootgrp.history = "Created " + datetime.now().isoformat()
if source == "arome":
self.rootgrp.source = "AROME MetCoop - NWP model"
elif source == "eklima":
self.rootgrp.source = "www.eklima.no - wsKlima API"
else:
self.rootgrp.source = "unspecified"
#############
# Variables #
#############
###########
# Scalars #
###########
self.forc_time_step_v = self.rootgrp.createVariable('FRC_TIME_STP','f8',fill_value=self.fill_value)
self.forc_time_step_v.units = 's'
self.forc_time_step_v.long_name = 'Forcing_Time_Step'
######
# 1D #
######
self.time_v = self.rootgrp.createVariable('time', 'f8', ('time',), fill_value=self.fill_value)
# depends on FORC_TIME_STP units
self.time_v.units = 'hours/seconds since '
self.time_v.long_name = 'time'
if source == "arome":
self.time_v.derived_from_arome = self.crocus_arome_lut['time']
elif source == "eklima":
self.time_v.derived_from_eklima = self.crocus_eklima_lut['time']
self.lat_v = self.rootgrp.createVariable('LAT', 'f8', ('Number_of_points',), fill_value=self.fill_value)
self.lat_v.units = 'degrees_north'
self.lat_v.long_name = 'latitude'
if source == "arome":
self.lat_v.derived_from_arome = self.crocus_arome_lut['LAT']
elif source == "eklima":
self.lat_v.derived_from_eklima = self.crocus_eklima_lut['LAT']
self.lon_v = self.rootgrp.createVariable('LON', 'f8', ('Number_of_points',), fill_value=self.fill_value)
self.lon_v.units = 'degrees_east'
self.lon_v.long_name = 'longitude'
if source == "arome":
self.lon_v.derived_from_arome = self.crocus_arome_lut['LON']
elif source == "eklima":
self.lon_v.derived_from_eklima = self.crocus_eklima_lut['LON']
if 'aspect' in opt_param:
self.aspect_v = self.rootgrp.createVariable('aspect', 'f8', ('Number_of_points'),fill_value=self.fill_value)
self.aspect_v.units = 'degrees from north'
self.aspect_v.long_name = 'slope aspect'
if 'slope' in opt_param:
self.slope_v = self.rootgrp.createVariable('slope','f8',('Number_of_points',),fill_value=self.fill_value)
self.slope_v.units = 'degrees from horizontal'
self.slope_v.long_name = 'slope angle'
self.uref_v = self.rootgrp.createVariable('UREF','f8',('Number_of_points',),fill_value=self.fill_value)
self.uref_v.units = 'm'
self.uref_v.long_name = 'Reference_Height_for_Wind'
self.zref_v = self.rootgrp.createVariable('ZREF','f8',('Number_of_points',),fill_value=self.fill_value)
self.zref_v.units = 'm'
self.zref_v.long_name = 'Reference_Height'
self.zs_v = self.rootgrp.createVariable('ZS','f8',('Number_of_points',),fill_value=self.fill_value)
self.zs_v.units = 'm'
self.zs_v.long_name = 'altitude'
if source == "arome":
self.zs_v.derived_from_arome = self.crocus_arome_lut['ZS']
elif source == "eklima":
self.zs_v.derived_from_eklima = self.crocus_eklima_lut['ZS']
######
# 2D #
######
if 'CO2air' in opt_param:
self.co2_air_v = self.rootgrp.createVariable('CO2air','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.co2_air_v.units = 'kg/m3'
self.co2_air_v.long_name = 'Near_Surface_CO2_Concentration'
if source == "arome":
self.co2_air_v.derived_from_arome = self.crocus_arome_lut['CO2air']
elif source == "eklima":
self.co2_air_v.derived_from_eklima = self.crocus_eklima_lut['CO2air']
self.dir_sw_down_v = self.rootgrp.createVariable('DIR_SWdown','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.dir_sw_down_v.units = 'W/m2'
self.dir_sw_down_v.long_name = 'Surface_Indicent_Direct_Shortwave_Radiation'
if source == "arome":
self.dir_sw_down_v.derived_from_arome = self.crocus_arome_lut['DIR_SWdown']
elif source == "eklima":
self.dir_sw_down_v.derived_from_eklima = self.crocus_eklima_lut['DIR_SWdown']
if 'HUMREL' in opt_param:
self.hum_rel_v = self.rootgrp.createVariable('HUMREL','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.hum_rel_v.units = '%'
self.hum_rel_v.long_name = 'Relative Humidity'
if source == "arome":
self.hum_rel_v.derived_from_arome = self.crocus_arome_lut['HUMREL']
elif source == "eklima":
self.hum_rel_v.derived_from_eklima = self.crocus_eklima_lut['HUMREL']
self.lw_down_v = self.rootgrp.createVariable('LWdown','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.lw_down_v.units = 'W/m2'
self.lw_down_v.long_name = 'Surface_Incident_Longwave_Radiation'
if source == "arome":
self.lw_down_v.derived_from_arome = self.crocus_arome_lut['LWdown']
elif source == "eklima":
self.lw_down_v.derived_from_eklima = self.crocus_eklima_lut['LWdown']
if 'NEB' in opt_param:
self.neb_v = self.rootgrp.createVariable('NEB','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.neb_v.units = 'between 0 and 1'
self.neb_v.long_name = 'Nebulosity'
if source == "arome":
self.neb_v.derived_from_arome = self.crocus_arome_lut['NEB']
elif source == "eklima":
self.neb_v.derived_from_eklima = self.crocus_eklima_lut['NEB']
self.ps_surf_v = self.rootgrp.createVariable('PSurf','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.ps_surf_v.units = 'Pa'
self.ps_surf_v.long_name = 'Surface_Pressure'
if source == "arome":
self.ps_surf_v.derived_from_arome = self.crocus_arome_lut['PSurf']
elif source == "eklima":
self.ps_surf_v.derived_from_eklima = self.crocus_eklima_lut['PSurf']
self.q_air_v = self.rootgrp.createVariable('Qair','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.q_air_v.units = 'Kg/Kg'
self.q_air_v.long_name = 'Near_Surface_Specific_Humidity'
if source == "arome":
self.q_air_v.derived_from_arome = self.crocus_arome_lut['Qair']
elif source == "eklima":
self.q_air_v.derived_from_eklima = self.crocus_eklima_lut['Qair']
self.rain_fall_v = self.rootgrp.createVariable('Rainf','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.rain_fall_v.units = 'kg/m2/s'
self.rain_fall_v.long_name = 'Rainfall_Rate'
if source == "arome":
self.rain_fall_v.derived_from_arome = self.crocus_arome_lut['Rainf']
elif source == "eklima":
self.rain_fall_v.derived_from_eklima = self.crocus_eklima_lut['Rainf']
self.sca_sw_down_v = self.rootgrp.createVariable('SCA_SWdown','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.sca_sw_down_v.units = 'W/m2'
self.sca_sw_down_v.long_name = 'Surface_Incident_Diffuse_Shortwave_Radiation'
if source == "arome":
self.sca_sw_down_v.derived_from_arome = self.crocus_arome_lut['SCA_SWdown']
elif source == "eklima":
self.sca_sw_down_v.derived_from_eklima = self.crocus_eklima_lut['SCA_SWdown']
self.snow_fall_v = self.rootgrp.createVariable('Snowf','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.snow_fall_v.units = 'kg/m2/s'
self.snow_fall_v.long_name = 'Snowfall_Rate'
if source == "arome":
self.snow_fall_v.derived_from_arome = self.crocus_arome_lut['Snowf']
elif source == "eklima":
self.snow_fall_v.derived_from_eklima = self.crocus_eklima_lut['Snowf']
self.tair_v = self.rootgrp.createVariable('Tair','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.tair_v.units = 'K'
self.tair_v.long_name = 'Near_Surface_Air_Temperature'
self.tair_v.derived_from_arome = 'air_temperature_2m'
if source == "arome":
self.tair_v.derived_from_arome = self.crocus_arome_lut['Tair']
elif source == "eklima":
self.tair_v.derived_from_eklima = self.crocus_eklima_lut['Tair']
self.wind_v = self.rootgrp.createVariable('Wind','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.wind_v.units = 'm/s'
self.wind_v.long_name = 'Wind_Speed'
if source == "arome":
self.wind_v.derived_from_arome = self.crocus_arome_lut['Wind']
elif source == "eklima":
self.wind_v.derived_from_eklima = self.crocus_eklima_lut['Wind']
if 'Wind_DIR' in opt_param:
self.wind_dir_v = self.rootgrp.createVariable('Wind_DIR','f8',('time', 'Number_of_points',),fill_value=self.fill_value)
self.wind_dir_v.units = 'deg'
self.wind_dir_v.long_name = 'Wind_Direction'
if source == "arome":
self.wind_dir_v.derived_from_arome = self.crocus_arome_lut['Wind_DIR']
elif source == "eklima":
self.wind_dir_v.derived_from_eklima = self.crocus_eklima_lut['Wind_DIR']
else:
self.rootgrp = Dataset(filename, 'a')
def close(self):
"""
Closes netCDF file after writing.
"""
self.rootgrp.close()
def set_variable(self, var):
pass
def _set_crocus_arome_lut(self):
# TODO: cross-check units
# TODO: cross-check time conversions and time reference
# Look-up table between Crocus FORCING.nc and arome_metcoop*test*.nc
self.crocus_arome_lut = {'time': 'time', # seconds since : seconds since
'LAT': 'latitude', # degrees_north : degrees_north - ok
'LON': 'longitude', # degrees_east : degrees_east - ok
'PSurf': 'surface_air_pressure', # Pa : Pa - ok
'Tair': 'air_temperature_2m', # : K : K - ok
'HUMREL': 'relative_humidity_2m', # % : 1
'LWdown': 'integral_of_surface_downwelling_longwave_flux_in_air_wrt_time', # W/m2 : W s/m^2
'NEB': '', # 0-1 :
'Qair': 'specific_humidity_ml', # Kg/Kg : Kg/Kg - need to address lowest model level !?
'Rainf': 'rainfall_amount_pl', # kg/m2/s : kg/m2 - need to address PL and rate
'SCA_SWdown': '', # W/m2 :
'DIR_SWdown': 'integral_of_surface_downwelling_shortwave_flux_in_air_wrt_time', # W/m2 : W s/m2 - need to adjust for rate
'CO2_air': '', # kg/m3 :
'Snowf': 'snowfall_amount_pl', # kg/m2/s : kg/m2 - need to address PL and rate (divide by 3600 if hourly)
'theorSW': '', # W/m2 :
'UREF': '', # m :
'Wind': '', # m/s :
'Wind_DIR': '', # deg :
'aspect': '', # degrees from north :
'slope': '', # degrees from horizontal :
'ZREF': '', # m :
'ZS': '' # m :
}
def _set_crocus_eklima_lut(self):
# TODO: cross-check units
# TODO: cross-check time conversions and time reference
# TODO: conversion to correct units and rates where necessary
# Look-up table between Crocus FORCING.nc and eklima getMetData return
self.crocus_eklima_lut = {'time': 'time', # seconds since : seconds since
'LAT': 'latDec', # degrees_north : degrees_north - ok
'LON': 'lonDec', # degrees_east : degrees_east - ok
'Psurf': '', # Pa :
'Tair': 'TA', # : K : C
'HUMREL': '', # % :
'LWdown': '', # W/m2 :
'NEB': '', # 0-1 :
'Qair': '', # Kg/Kg :
'Rainf': 'RR_1', # kg/m2/s : mm
'SCA_SWdown': '', # W/m2 :
'DIR_SWdown': '', # W/m2 :
'CO2_air': '', # kg/m3 :
'Snowf': '', # kg/m2/s :
'theorSW': '', # W/m2 :
'UREF': '', # m : m should be 10 m
'Wind': 'FF', # m/s : m/s
'Wind_DIR': 'DD', # deg : deg
'aspect': '', # degrees from north : -
'slope': '', # degrees from horizontal : -
'ZREF': '', # m : m from station_props
'ZS': '' # m :
}
def insert_arome_var(self, var_name, arome_variables):
# TODO: can I pass a values to the function in a dict? http://code.activestate.com/recipes/181064/
self._arome_converter = {'Rainf': self._insert_arome_rainf()}
if var_name == 'Rainf':
pass
else:
pass
def _insert_arome_rainf(self):
pass
    def insert_eklima_station(self, i, station, data):
        '''
        Insert one eKlima station's metadata and observations at point *i*.

        :param i: number of point in the Forcing file
        :param station: dict['stnr'] returned from wsklima_parser.parse_get_stations_properties()
        :param data: dict['stnr'] returned from wsklima_parser.parse_get_data()
        :return:

        NOTE(review): this method is work in progress and will raise
        NameError if called as-is -- it references names that are defined
        nowhere in this scope: ``dt``, ``time_v``, ``t_units``, ``q_air``,
        ``tair``, ``p_surf``, ``rainf``, ``snowf``, ``dir_sw_down``,
        ``sca_sw_down``, ``lw_down``, ``wind``, ``wind_dir`` and
        ``co2_air``. They presumably need to be computed from *data*
        beforehand -- TODO confirm and implement.
        '''
        # Set time properties - only once not for each station
        self.forc_time_step_v[:] = dt.seconds
        # TODO: use date2num to get the time right
        self.time_v[i] = time_v
        self.time_v.units = t_units

        # Set station properties
        # self.aspect_v[:] = 0.0
        # UREF/ZREF: standard wind (10 m) and temperature (2 m) sensor heights.
        self.uref_v[i] = 10.0
        self.zref_v[i] = 2.0
        self.zs_v[i] = station['amsl']
        self.lat_v[i] = station['latDec']
        self.lon_v[i] = station['lonDec']

        # Insert every observed element for which a Crocus mapping exists.
        for key in data.keys():
            if key in self.crocus_eklima_lut.values():
                self._insert_eklima_data(i, key, data[key])

        # Set the created forcing parameters
        # PTH (pressure / temperature / humidity)
        self.q_air_v[:, i] = q_air[:]
        self.tair_v[:, i] = tair[:]
        self.ps_surf_v[:, i] = p_surf[:]
        # Precip
        self.rain_fall_v[:, i] = rainf[:]
        self.snow_fall_v[:, i] = snowf[:]
        # Radiation
        self.dir_sw_down_v[:, i] = dir_sw_down[:]
        self.sca_sw_down_v[:, i] = sca_sw_down[:]
        self.lw_down_v[:, i] = lw_down[:]
        # Wind
        self.wind_v[:, i] = wind[:]
        self.wind_dir_v[:, i] = wind_dir[:]
        # Others
        self.co2_air_v[:, i] = co2_air
def _insert_eklima_data(self, i, key, data):
# TODO: need to make sure that it is inserted at the correct time!!!
if key== 'TA':
self.tair_v[:, i] = data[:]
def _convert_eklima_precip(self, RR_1, TA):
'''
:param RR_1: amount of rain within last hour in mm from eklima station
:param TA: 2m air temperature in C from eklima station
:return: sets self.Rainf or self.Snowf in kg/m2/s
'''
if TA >= 0.5:
self.Snowf = 0.0
self.Rainf = 1000.0 * RR_1 / 3600.0
else:
self.Rainf = 0.0
self.Snowf = 1000.0 * RR_1 / 3600.0
    def create_options_nam(self):
        '''
        Render an OPTIONS.nam namelist from the template at
        ./Test/Data/OPTIONS.nam.tpl, using this file's first point.

        * Returns: OPTIONs.nam file
        TODO: adapt for multiple points
        TODO: add option to insert an existing snow pack - maybe in a different function as optional
        &NAM_PREP_ISBA_SNOW
        CSNOW
        NSNOW_LAYER
        CFILE_SNOW
        CTYPE_SNOW
        CFILEPGD_SNOW
        CTYPEPGD_SNOW
        LSNOW_IDEAL
        lSNOW_FRAC_TOT
        XWSNOW
        XZSNOW - NEW IN v8
        XTSNOW
        XLWCSNOW - NEW IN v8
        XRSNOW
        XASNOW
        XSG1SNOW
        XSG2SNOW
        XHISTSNOW
        XAGESNOW
        '''
        option_file = open('OPTIONS.nam', 'w')
        option_template = Template(open('./Test/Data/OPTIONS.nam.tpl', 'r').read())
        # Read the lines from the template, substitute the values, and write to the new config file
        # NOTE(review): assumes time units look like '<unit> since YYYY-MM-DD HH:MM[:SS]'
        # (words 2 and 3 are the date and clock time) -- TODO confirm.
        _date = self.time_v.units.split(' ')[2]
        _time = self.time_v.units.split(' ')[3]
        subst = dict(LAT=str(self.lat_v[0]),
                     LON=str(self.lon_v[0]),
                     # NOTE(review): NO_POINTS and ZS (950 m) are hard-coded
                     # for a single point -- TODO parameterize.
                     NO_POINTS=1,
                     ZS=950,
                     YEAR=_date.split('-')[0],
                     MONTH=_date.split('-')[1],
                     DAY=_date.split('-')[2],
                     # Simulation start time in seconds after midnight.
                     XTIME=float(_time.split(':')[0])*3600.,
                     )
        _sub_str = option_template.substitute(subst)
        option_file.write(_sub_str)
        # Close the files
        option_file.close()
        #option_template.close()
def init_from_file(self, filename):
"""
TODO: adjust or remove
"""
# create a file (Dataset object, also the root group).
f = Dataset(filename, mode='r')
print(f.file_format)
print(f.dimensions['Number_of_points'])
print(f.dimensions['time'])
print(f.variables.keys())
for var in f.ncattrs():
print(var, getattr(f, var))
print(f.variables['Wind'])
print(f.variables['Wind'].units)
f.variables['Wind'][:] = []
print(f.variables['Wind'])
f.close()
def init_forcing_nc(no_points=1):
    """
    Create an empty FORCING.nc skeleton for the Crocus snow model.

    :param no_points: number of points used in the model grid

    All variables are created as 'f8' in the same order as before; the
    repeated createVariable/units/long_name boilerplate is driven by a
    spec table.

    Bug fix: 'DIR_SWdown' is a time series and now gets the ('time',
    'Number_of_points') dimensions like every other 2D variable (it
    previously lacked the 'time' dimension, inconsistent with the
    CrocusForcing class and with SCA_SWdown/LWdown).
    """
    # create a file (Dataset object, also the root group).
    rootgrp = Dataset('FORCING.nc', 'w', format='NETCDF3_CLASSIC')
    print(rootgrp.file_format)

    ##############
    # Dimensions #
    ##############
    time_dim = rootgrp.createDimension('time', None)
    number_of_points_dim = rootgrp.createDimension('Number_of_points', no_points)
    print(rootgrp.dimensions)
    print(time_dim.isunlimited())
    print(number_of_points_dim.isunlimited())

    point_dims = ('Number_of_points',)
    series_dims = ('time', 'Number_of_points',)

    # (name, dimensions, units, long_name) -- order defines file layout.
    var_specs = [
        # Scalars
        ('FRC_TIME_STP', (), 's', 'Forcing_Time_Step'),
        # 1D; time units depend on FORC_TIME_STP and are completed later.
        ('time', ('time',), 'hours/seconds since ', 'time'),
        ('LAT', point_dims, 'degrees_north', 'latitude'),
        ('LON', point_dims, 'degrees_east', 'longitude'),
        ('aspect', point_dims, 'degrees from north', 'slope aspect'),
        ('slope', point_dims, 'degrees from horizontal', 'slope angle'),
        ('UREF', point_dims, 'm', 'Reference_Height_for_Wind'),
        ('ZREF', point_dims, 'm', 'Reference_Height'),
        ('ZS', point_dims, 'm', 'altitude'),
        # 2D
        ('CO2air', series_dims, 'kg/m3', 'Near_Surface_CO2_Concentration'),
        # long_name typo ("Indicent") kept for backwards compatibility.
        ('DIR_SWdown', series_dims, 'W/m2', 'Surface_Indicent_Direct_Shortwave_Radiation'),
        ('HUMREL', series_dims, '%', 'Relative Humidity'),
        ('LWdown', series_dims, 'W/m2', 'Surface_Incident_Longwave_Radiation'),
        ('NEB', series_dims, 'between 0 and 1', 'Nebulosity'),
        ('PSurf', series_dims, 'Pa', 'Surface_Pressure'),
        ('Qair', series_dims, 'Kg/Kg', 'Near_Surface_Specific_Humidity'),
        ('Rainf', series_dims, 'kg/m2/s', 'Rainfall_Rate'),
        ('SCA_SWdown', series_dims, 'W/m2', 'Surface_Incident_Diffuse_Shortwave_Radiation'),
        ('Snowf', series_dims, 'kg/m2/s', 'Snowfall_Rate'),
        ('Tair', series_dims, 'K', 'Near_Surface_Air_Temperature'),
        ('Wind', series_dims, 'm/s', 'Wind_Speed'),
        ('Wind_DIR', series_dims, 'deg', 'Wind_Direction'),
    ]
    for name, dims, units, long_name in var_specs:
        var = rootgrp.createVariable(name, 'f8', dims)
        var.units = units
        var.long_name = long_name

    rootgrp.close()
def populate_forcing_nc(df):
    """
    Fill a fresh FORCING.nc with values from the pandas DataFrame *df*.

    The DataFrame index must be datetime-like (it drives the time axis);
    columns are matched to netCDF variables through ``id_dict``.

    NOTE(review): only a single point (column 0 of the 2D variables) is
    populated -- consistent with init_forcing_nc(no_points=1), TODO confirm.
    """
    # Map DataFrame column names to the corresponding netCDF variable names.
    id_dict = {'TAM': 'Tair'}
    # Create new and empty FORCING.nc file with correct number of points
    init_forcing_nc()
    # Open FORCING.nc file, r+ ensures that it exists
    nc = Dataset('FORCING.nc', 'r+', format='NETCDF3_CLASSIC')
    # Fill the time variable
    nc.variables['time'].units, nc.variables['time'][:] = get_nc_time(df.index)
    print(nc.variables['time'])
    for col in df.columns:
        if col in id_dict:
            var = nc.variables[id_dict[col]]
            # Bug fix: the old code did ``nc.variables[...] = df[col]``,
            # which rebound the dict entry instead of writing any data.
            var[:, 0] = df[col].values
            print(var)
    nc.close()
def get_nc_time(df_index):
    """
    Derive a CF-style time unit string and numeric offsets from a
    datetime-like index.

    :param df_index: sequence of datetimes (e.g. a pandas DatetimeIndex)
    :return: (unit_str, time_array) where unit_str is
        '<hours|seconds> since YYYY-MM-DD HH:MM:SS' anchored at the first
        timestamp and time_array holds the offsets in that unit.

    Bug fix: the previous version returned the undefined names ``unit_str``
    and ``time_array`` and always raised NameError. It now implements the
    documented intent: hours when the sampling interval is a whole number
    of hours, seconds otherwise.
    """
    anchor = df_index[0]
    stamp = anchor.strftime('%Y-%m-%d %H:%M:%S')
    if len(df_index) < 2:
        # A single timestamp carries no interval information; fall back to
        # seconds with a zero offset.
        return 'seconds since {0}'.format(stamp), [0.0]
    tinterval = df_index[1] - df_index[0]
    # find out if it is hours or seconds that are most convenient
    if tinterval.total_seconds() % 3600 == 0:
        unit_str = 'hours since {0}'.format(stamp)
        divisor = 3600.0
    else:
        unit_str = 'seconds since {0}'.format(stamp)
        divisor = 1.0
    time_array = [(t - anchor).total_seconds() / divisor for t in df_index]
    return unit_str, time_array
def test_tutorial():
    """
    Scratchpad walking through the netCDF4-python tutorial (attributes,
    compound types, vlen types, MFDataset, variable-length strings).

    NOTE(review): this function is NOT runnable as-is -- ``rootgrp``,
    ``latitudes``, ``longitudes``, ``levels`` and ``times`` are used before
    ever being defined in this scope and will raise NameError. Keep it only
    as a transcript of the upstream tutorial, or delete it.
    """
    # 2 unlimited dimensions.
    #temp = rootgrp.createVariable('temp','f4',('time','level','lat','lon',))
    # this makes the compression 'lossy' (preserving a precision of 1/1000)
    # try it and see how much smaller the file gets.
    temp = rootgrp.createVariable('temp','f4',('time','level','lat','lon',),least_significant_digit=3)
    # attributes.
    import time
    rootgrp.description = 'bogus example script'
    rootgrp.history = 'Created ' + time.ctime(time.time())
    rootgrp.source = 'netCDF4 python module tutorial'
    latitudes.units = 'degrees north'
    longitudes.units = 'degrees east'
    levels.units = 'hPa'
    temp.units = 'K'
    times.units = 'hours since 0001-01-01 00:00:00.0'
    times.calendar = 'gregorian'
    for name in rootgrp.ncattrs():
        print('Global attr', name, '=', getattr(rootgrp,name))
    print(rootgrp)
    print(rootgrp.__dict__)
    print(rootgrp.variables)
    print(rootgrp.variables['temp'])
    import numpy
    # no unlimited dimension, just assign to slice.
    lats = numpy.arange(-90,91,2.5)
    lons = numpy.arange(-180,180,2.5)
    latitudes[:] = lats
    longitudes[:] = lons
    print('latitudes =\n',latitudes[:])
    print('longitudes =\n',longitudes[:])
    # append along two unlimited dimensions by assigning to slice.
    nlats = len(rootgrp.dimensions['lat'])
    nlons = len(rootgrp.dimensions['lon'])
    print('temp shape before adding data = ',temp.shape)
    from numpy.random.mtrand import uniform # random number generator.
    temp[0:5,0:10,:,:] = uniform(size=(5,10,nlats,nlons))
    print('temp shape after adding data = ',temp.shape)
    # levels have grown, but no values yet assigned.
    print('levels shape after adding pressure data = ',levels.shape)
    # assign values to levels dimension variable.
    levels[:] = [1000.,850.,700.,500.,300.,250.,200.,150.,100.,50.]
    # fancy slicing
    tempdat = temp[::2, [1,3,6], lats>0, lons>0]
    print('shape of fancy temp slice = ',tempdat.shape)
    print(temp[0, 0, [0,1,2,3], [0,1,2,3]].shape)
    # fill in times.
    from datetime import datetime, timedelta
    from netCDF4 import num2date, date2num, date2index
    dates = [datetime(2001,3,1)+n*timedelta(hours=12) for n in range(temp.shape[0])]
    times[:] = date2num(dates,units=times.units,calendar=times.calendar)
    print('time values (in units %s): ' % times.units+'\\n',times[:])
    dates = num2date(times[:],units=times.units,calendar=times.calendar)
    print('dates corresponding to time values:\\n',dates)
    rootgrp.close()
    # create a series of netCDF files with a variable sharing
    # the same unlimited dimension.
    for nfile in range(10):
        f = Dataset('mftest'+repr(nfile)+'.nc','w',format='NETCDF4_CLASSIC')
        f.createDimension('x',None)
        x = f.createVariable('x','i',('x',))
        x[0:10] = numpy.arange(nfile*10,10*(nfile+1))
        f.close()
    # now read all those files in at once, in one Dataset.
    from netCDF4 import MFDataset
    f = MFDataset('mftest*nc')
    print(f.variables['x'][:])
    # example showing how to save numpy complex arrays using compound types.
    f = Dataset('complex.nc','w')
    size = 3 # length of 1-d complex array
    # create sample complex data.
    datac = numpy.exp(1j*(1.+numpy.linspace(0, numpy.pi, size)))
    print(datac.dtype)
    # create complex128 compound data type.
    complex128 = numpy.dtype([('real',numpy.float64),('imag',numpy.float64)])
    complex128_t = f.createCompoundType(complex128,'complex128')
    # create a variable with this data type, write some data to it.
    f.createDimension('x_dim',None)
    v = f.createVariable('cmplx_var',complex128_t,'x_dim')
    data = numpy.empty(size,complex128) # numpy structured array
    data['real'] = datac.real; data['imag'] = datac.imag
    v[:] = data
    # close and reopen the file, check the contents.
    f.close()
    f = Dataset('complex.nc')
    print(f)
    print(f.variables['cmplx_var'])
    print(f.cmptypes)
    print(f.cmptypes['complex128'])
    v = f.variables['cmplx_var']
    print(v.shape)
    datain = v[:] # read in all the data into a numpy structured array
    # create an empty numpy complex array
    datac2 = numpy.empty(datain.shape,numpy.complex128)
    # .. fill it with contents of structured array.
    datac2.real = datain['real']
    datac2.imag = datain['imag']
    print(datac.dtype,datac)
    print(datac2.dtype,datac2)
    # more complex compound type example.
    from netCDF4 import chartostring, stringtoarr
    f = Dataset('compound_example.nc','w') # create a new dataset.
    # create an unlimited dimension call 'station'
    f.createDimension('station',None)
    # define a compound data type (can contain arrays, or nested compound types).
    NUMCHARS = 80 # number of characters to use in fixed-length strings.
    winddtype = numpy.dtype([('speed','f4'),('direction','i4')])
    statdtype = numpy.dtype([('latitude', 'f4'), ('longitude', 'f4'),
                             ('surface_wind',winddtype),
                             ('temp_sounding','f4',10),('press_sounding','i4',10),
                             ('location_name','S1',NUMCHARS)])
    # use this data type definitions to create a compound data types
    # called using the createCompoundType Dataset method.
    # create a compound type for vector wind which will be nested inside
    # the station data type. This must be done first!
    wind_data_t = f.createCompoundType(winddtype,'wind_data')
    # now that wind_data_t is defined, create the station data type.
    station_data_t = f.createCompoundType(statdtype,'station_data')
    # create nested compound data types to hold the units variable attribute.
    winddtype_units = numpy.dtype([('speed','S1',NUMCHARS),('direction','S1',NUMCHARS)])
    statdtype_units = numpy.dtype([('latitude', 'S1',NUMCHARS), ('longitude', 'S1',NUMCHARS),
                                   ('surface_wind',winddtype_units),
                                   ('temp_sounding','S1',NUMCHARS),
                                   ('location_name','S1',NUMCHARS),
                                   ('press_sounding','S1',NUMCHARS)])
    # create the wind_data_units type first, since it will nested inside
    # the station_data_units data type.
    wind_data_units_t = f.createCompoundType(winddtype_units,'wind_data_units')
    station_data_units_t =\
        f.createCompoundType(statdtype_units,'station_data_units')
    # create a variable of of type 'station_data_t'
    statdat = f.createVariable('station_obs', station_data_t, ('station',))
    # create a numpy structured array, assign data to it.
    data = numpy.empty(1,station_data_t)
    data['latitude'] = 40.
    data['longitude'] = -105.
    data['surface_wind']['speed'] = 12.5
    data['surface_wind']['direction'] = 270
    data['temp_sounding'] = (280.3,272.,270.,269.,266.,258.,254.1,250.,245.5,240.)
    data['press_sounding'] = range(800,300,-50)
    # variable-length string datatypes are not supported inside compound types, so
    # to store strings in a compound data type, each string must be
    # stored as fixed-size (in this case 80) array of characters.
    data['location_name'] = stringtoarr('Boulder, Colorado, USA',NUMCHARS)
    # assign structured array to variable slice.
    statdat[0] = data
    # or just assign a tuple of values to variable slice
    # (will automatically be converted to a structured array).
    statdat[1] = (40.78,-73.99,(-12.5,90),
                  (290.2,282.5,279.,277.9,276.,266.,264.1,260.,255.5,243.),
                  range(900,400,-50),stringtoarr('New York, New York, USA',NUMCHARS))
    print(f.cmptypes)
    windunits = numpy.empty(1,winddtype_units)
    stationobs_units = numpy.empty(1,statdtype_units)
    windunits['speed'] = stringtoarr('m/s',NUMCHARS)
    windunits['direction'] = stringtoarr('degrees',NUMCHARS)
    stationobs_units['latitude'] = stringtoarr('degrees north',NUMCHARS)
    stationobs_units['longitude'] = stringtoarr('degrees west',NUMCHARS)
    stationobs_units['surface_wind'] = windunits
    stationobs_units['location_name'] = stringtoarr('None', NUMCHARS)
    stationobs_units['temp_sounding'] = stringtoarr('Kelvin',NUMCHARS)
    stationobs_units['press_sounding'] = stringtoarr('hPa',NUMCHARS)
    statdat.units = stationobs_units
    # close and reopen the file.
    f.close()
    f = Dataset('compound_example.nc')
    print(f)
    statdat = f.variables['station_obs']
    print(statdat)
    # print out data in variable.
    print('data in a variable of compound type:')
    print('----')
    for data in statdat[:]:
        for name in statdat.dtype.names:
            if data[name].dtype.kind == 'S': # a string
                # convert array of characters back to a string for display.
                units = chartostring(statdat.units[name])
                print(name,': value =',chartostring(data[name]),\
                    ': units=',units)
            elif data[name].dtype.kind == 'V': # a nested compound type
                units_list = [chartostring(s) for s in tuple(statdat.units[name])]
                print(name,data[name].dtype.names,': value=',data[name],': units=',\
                    units_list)
            else: # a numeric type.
                units = chartostring(statdat.units[name])
                print(name,': value=',data[name],': units=',units)
        print('----')
    f.close()
    # variable-length (ragged) array example.
    f = Dataset('tst_vlen.nc','w')
    vlen_t = f.createVLType(numpy.int32, 'phony_vlen')
    x = f.createDimension('x',3)
    y = f.createDimension('y',4)
    vlvar = f.createVariable('phony_vlen_var', vlen_t, ('y','x'))
    import random
    data = numpy.empty(len(y)*len(x),object)
    for n in range(len(y)*len(x)):
        data[n] = numpy.arange(random.randint(1,10),dtype='int32')+1
    data = numpy.reshape(data,(len(y),len(x)))
    vlvar[:] = data
    print(vlvar)
    print('vlen variable =\n',vlvar[:])
    print(f)
    print(f.variables['phony_vlen_var'])
    print(f.vltypes['phony_vlen'])
    z = f.createDimension('z', 10)
    strvar = f.createVariable('strvar',str,'z')
    chars = '1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    data = numpy.empty(10,object)
    for n in range(10):
        stringlen = random.randint(2,12)
        data[n] = ''.join([random.choice(chars) for i in range(stringlen)])
    strvar[:] = data
    print('variable-length string variable:\n',strvar[:])
    print(f)
    print(f.variables['strvar'])
    f.close()
if __name__ == "__main__":
    # Smoke test: create an empty FORCING.nc skeleton and close it.
    #init_from_file('FORCING.nc')
    #init_forcing_nc()
    fnc = CrocusForcing()
    fnc.close()
| mit |
ombt/analytics | books/python_for_data_analysis/mine/ch1/ex1.py | 1 | 1424 | #!/usr/bin/python
#
# general libs
#
import sys
#
# data analysis libs
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy
#
from collections import defaultdict, Counter
#
# path to book data
#
# Path to the data sets that accompany the book (1st-edition repo checkout).
BD_PATH = "/home/ombt/sandbox/analytics/books/python_for_data_analysis/mine/pydata-book-1st-edition/"
#
import json
# Each line of the file is one standalone JSON record (1.usa.gov click data).
path = BD_PATH + 'ch02/usagov_bitly_data2012-03-16-1331923249.txt'
records = [json.loads(line) for line in open(path)]
#
print(records[0]['tz'])
#
# Collect the time-zone field; not every record carries one.
time_zones = [rec['tz'] for rec in records if 'tz' in rec]
print(time_zones[:10])
#
def get_counts(sequence):
    """Count occurrences of each distinct item in *sequence*.

    Returns a plain dict mapping item -> number of appearances.
    """
    tally = {}
    for item in sequence:
        tally[item] = tally.get(item, 0) + 1
    return tally
#
counts = get_counts(time_zones)
#
# Spot-check one well-populated time zone.
print("America/New_York ... {0}".format(counts['America/New_York']))
#
def get_counts2(sequence):
    """Count items using collections.defaultdict (no membership test needed)."""
    tally = defaultdict(int)
    for item in sequence:
        tally[item] += 1
    return tally
#
counts2 = get_counts2(time_zones)
# Same spot-check as above, via the defaultdict-based counter.
print("America/New_York ... {0}".format(counts2['America/New_York']))
#
print(len(time_zones))
#
def top_counts(count_dict, n=10):
    """Return the *n* largest (count, key) pairs in ascending order."""
    pairs = sorted((count, key) for key, count in count_dict.items())
    return pairs[-n:]
#
top_10_counts = top_counts(counts2)
print(top_10_counts)
#
# Counter.most_common gives the same answer with far less code.
counts = Counter(time_zones)
print(counts.most_common(10))
#
# on page 21
#
sys.exit(0)
| mit |
eg-zhang/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
    """
    Dummy classifier used to exercise recursive feature elimination.

    Exposes just enough of the scikit-learn estimator API (fit / predict /
    score / get_params / set_params) for RFE and RFECV to drive it.
    """

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        # X and Y must describe the same number of samples.
        assert_true(len(X) == len(Y))
        # Every feature gets the same weight, so the elimination order is
        # well-defined but carries no information.
        self.coef_ = np.ones(X.shape[1], dtype=np.float64)
        return self

    def predict(self, T):
        # "Prediction" is simply the number of samples handed in.
        return T.shape[0]

    # All prediction-style entry points behave identically.
    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        # Score 1.0 iff foo_param exceeds 1 -- lets tests steer searches
        # deterministically.
        return 1. if self.foo_param > 1 else 0.

    def get_params(self, deep=True):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        return self
def test_rfe_set_params():
    """Kernel set via the deprecated estimator_params must give the same
    predictions as setting it directly on the estimator."""
    rng = check_random_state(0)
    iris = load_iris()
    noise = rng.normal(size=(len(iris.data), 6))
    X = np.c_[iris.data, noise]
    y = iris.target
    # Reference run: kernel passed straight to the SVC constructor.
    direct = RFE(estimator=SVC(kernel="linear"),
                 n_features_to_select=4, step=0.1)
    pred_direct = direct.fit(X, y).predict(X)
    # Deprecated path: kernel supplied through estimator_params.
    with warnings.catch_warnings(record=True):
        # estimator_params is deprecated
        via_params = RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
                         estimator_params={'kernel': 'linear'})
        pred_params = via_params.fit(X, y).predict(X)
    assert_array_equal(pred_direct, pred_params)
def test_rfe_features_importance():
    """RFE driven by feature_importances_ (random forest) should select the
    same features as RFE driven by coef_ (linear SVC)."""
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = iris.target
    forest = RandomForestClassifier(n_estimators=20,
                                    random_state=rng, max_depth=2)
    rfe_forest = RFE(estimator=forest, n_features_to_select=4, step=0.1)
    rfe_forest.fit(X, y)
    assert_equal(len(rfe_forest.ranking_), X.shape[1])
    rfe_linear = RFE(estimator=SVC(kernel="linear"),
                     n_features_to_select=4, step=0.1)
    rfe_linear.fit(X, y)
    # Check if the supports are equal
    assert_array_equal(rfe_forest.get_support(), rfe_linear.get_support())
def test_rfe_deprecation_estimator_params():
    """Passing estimator_params to RFE/RFECV must emit the documented
    DeprecationWarning when fit is called."""
    # The exact wording is asserted below, so this literal must match the
    # message produced by the RFE implementation.
    deprecation_message = ("The parameter 'estimator_params' is deprecated as "
                           "of version 0.16 and will be removed in 0.18. The "
                           "parameter is no longer necessary because the "
                           "value is set via the estimator initialisation or "
                           "set_params method.")
    generator = check_random_state(0)
    iris = load_iris()
    # Iris features plus six noise columns, as in the other RFE tests.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = iris.target
    assert_warns_message(DeprecationWarning, deprecation_message,
                         RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
                             estimator_params={'kernel': 'linear'}).fit,
                         X=X,
                         y=y)
    assert_warns_message(DeprecationWarning, deprecation_message,
                         RFECV(estimator=SVC(), step=1, cv=5,
                               estimator_params={'kernel': 'linear'}).fit,
                         X=X,
                         y=y)
def test_rfe():
    """RFE on iris + noise must recover the four informative features and
    behave identically on dense and sparse input."""
    generator = check_random_state(0)
    iris = load_iris()
    # Four informative iris features followed by six Gaussian noise columns.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    X_sparse = sparse.csr_matrix(X)
    y = iris.target
    # dense model
    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])
    # sparse model
    clf_sparse = SVC(kernel="linear")
    rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
    rfe_sparse.fit(X_sparse, y)
    X_r_sparse = rfe_sparse.transform(X_sparse)
    # The selected submatrix should be exactly the original iris data.
    assert_equal(X_r.shape, iris.data.shape)
    assert_array_almost_equal(X_r[:10], iris.data[:10])
    # Predicting/scoring through RFE must match the refit estimator.
    assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
    assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
    # Dense and sparse code paths must agree.
    assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
    """RFE should run end-to-end with a minimal duck-typed estimator."""
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = iris.target
    # dense model
    mock = MockClassifier()
    selector = RFE(estimator=mock, n_features_to_select=4, step=0.1)
    selector.fit(X, y)
    X_reduced = selector.transform(X)
    mock.fit(X_reduced, y)
    assert_equal(len(selector.ranking_), X.shape[1])
    assert_equal(X_reduced.shape, iris.data.shape)
def test_rfecv():
    """RFECV must filter out the noise features for every scoring variant
    (default score, custom loss, named scorer, callable) and every step."""
    generator = check_random_state(0)
    iris = load_iris()
    # Four informative iris features followed by six Gaussian noise columns.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target)  # regression test: list should be supported
    # Test using the score function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    # All the noisy variable were filtered out
    assert_array_equal(X_r, iris.data)
    # same in sparse
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
    # Test using a customized loss function
    scoring = make_scorer(zero_one_loss, greater_is_better=False)
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scoring)
    ignore_warnings(rfecv.fit)(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    # Test using a scorer
    scorer = get_scorer('accuracy')
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scorer)
    rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    # Test fix on grid_scores: a constant scorer must yield constant scores.
    def test_scorer(estimator, X, y):
        return 1.0
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=test_scorer)
    rfecv.fit(X, y)
    assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
    # Same as the first two tests, but with step=2
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    rfecv.fit(X, y)
    # 10 features eliminated 2 at a time down to 1 gives 6 candidate subsets.
    assert_equal(len(rfecv.grid_scores_), 6)
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
    """RFECV must accept a duck-typed estimator and a plain-list target."""
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = list(iris.target)  # regression test: list should be supported
    # Test using the score function
    selector = RFECV(estimator=MockClassifier(), step=1, cv=5)
    selector.fit(X, y)
    # non-regression test for missing worst feature:
    # one grid score per candidate subset, one rank per feature.
    assert_equal(len(selector.grid_scores_), X.shape[1])
    assert_equal(len(selector.ranking_), X.shape[1])
def test_rfe_estimator_tags():
    """RFE must expose the wrapped estimator's type so that cross-validation
    picks a stratified splitter for classifiers."""
    rfe = RFE(SVC(kernel='linear'))
    assert_equal(rfe._estimator_type, "classifier")
    # make sure that cross-validation is stratified
    iris = load_iris()
    scores = cross_val_score(rfe, iris.data, iris.target)
    assert_greater(scores.min(), .7)
def test_rfe_min_step():
    """RFE must always end up selecting n_features // 2 features, however the
    ``step`` parameter is specified.

    Exercises three regimes of ``step``:
      * 0.01 -> floor(step * n_features) <= 0, so the step must be clamped to 1
      * 0.20 -> fractional step with floor(step * n_features) > 0
      * 5    -> plain integer step
    """
    n_features = 10
    X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
    n_features = X.shape[1]
    estimator = SVR(kernel="linear")
    # The three cases differ only in `step`; the expected outcome is identical.
    for step in (0.01, 0.20, 5):
        selector = RFE(estimator, step=step)
        sel = selector.fit(X, y)
        assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
    """Check that the closed-form and ceil-based formulas for the number of
    RFE iterations agree, both for RFE's ranking_ and RFECV's grid_scores_."""
    # In RFE, 'number_of_subsets_of_features'
    # = the number of iterations in '_fit'
    # = max(ranking_)
    # = 1 + (n_features + step - n_features_to_select - 1) // step
    # After optimization #4534, this number
    # = 1 + np.ceil((n_features - n_features_to_select) / float(step))
    # This test case is to test their equivalence, refer to #4534 and #3824
    def formula1(n_features, n_features_to_select, step):
        return 1 + ((n_features + step - n_features_to_select - 1) // step)
    def formula2(n_features, n_features_to_select, step):
        return 1 + np.ceil((n_features - n_features_to_select) / float(step))
    # RFE
    # Case 1, n_features - n_features_to_select is divisible by step
    # Case 2, n_features - n_features_to_select is not divisible by step
    n_features_list = [11, 11]
    n_features_to_select_list = [3, 3]
    step_list = [2, 3]
    for n_features, n_features_to_select, step in zip(
            n_features_list, n_features_to_select_list, step_list):
        generator = check_random_state(43)
        X = generator.normal(size=(100, n_features))
        y = generator.rand(100).round()
        rfe = RFE(estimator=SVC(kernel="linear"),
                  n_features_to_select=n_features_to_select, step=step)
        rfe.fit(X, y)
        # this number also equals to the maximum of ranking_
        assert_equal(np.max(rfe.ranking_),
                     formula1(n_features, n_features_to_select, step))
        assert_equal(np.max(rfe.ranking_),
                     formula2(n_features, n_features_to_select, step))
    # In RFECV, 'fit' calls 'RFE._fit'
    # 'number_of_subsets_of_features' of RFE
    # = the size of 'grid_scores' of RFECV
    # = the number of iterations of the for loop before optimization #4534
    # RFECV, n_features_to_select = 1
    # Case 1, n_features - 1 is divisible by step
    # Case 2, n_features - 1 is not divisible by step
    n_features_to_select = 1
    n_features_list = [11, 10]
    step_list = [2, 2]
    for n_features, step in zip(n_features_list, step_list):
        generator = check_random_state(43)
        X = generator.normal(size=(100, n_features))
        y = generator.rand(100).round()
        rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
        rfecv.fit(X, y)
        assert_equal(rfecv.grid_scores_.shape[0],
                     formula1(n_features, n_features_to_select, step))
        assert_equal(rfecv.grid_scores_.shape[0],
                     formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
dwettstein/pattern-recognition-2016 | ip/doc_processor.py | 1 | 4822 | import os
from glob import glob
import matplotlib.pyplot as plt
from skimage.io import imread
from ip.features import compute_features
from ip.preprocess import word_preprocessor
from utils.fio import get_config, get_absolute_path, parse_svg, path2polygon
from utils.image import crop
from utils.transcription import get_transcription, get_word
def write_word_features(output_file, word_id, mat, fea):
    """Append one word's record to *output_file*.

    Record layout (each part terminated by os.linesep):
      1. the word id,
      2. the entries of *fea*, tab-separated integers ('%i'),
      3. one line per row of *mat*, tab-separated floats ('%f'),
      4. the record terminator '###'.

    Args:
        output_file: path of the feature file; opened in append mode.
        word_id: identifier string written as the record header.
        fea: iterable of integer-formattable values.
        mat: 2-D iterable of float-formattable values.
    """
    # 'with' guarantees the handle is closed even if a write fails
    # (the original open()/close() pair leaked the handle on exceptions).
    with open(output_file, 'a') as handle:
        handle.write(word_id + os.linesep)
        # Plain loop instead of a side-effect list comprehension.
        for x in fea:
            handle.write('%i\t' % x)
        handle.write(os.linesep)
        for row in mat:
            for cell in row:
                handle.write('%f\t' % cell)
            handle.write(os.linesep)
        handle.write('###' + os.linesep)
def main(imgpath=None, svgpath=None, outputfile=None, retake=True, saveimgs=True):
    """Pre-process every word of every document and append its features to a file.

    Args:
        imgpath: directory containing the page images (*.jpg); taken from the
            config when None.
        svgpath: directory containing the word-location SVGs; taken from the
            config when None.
        outputfile: path of the feature file; taken from the config when None.
        retake: when True, resume a previous run by skipping word ids that are
            already present in the output file.
        saveimgs: when True, save intermediate word images, prefixing the file
            name with the word's transcription when available.
    """
    print('Word pre-processing')
    config = get_config()
    # create an output file
    if outputfile is None:
        txtp = get_absolute_path(config.get('KWS.features', 'file'))
    else:
        txtp = get_absolute_path(os.path.join(outputfile))
    # Collect the word ids already written: each record ends with '###',
    # so the line after a '###' (if long enough) is the next record's id.
    processed = []
    if retake and os.path.exists(txtp):
        takenext = False
        for line in open(txtp, 'r'):
            line = line.strip()
            if takenext and (len(line) >= 9):
                processed.append(line.strip())
                takenext = False
            elif line == "###":
                takenext = True
    else:
        # Fresh run: write the parameter header followed by a '###' marker.
        handle = open(txtp, 'w+')
        for param, value in config.items('KWS.prepro'):
            handle.write('%s: %s%s' % (param, value, os.linesep))
        for param, value in config.items('KWS.features'):
            handle.write('%s: %s%s' % (param, value, os.linesep))
        handle.write('###' + os.linesep)
        handle.close()
    # get the data
    if svgpath is None:
        svgd = get_absolute_path(config.get('KWS', 'locations'))
    else:
        svgd = get_absolute_path(svgpath)
    svgs = glob(os.path.join(svgd, '*.svg'))
    if imgpath is None:
        imgd = get_absolute_path(config.get('KWS', 'images'))
    else:
        imgd = get_absolute_path(imgpath)
    imgs = glob(os.path.join(imgd, '*.jpg'))
    # parse some parameter
    threshold = float(config.get('KWS.prepro', 'segmentation_threshold'))
    relative_height = float(config.get('KWS.prepro', 'relative_height'))
    skew_resolution = float(config.get('KWS.prepro', 'angular_resolution'))
    primary_peak_height = float(config.get('KWS.prepro', 'primary_peak_height'))
    secondary_peak_height = float(config.get('KWS.prepro', 'secondary_peak_height'))
    window_width = int(config.get('KWS.features', 'window_width'))
    step_size = int(config.get('KWS.features', 'step_size'))
    blocks = int(config.get('KWS.features', 'number_of_blocks'))
    # Sorting both lists pairs each SVG with its image by file name.
    svgs.sort()
    imgs.sort()
    for svgp, imgp in zip(svgs, imgs):
        svgid = os.path.basename(svgp).replace('.svg', '')
        imgid = os.path.basename(imgp).replace('.jpg', '')
        print('\t%s\n\t%s' % (svgp, imgp))
        if svgid != imgid:
            raise IOError('the id\'s of the image file (%s) and the svg file (%s) are not the same' % (svgid, imgid))
        trans = get_transcription(svgid)
        print('\tdoc id: %s' % svgid)
        wids, paths = parse_svg(svgp)
        img = imread(imgp)
        for wid, path in zip(wids, paths):
            print('\tword id: %s' % wid)
            if retake and (processed.count(wid) == 1):
                print('\talready processed')
                continue
            # look up the corresponding word
            if saveimgs:
                imgfile = wid
                word = get_word(wid, data=trans)
                if word is not None:
                    imgfile = word.code2string() + '_' + imgfile
            else:
                imgfile = None
            # get the word image
            poly = path2polygon(path)
            roi = crop(img, poly)
            pre, sym = word_preprocessor(roi,
                                         threshold=threshold,
                                         rel_height=relative_height,
                                         skew_res=skew_resolution,
                                         ppw=primary_peak_height,
                                         spw=secondary_peak_height,
                                         save=imgfile)
            # word_preprocessor signals failure by returning an error string.
            if type(pre) is str:
                print('\tpre-processing failed\n\t\t%s' % pre)
                continue
            fea = compute_features(pre,
                                   window_width=window_width,
                                   step_size=step_size,
                                   blocks=blocks)
            # NOTE(review): write_word_features is declared as
            # (output_file, word_id, mat, fea), so here the feature matrix is
            # passed as 'mat' and [height, width, sym] as 'fea' -- presumably
            # intentional (ints on the header line, floats as the matrix),
            # but the parameter names are swapped; confirm against the writer.
            write_word_features(txtp, wid, fea, [pre.shape[0], pre.shape[1], sym])
    print('...')
if __name__ == '__main__':
    main()
| mit |
jameshensman/VFF | experiments/mcmc_pines/plot_pines.py | 1 | 2449 | # Copyright 2016 James Hensman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from run_pines import build_model, getLocations
from sklearn.neighbors import KernelDensity
# import matplotlib2tikz
# Observed point locations and the list of basis sizes M to compare.
X = getLocations()
Ms = [14, 16, 18, 20, 22, 24, 26, 28, 30]
def plot_model(m, sample_df, ax, gridResolution=64):
    """Average the model's predicted intensity over all MCMC samples and draw
    the resulting gridResolution x gridResolution image on *ax*.

    Each row of *sample_df* is one posterior sample of the model parameters.
    """
    per_sample = []
    for _, sample in sample_df.iterrows():
        m.set_parameter_dict(sample)
        mean, _ = m.predict_y(m.X.value)
        per_sample.append(mean)
    mean_intensity = np.mean(per_sample, 0)
    grid = mean_intensity.reshape(gridResolution, gridResolution)
    # Transpose + flip so the image is oriented like the unit square.
    ax.imshow(np.flipud(grid.T),
              interpolation='nearest', extent=[0, 1, 0, 1],
              cmap=plt.cm.viridis, vmin=0.005, vmax=0.18)
# Intensity panels, one per basis size M.
f, axes = plt.subplots(2, 5, sharex=True, sharey=True, figsize=(12, 5))
for ax, M in zip(axes.flat, Ms):
    # NOTE(review): this 'continue' makes the rest of the loop body
    # unreachable, so no intensity panels are actually drawn -- presumably
    # disabled on purpose (e.g. missing pickle files); confirm before removing.
    continue
    m = build_model(M)
    df = pd.read_pickle('samples_df_M{}.pickle'.format(M))
    # df = df.ix[::100] # thin for speed
    plot_model(m, df, ax)
    ax.set_title(str(M))
# axes.flatten()[-1].plot(X[:, 0], X[:, 1], 'k.')
# matplotlib2tikz.save('pines_intensity.tikz')
# plot the convergence of the parameters:
f, axes = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(12, 5))
keys = ['model.kerns.item0.lengthscales', 'model.kerns.item1.lengthscales']
titles = ['lengthscale (horz.)', 'lengthscale (vert)']
mins = [0, 0]
maxs = [0.4, 0.4]
# One subplot per lengthscale; one KDE curve per basis size M.
# NOTE(review): 'title' is unpacked but never used (no ax.set_title call).
for key, title, ax, xmin, xmax in zip(keys, titles, axes.flatten(), mins, maxs):
    for M in Ms:
        m = build_model(M)
        df = pd.read_pickle('samples_df_M{}.pickle'.format(M))
        ls = np.vstack(df[key])
        kde = KernelDensity(kernel='gaussian', bandwidth=0.05).fit(ls)
        X_plot = np.linspace(xmin, xmax, 100)[:, None]
        ax.plot(X_plot, np.exp(kde.score_samples(X_plot)), label=str(M))
    ax.legend()
# matplotlib2tikz.save('pines_lengthscale_convergence.tikz')
# matplotlib2tikz.save('pines_lengthscale_convergence.tikz')
| apache-2.0 |
Ramanujakalyan/Inherit | choosing-k-in-kmeans/gap_stats.py | 25 | 7550 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
# Modified from:
# (c) 2014 Reid Johnson
#
# Modified from:
# (c) 2013 Mikael Vejdemo-Johansson
# BSD License
#
#
# The gap statistic is defined by Tibshirani, Walther, Hastie in:
# Estimating the number of clusters in a data set via the gap statistic
# J. R. Statist. Soc. B (2001) 63, Part 2, pp 411-423
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import scipy.spatial.distance
import scipy.stats
from sklearn.cluster import KMeans
dst = sp.spatial.distance.euclidean  # shorthand: Euclidean distance between two points
def gap_statistics(data, refs=None, nrefs=10, ks=range(1,11)):
    """Computes the gap statistics for an nxm dataset.

    The gap statistic measures the difference between within-cluster dispersion
    on the input dataset and that expected under an appropriate reference null
    distribution (Tibshirani, Walther & Hastie, 2001).

    One may either input a precomputed set of reference distributions (via the
    parameter refs) or specify the number of reference distributions (via the
    parameter nrefs) for automatic generation of uniform distributions within
    the bounding box of the dataset (data).

    Args:
        data ((n,m) SciPy array): The dataset on which to compute the gap
            statistics.
        refs ((n,m,k) SciPy array, optional): A precomputed set of reference
            distributions. Defaults to None.
        nrefs (int, optional): The number of reference distributions for
            automatic generation. Defaults to 10.
        ks (list, optional): The values of k for which to compute the gap
            statistics. Defaults to range(1,11).

    Returns:
        gaps: an array of gap statistics computed for each k.
        errs: an array of standard errors, one per gap computation.
        difs: an array of differences between each gap_k and the sum of
            gap_k+1 minus err_k+1.
    """
    shape = data.shape
    # BUG FIX: 'refs == None' is an elementwise comparison for array-valued
    # refs and makes the 'if' raise; identity test is the correct check.
    if refs is None:
        tops = data.max(axis=0)  # maxima along the first axis (rows)
        bots = data.min(axis=0)  # minima along the first axis (rows)
        # The bounding box of the input dataset (kept as a matrix so that
        # '*' below performs matrix multiplication, scaling each column).
        dists = sp.matrix(sp.diag(tops - bots))
        # nrefs uniform distributions in the half-open interval [0.0, 1.0)
        rands = sp.random.random_sample(size=(shape[0], shape[1], nrefs))
        # Rescale each uniform sample into the dataset's bounding box.
        for i in range(nrefs):
            rands[:, :, i] = rands[:, :, i] * dists + bots
    else:
        rands = refs
    gaps = sp.zeros((len(ks),))  # gap statistic per k
    errs = sp.zeros((len(ks),))  # standard error per k
    for (i, k) in enumerate(ks):
        # Cluster the input dataset with the current value of k.
        kmeans = KMeans(n_clusters=k, n_init=2, n_jobs=-1).fit(data)
        (kmc, kml) = kmeans.cluster_centers_, kmeans.labels_
        # Within-cluster dispersion of the input dataset.
        disp = sum([dst(data[m, :], kmc[kml[m], :]) for m in range(shape[0])])
        # Within-cluster dispersions of the reference datasets.
        refdisps = sp.zeros((rands.shape[2],))
        for j in range(rands.shape[2]):
            kmeans = KMeans(n_clusters=k, n_init=2, n_jobs=-1).fit(rands[:, :, j])
            (kmc, kml) = kmeans.cluster_centers_, kmeans.labels_
            refdisps[j] = sum([dst(rands[m, :, j], kmc[kml[m], :])
                               for m in range(shape[0])])
        # Estimated gap statistic for k.
        gaps[i] = sp.mean(sp.log(refdisps) - sp.log(disp))
        # Expected error for k. BUG FIX: '1 + 1/nrefs' truncates to 1 under
        # Python 2 integer division, silently dropping the (1 + 1/B)
        # simulation-error correction; float literals keep it in both versions.
        errs[i] = sp.sqrt(sum(((sp.log(refdisp) - sp.mean(sp.log(refdisps))) ** 2)
                              for refdisp in refdisps) / float(nrefs)) \
            * sp.sqrt(1.0 + 1.0 / nrefs)
    # Difference between gap_k and the sum of gap_k+1 minus err_k+1; the
    # first k with a non-negative difference estimates the optimal k.
    difs = sp.array([gaps[k] - (gaps[k+1] - errs[k+1])
                     for k in range(len(gaps) - 1)])
    return gaps, errs, difs
def plot_gap_statistics(gaps, errs, difs):
    """Generates and shows plots for the gap statistics.

    A figure with two subplots is generated. The first subplot is an errorbar
    plot of the estimated gap statistics computed for each value of k. The
    second subplot is a barplot of the differences in the computed gap
    statistics.

    Args:
        gaps (SciPy array): An array of gap statistics, one computed for each k.
        errs (SciPy array): An array of standard errors (se), with one
            corresponding to each gap computation.
        difs (SciPy array): An array of differences between each gap_k and the
            sum of gap_k+1 minus err_k+1.
    """
    # Create a figure
    fig = plt.figure(figsize=(8,8))
    #plt.subplots_adjust(wspace=0.35) # adjust the distance between figures
    # Subplot 1: gap statistic per k with error bars.
    ax = fig.add_subplot(211)
    ind = range(1,len(gaps)+1) # the x values for the gaps
    # Create an errorbar plot
    rects = ax.errorbar(ind, gaps, yerr=errs, xerr=None, linewidth=1.0)
    # Add figure labels and ticks
    ax.set_title('Clustering Gap Statistics', fontsize=16)
    ax.set_xlabel('Number of clusters k', fontsize=14)
    ax.set_ylabel('Gap Statistic', fontsize=14)
    ax.set_xticks(ind)
    # Add figure bounds (leave 10% headroom above the largest gap + error)
    ax.set_ylim(0, max(gaps+errs)*1.1)
    ax.set_xlim(0, len(gaps)+1.0)
    # space b/w subplots
    fig.subplots_adjust(hspace=.5)
    # Subplot 2: gap differences; the first positive one marks the optimum.
    ax = fig.add_subplot(212)
    ind = range(1,len(difs)+1) # the x values for the difs
    max_gap = None
    if len(np.where(difs > 0)[0]) > 0:
        max_gap = np.where(difs > 0)[0][0] + 1 # the k with the first positive dif
    # Create a bar plot
    ax.bar(ind, difs, alpha=0.5, color='g', align='center')
    # Add figure labels and ticks
    if max_gap:
        ax.set_title('Clustering Gap Differences\n(k=%d Estimated as Optimal)' % (max_gap), \
            fontsize=16)
    else:
        ax.set_title('Clustering Gap Differences\n', fontsize=16)
    ax.set_xlabel('Number of clusters k', fontsize=14)
    ax.set_ylabel('Gap Difference', fontsize=14)
    ax.xaxis.set_ticks(range(1,len(difs)+1))
    # Add figure bounds (20% margin on both sides of the extreme differences)
    ax.set_ylim(min(difs)*1.2, max(difs)*1.2)
    ax.set_xlim(0, len(difs)+1.0)
    # Show the figure
    plt.show()
| unlicense |
sandeepdsouza93/TensorFlow-15712 | tensorflow/contrib/learn/python/learn/grid_search_test.py | 27 | 2080 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
# sklearn-dependent tests are opt-in via the TENSORFLOW_SKLEARN env var,
# and silently disabled again if scikit-learn is not importable.
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
    try:
        # pylint: disable=g-import-not-at-top
        from sklearn import datasets
        from sklearn.grid_search import GridSearchCV
        from sklearn.metrics import accuracy_score
    except ImportError:
        HAS_SKLEARN = False
class GridSearchTest(tf.test.TestCase):
    """Checks that TF Learn estimators plug into sklearn's GridSearchCV."""

    def testIrisDNN(self):
        # Skip silently when sklearn integration is not enabled/installed.
        if not HAS_SKLEARN:
            return
        random.seed(42)
        iris = datasets.load_iris()
        columns = learn.infer_real_valued_columns_from_input(iris.data)
        estimator = learn.DNNClassifier(
            feature_columns=columns, hidden_units=[10, 20, 10],
            n_classes=3)
        # Search over two hidden-layer layouts, scoring by accuracy.
        search = GridSearchCV(estimator,
                              {'hidden_units': [[5, 5], [10, 10]]},
                              scoring='accuracy',
                              fit_params={'steps': [50]})
        search.fit(iris.data, iris.target)
        score = accuracy_score(iris.target, search.predict(iris.data))
        self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
# Run the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
Obus/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Keep only classes 0 and 1 to get a binary problem.
X = X[y != 2]
y = y[y != 2]

# Center the features.
X -= np.mean(X, 0)

###############################################################################
# Demo path functions

# Regularization grid: from the smallest C that yields a non-empty L1 model
# up to 1000x that value, log-spaced.
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)


print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
# Refit the same estimator for each C and record the coefficient vector.
for c in cs:
    clf.set_params(C=c)
    clf.fit(X, y)
    coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)

# Plot each coefficient's trajectory against log10(C).
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
PubuduSaneth/genome4d | juicebox2hibrowse_v2.py | 1 | 3871 |
# coding: utf-8
# # Read juicebox dump output and reformat to 7 column format
# ## Juicebox dump output format - Input of the program
# <pre>
# chr1:start chr2:start Normalized_interactions
# 10000 10000 311.05484
# 10000 20000 92.60087
# 20000 20000 296.0056
# 10000 30000 47.701942
# </pre>
# ## 7 column format - Output of the program
# <pre>
# chr1 start1 end1 chr2 start2 end2 Normalized_interactions
# 1 10000 20000 1 10000 20000 311.05484
# 1 10000 20000 1 20000 30000 92.60087
# 1 20000 30000 1 20000 30000 296.0056
# 1 10000 20000 1 30000 40000 47.701942
# </pre>
# ### runDump.bash: script that runs juicebox dump command
#
# #### Juicebox dump parameters:
# * Resolution: 100K
# * Normalization method: KR (normalization method used in the rao paper)
# * Output: observed normalized values (in a sparse matrix)
#
# #### Bash script
# <pre>
# #!/bin/bash
# ## $1 == chr1
# ## $2 == chr2
# ## $3 == output_file
# echo -e "run juicebox dump\n\t resolution: 100000\n\t normalization method: KR\n\t output: observed notmalized sparce matrix"
# touch $3
#
# java -jar /Users/pubudu/Downloads/MacTools/juicebox_tools.7.5.jar dump observed KR /Users/pubudu/Documents/RefData/raoPaper/copy_GSE63525_HMEC_combined.hic $1 $2 BP 100000 $3
# echo -e "\#\#\#\#juicebox dump observed KR copy_GSE63525_HMEC_combined.hic $1 $2 BP 100000 $3\n$(cat $3)" > $3
# </pre>
import pandas as pd
import subprocess
from itertools import combinations
chr_list=['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','X','Y'] # List of chromosomes
f_counter = 0 # Counter for the number of files created
set_path = "/projects/rrresearch/Pubudu/hic/datasets/rao/GSE63525_HMEC/hic_file/" # path of the .hic file and dump files
file_list = [] # list of all the dump files created
# NOTE(review): 'res' is declared here but the reformatting loop below adds a
# hard-coded 100000 instead of using it -- keep the two in sync.
res = 100000 # Resolution used to run juicebox dump command
def run_Dump(arg_list):
    """Invoke the runDump.bash wrapper around 'juicebox dump'.

    arg_list is [chr1, chr2, output_file_name]; the output file is created
    inside the module-level ``set_path`` directory.
    """
    chr_a, chr_b, out_name = arg_list[0], arg_list[1], arg_list[2]
    cmd = ['/projects/rrresearch/Pubudu/hic/tools/runDump.bash',
           str(chr_a), str(chr_b), ''.join([set_path, str(out_name)])]
    subprocess.call(cmd)
# Run juicebox dump for every intra-chromosomal pair (chrN vs chrN).
for chroms in chr_list:
    print 'Juicebox dump run - chromosome: {}'.format(chroms)
    f_name = '{}.chr{}_chr{}.dumpOut.txt'.format(f_counter, chroms, chroms)
    arg_list = [chroms, chroms, f_name]
    file_list.append(f_name)
    run_Dump(arg_list)
    f_counter += 1
# itertools.combinations(iterable, r)
### Return r length subsequences of elements from the input iterable.
### Combinations are emitted in lexicographic sort order. So, if the input iterable is sorted, the combination tuples will be produced in sorted order.
### Elements are treated as unique based on their position, not on their value. So if the input elements are unique, there will be no repeat values in each combination.
# Then every inter-chromosomal pair (each unordered pair exactly once).
for combo in combinations(chr_list, 2):
    print 'Juicebox dump run - chromosomes: {} and {}'.format(combo[0], combo[1])
    f_name = '{}.chr{}_chr{}.dumpOut.txt'.format(f_counter, combo[0], combo[1])
    arg_list = [combo[0], combo[1], f_name]
    file_list.append(f_name)
    run_Dump(arg_list)
    f_counter += 1
#print file_list
print len(file_list)
# Reformat each dump file from the 3-column sparse format to 7 columns:
# chr1 start1 end1 chr2 start2 end2 normalized_interactions.
for f_name in file_list:
    # File names look like '<i>.chr<A>_chr<B>.dumpOut.txt'.
    chr_pair = f_name.split('.')[1].split('_')
    print 'Reformatting to 7 columns: {} - chr1 {} and chr2 {} '.format(''.join([set_path,f_name]), chr_pair[0], chr_pair[1])
    file = open('{}.hibrowse.txt'.format(''.join([set_path,f_name])),'w')
    with open(''.join([set_path,f_name])) as fx:
        # Skip the command header line prepended by runDump.bash.
        next(fx)
        for line in fx:
            line = line.replace('\n','').replace('\r','').split('\t')
            # KR normalization can yield NaN; record those bins as 0.
            if line[2] == 'NaN':
                intCounts = 0
            else:
                intCounts = float(line[2])
            # Bin end = start + resolution (100 kb, hard-coded; see 'res').
            file.write('{}\n'.format('\t'.join([chr_pair[0], line[0], str(int(line[0])+100000), chr_pair[1], line[1], str(int(line[1])+100000), str(intCounts)])))
    file.close()
| gpl-3.0 |
schreiberx/sweet | doc/rexi/rexi_with_laplace/lap-rexi-tests/lapel.py | 1 | 1906 |
import sys
import os
import warnings
import numpy as np
import math
import matplotlib.pyplot as plt
from nilt import *
from nilt_constant import *
#Constant function reconstruction
def constant_recon(dt, trunc=0):
    """Reconstruct the constant function f(t)=1, whose Laplace transform is
    1/s, at time *dt* with the module-level circle and ellipse NILT contours.

    Sweeps the number of quadrature points from 32 to 252 in steps of 4 and
    records |(|f_approx|) - 1| for each contour.

    Returns:
        (quad_points, error_circ, error_ellip) as NumPy arrays.
    """
    def fhat(s):
        # Laplace transform of f(t) = 1.
        return 1/s

    n_first = 32
    n_last = 256
    n_runs = int((n_last - n_first) / 4)
    err_circle = np.zeros(n_runs)
    err_ellipse = np.zeros(n_runs)
    n_points = np.zeros(n_runs)

    print("dt, N Circ_Error Ellip_Error")
    for idx, n_quad in enumerate(range(n_first, n_last, 4)):
        n_points[idx] = n_quad
        circ.set_quadrature(n_quad)
        ellip.set_quadrature(n_quad)
        recon_circle = circ.apply_nilt(fhat, dt, trunc)
        recon_ellipse = ellip.apply_nilt(fhat, dt, trunc)
        err_circle[idx] = np.abs(np.abs(recon_circle) - 1.0)
        err_ellipse[idx] = np.abs(np.abs(recon_ellipse) - 1.0)
        print(dt, n_quad, err_circle[idx], err_ellipse[idx])
    print()
    return n_points, err_circle, err_ellipse
# Build the two NILT integration contours and give each 32 quadrature points.
ellip = nilt("ellipse")
ellip.set_parameters(10.0, 0.5)
circ = nilt("circle")
circ.set_parameters(10.0)
circ.set_quadrature(32)
ellip.set_quadrature(32)

# Figure 1: the quadrature nodes of both contours in the complex plane.
fig1, ax = plt.subplots()
ax.scatter(ellip.sn.real,ellip.sn.imag, label="Ellipse", marker=".")
ax.scatter(circ.sn.real,circ.sn.imag, label="Circle", marker=".")
ax.legend(loc="best")
plt.savefig("ellip_circ.png")

# Figure 2: reconstruction error vs. number of quadrature points, one
# subplot per evaluation time dt.
fig2, axes = plt.subplots(3,3, constrained_layout=True, figsize=(10,15))
plt.suptitle("Constant reconstruction")
dtlist = [0.1, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 8.0, 10.0]
for i, ax in enumerate(axes.reshape(-1)):
    dt=dtlist[i]
    quad_points, error_circ, error_ellip = constant_recon(dt, trunc=1)
    ax.plot(quad_points,error_circ, label="Circle")
    ax.plot(quad_points,error_ellip, label="Ellipse")
    ax.set(xlabel="Number of quadrature points",
           ylabel="Error", yscale='log', title="dt"+str(dt))
    ax.legend(loc="best")
plt.savefig("const_recon_trunc.png")
plt.show()
| mit |
NUAAXXY/globOpt | evaluation/compareMappedGraphs.py | 2 | 2422 | import packages.project as project
import packages.primitive as primitive
import packages.processing
import packages.relationGraph as relgraph
import packages.io
import argparse
import matplotlib.pyplot as plt
import networkx as nx
from networkx.algorithms import isomorphism
import numpy as np
import unitTests.relations as test_relations
################################################################################
## Command line parsing
parser = argparse.ArgumentParser(description='Compare ground truth noise distribution (continuous generator and generated samples) and the result of the optimisation.')
parser.add_argument('projectdir')
args = parser.parse_args()

# Normalize the project directory (strip a trailing slash) and derive the
# conventional file layout from the project name.
projectdir = args.projectdir
if projectdir[-1] == '/':
    projectdir = projectdir[:-1]

projectname = projectdir.split('/')[-1]
projectfile = projectdir+'/gt/'+projectname+'.prj'
gtlinesfile = projectdir+'/gt/primitives.csv'
gtassignfile = projectdir+'/gt/points_primitives.csv'
cloudfile = projectdir+'/cloud.ply'
mappingfile = projectdir+'/corresp.csv'

# First-iteration optimisation output.
linesfile_it1 = projectdir+'/primitives_it0.bonmin.csv'
assignfile_it1 = projectdir+'/points_primitives_it1.csv'

print 'Processing project ', projectname

################################################################################
## Reading input files
# NOTE(review): this rebinds the imported 'project' module to a PyProject
# instance, shadowing the module for the rest of the script -- confirm
# nothing below needs the module itself.
project = project.PyProject(projectfile)
cloud = packages.io.readPointCloudFromPly(cloudfile)
gtlines = primitive.readPrimitivesFromFile(gtlinesfile)
gtassign = packages.io.readPointAssignementFromFiles(gtassignfile)

lines_it1 = primitive.readPrimitivesFromFile(linesfile_it1)
assign_it1 = packages.io.readPointAssignementFromFiles(assignfile_it1)

# associative arrays, mapping
# - the gt primitive to the estimated primitive
# - the gt uid to the estimated uid
primitiveCorres, primitiveCorresId = packages.io.readPrimitiveCorrespondancesFromFiles(mappingfile, gtlines, lines_it1)

#gtlines = packages.processing.removeUnassignedPrimitives(gtlines, gtassign)
#lines_it1 = packages.processing.removeUnassignedPrimitives(lines_it1, assign_it1)
#gtassign = packages.processing.removeUnassignedPoint(gtlines, gtassign)
#assign_it1 = packages.processing.removeUnassignedPoint(lines_it1, assign_it1)

################################################################################
## Process
print test_relations.process(gtlines, gtassign, lines_it1, assign_it1, primitiveCorres, primitiveCorresId, True)
| apache-2.0 |
ycool/apollo | modules/tools/prediction/data_pipelines/cruise_models.py | 3 | 5231 | ###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from common.configure import parameters
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
import sklearn
from torch.utils.data import Dataset, DataLoader, sampler
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
import torch.nn as nn
import torch
from proto.cruise_model_pb2 import TensorParameter, InputParameter,\
Conv1dParameter, DenseParameter, ActivationParameter, MaxPool1dParameter,\
AvgPool1dParameter, LaneFeatureConvParameter, ObsFeatureFCParameter,\
ClassifyParameter, RegressParameter, CruiseModelParameter
import proto.cruise_model_pb2
import argparse
import logging
import numpy as np
import h5py
import os
"""
@requirement:
pytorch 0.4.1
"""
'''
This file includes all model definitions and related loss functions.
'''
'''
Model details:
- Fully-connected layers for classification and regression, respectively.
- It will compute a classification score indicating the probability
of the obstacle choosing the given lane.
- It will also compute a time indicating how soon the obstacle will reach
the center of the given lane.
'''
class FullyConn_NN(torch.nn.Module):
    """Two-headed fully-connected network.

    Given a 174-dim feature vector, produces:
      * a classification score in (0, 1) -- probability of the obstacle
        choosing the given lane;
      * a regression value >= 0 -- time until the obstacle reaches the
        lane center.
    """

    def __init__(self):
        super(FullyConn_NN, self).__init__()
        # NOTE: layers are instantiated in the same order as before so the
        # module names (state_dict keys) and seeded random initialization
        # are unchanged.
        classify_layers = [
            nn.Linear(174, 88), nn.Sigmoid(), nn.Dropout(0.3),
            nn.Linear(88, 55),  nn.Sigmoid(), nn.Dropout(0.2),
            nn.Linear(55, 23),  nn.Sigmoid(), nn.Dropout(0.3),
            nn.Linear(23, 10),  nn.Sigmoid(), nn.Dropout(0.0),
            nn.Linear(10, 1),   nn.Sigmoid(),
        ]
        regress_layers = [
            nn.Linear(174, 88), nn.ReLU(), nn.Dropout(0.1),
            nn.Linear(88, 23),  nn.ReLU(), nn.Dropout(0.1),
            nn.Linear(23, 1),   nn.ReLU(),
        ]
        self.classify = torch.nn.Sequential(*classify_layers)
        self.regress = torch.nn.Sequential(*regress_layers)

    def forward(self, x):
        """Return (classification score, regression value) for input x."""
        return self.classify(x), self.regress(x)
class FCNN_CNN1D(torch.nn.Module):
    """Hybrid model: 1D-CNN over lane features + FC over obstacle features.

    The input row is split as x = [obstacle features, lane features]; the
    last 80 entries are reshaped to 4 channels x 20 points for the lane
    convolution branch, and the remaining entries (68, per the
    Linear(68, 40) below) go through obs_feature_fc.  Outputs are a
    classification score and a regression value; unlike FullyConn_NN,
    the regression head also consumes the classification output.
    """
    def __init__(self):
        super(FCNN_CNN1D, self).__init__()
        # Lane branch: Conv1d(4->10, k=3, s=1): 20 -> 18 points,
        # then Conv1d(10->25, k=3, s=2): 18 -> 8 points per channel.
        self.lane_feature_conv = torch.nn.Sequential(
            nn.Conv1d(4, 10, 3, stride=1),\
            #nn.BatchNorm1d(10),\
            nn.ReLU(),\
            #nn.Conv1d(10, 16, 3, stride=2),\
            #nn.BatchNorm1d(16),\
            #nn.ReLU(),\
            nn.Conv1d(10, 25, 3, stride=2),\
            #nn.BatchNorm1d(25)
            )
        # Pooling (kernel 4): 8 -> 2 points; max- and avg-pooled maps are
        # concatenated in forward(), giving 25 * 2 * 2 = 100 lane features.
        self.lane_feature_maxpool = nn.MaxPool1d(4)
        self.lane_feature_avgpool = nn.AvgPool1d(4)
        self.lane_feature_dropout = nn.Dropout(0.0)
        # Obstacle branch: 68 -> 40 -> 24 features.
        self.obs_feature_fc = torch.nn.Sequential(
            nn.Linear(68, 40),
            nn.Sigmoid(),
            nn.Dropout(0.0),
            nn.Linear(40, 24),
            nn.Sigmoid(),
            nn.Dropout(0.0),
        )
        # Classification head: 124 = 100 (lane) + 24 (obstacle) inputs.
        # NOTE: the final Sigmoid is commented out, so this head emits a
        # raw logit (presumably squashed by the loss -- confirm at call site).
        self.classify = torch.nn.Sequential(
            nn.Linear(124, 66),
            nn.Sigmoid(),
            nn.Dropout(0.3),
            nn.Linear(66, 48),
            nn.Sigmoid(),
            nn.Dropout(0.1),
            nn.Linear(48, 11),
            nn.Sigmoid(),
            nn.Dropout(0.1),
            nn.Linear(11, 1),\
            #nn.Sigmoid()
            )
        # Regression head: 125 = 124 shared features + 1 classification output.
        self.regress = torch.nn.Sequential(
            nn.Linear(125, 77),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(77, 46),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(46, 12),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(12, 1),
            nn.ReLU()
        )
    def forward(self, x):
        """Return (classification output, regression output) for input x."""
        # Split the input: last 80 entries form the lane polyline (4 x 20).
        lane_fea = x[:, -80:]
        lane_fea = lane_fea.view(lane_fea.size(0), 4, 20)
        obs_fea = x[:, :-80]
        lane_fea = self.lane_feature_conv(lane_fea)
        # Concatenate flattened max- and avg-pooled lane feature maps.
        lane_fea_max = self.lane_feature_maxpool(lane_fea)
        lane_fea_avg = self.lane_feature_avgpool(lane_fea)
        lane_fea = torch.cat([lane_fea_max.view(lane_fea_max.size(0), -1),
                              lane_fea_avg.view(lane_fea_avg.size(0), -1)], 1)
        lane_fea = self.lane_feature_dropout(lane_fea)
        obs_fea = self.obs_feature_fc(obs_fea)
        tot_fea = torch.cat([lane_fea, obs_fea], 1)
        # Regression also sees the classification output as an extra input.
        out_c = self.classify(tot_fea)
        out_r = self.regress(torch.cat([tot_fea, out_c], 1))
        return out_c, out_r
| apache-2.0 |
Traecp/MCA_GUI | McaGUI_v18.py | 3 | 73468 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import scipy.ndimage
from scipy import stats
from scipy.fftpack import fft, fftfreq, fftshift
import os, sys
import gc
from os import listdir
from os.path import isfile,join
import gtk
import matplotlib as mpl
import matplotlib.pyplot as plt
#mpl.use('GtkAgg')
from matplotlib.figure import Figure
#from matplotlib.axes import Subplot
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as NavigationToolbar
from matplotlib.cm import jet#, gist_rainbow # colormap
from matplotlib.widgets import Cursor
#from matplotlib.patches import Rectangle
from matplotlib import path
#import matplotlib.patches as patches
from matplotlib.ticker import MaxNLocator
import xrayutilities as xu
from lmfit import Parameters, minimize
import h5py as h5
from MCA_GUI import mca_spec as SP
# Application metadata (shown in the main window title bar).
__version__ = "1.1.8"
__date__ = "06/11/2014"
__author__ = "Thanh-Tra NGUYEN"
__email__ = "thanhtra0104@gmail.com"

# Global matplotlib defaults applied to every figure created by the GUI.
#mpl.rcParams['font.size'] = 18.0
#mpl.rcParams['axes.labelsize'] = 'large'
mpl.rcParams['legend.fancybox'] = True
mpl.rcParams['legend.handletextpad'] = 0.5
mpl.rcParams['legend.fontsize'] = 'medium'
# Subplot margins leave room for the color bar and axis labels.
mpl.rcParams['figure.subplot.bottom'] = 0.13
mpl.rcParams['figure.subplot.top'] = 0.93
mpl.rcParams['figure.subplot.left'] = 0.14
mpl.rcParams['figure.subplot.right'] = 0.915
# High-resolution output for "Save HQ image".
mpl.rcParams['savefig.dpi'] = 300
def Fourier(X, vect):
    """Return the positive-frequency amplitude spectrum of *vect*, normalized to 1.

    Args:
    -----
        X (ndarray): sample positions; assumed uniformly spaced (only
            X[1] - X[0] is used as the sampling step)
        vect (ndarray): signal values sampled at X

    Return:
    -------
        (frequencies, amplitudes) for the non-negative half of the
        spectrum, amplitudes scaled so the maximum is 1.
    """
    N = vect.size        # number of data points
    T = X[1] - X[0]      # sample spacing (uniform grid assumed)
    TF = fft(vect)
    xf = fftshift(fftfreq(N, T))
    amplitude = np.abs(fftshift(TF))
    # Keep only the non-negative-frequency half.
    # NOTE(fix): integer division -- N/2 is a float under Python 3 and
    # would break slicing; N // 2 is identical under Python 2.
    half = N // 2
    xf = xf[half:]
    amplitude = amplitude[half:]
    return xf, amplitude / amplitude.max()
def flat_data(data, dynlow, dynhigh, log):
    """Clamp *data* into a dynamic range.

    In log mode the bounds are 10**dynlow .. 10**dynhigh and the clamped
    values are returned as log10; otherwise dynlow/dynhigh are used
    directly and the data stays on a linear scale.
    """
    if log:
        low, high = 10 ** dynlow, 10 ** dynhigh
        return np.log10(np.clip(data, low, high))
    return np.clip(data, dynlow, dynhigh)
def psdVoigt(parameters, x):
    """Evaluate a pseudo-Voigt profile at *x*.

    *parameters* is an lmfit-style mapping with .value attributes for:
    y0 (offset), xc (center), A (amplitude), w (width), mu (Lorentzian
    fraction, 0..1).
    """
    y0 = parameters['y0'].value
    xc = parameters['xc'].value
    A = parameters['A'].value
    w = parameters['w'].value
    mu = parameters['mu'].value
    # Area-normalized Lorentzian and Gaussian components of equal FWHM w.
    lorentz = (2 / np.pi) * (w / (4 * (x - xc) ** 2 + w ** 2))
    gauss = (np.sqrt(4 * np.log(2)) / (np.sqrt(np.pi) * w)) * np.exp(-(4 * np.log(2) / w ** 2) * (x - xc) ** 2)
    return y0 + A * (mu * lorentz + (1 - mu) * gauss)
def objective(pars, y, x):
    """Residual between data *y* and the pseudo-Voigt model (minimized by lmfit)."""
    return y - psdVoigt(pars, x)
def init(data_x, data_y, xc, arbitrary=False):
    """Build the initial pseudo-Voigt parameter set for a fit.

    The peak position is pinned at *xc* (the point the user clicked on the
    map).  The starting amplitude is the data value at xc, or the data
    maximum when *arbitrary* is True (xc is then not guaranteed to be an
    exact sample of data_x).

    Parameters added, in order: y0, xc, A, w, mu (mu bounded to [0, 1]).
    """
    if arbitrary:
        amplitude = data_y.max()
    else:
        amplitude = data_y[np.where(data_x == xc)[0][0]]
    param = Parameters()
    param.add('y0', value=1.0)
    param.add('xc', value=xc)
    param.add('A', value=amplitude)
    param.add('w', value=0.5)
    param.add('mu', value=0.5, min=0., max=1.)
    return param
def fit(data_x,data_y,xc, arbitrary=False):
    """Fit a pseudo-Voigt peak to (data_x, data_y) around position *xc*.

    Args:
    -----
        data_x, data_y (ndarray): profile to fit
        xc (float): initial peak position (user-selected)
        arbitrary (bool): forwarded to init(); use the data maximum as the
            starting amplitude instead of the value at xc

    Return:
    -------
        (fitted parameters, fitted curve evaluated on a regular x grid)
    """
    param_init = init(data_x,data_y,xc,arbitrary)
    # Ensure ascending x order for the minimizer.
    # NOTE(review): only data_x is reversed here, data_y is not -- the
    # (x, y) pairing inside objective() changes when this branch runs;
    # confirm whether data_y should be reversed as well.
    if data_x[0] > data_x[-1]:
        data_x = data_x[::-1]
    # NOTE(review): the lmfit result object is discarded; evaluating with
    # param_init below relies on minimize() updating the parameter set in
    # place (old lmfit API behaviour) -- verify against the lmfit version used.
    result = minimize(objective, param_init, args=(data_y,data_x))
    x = np.linspace(data_x.min(),data_x.max(),data_x.shape[0])
    y = psdVoigt(param_init,x)
    return param_init, y
class PopUpFringes(object):
    """Pop-up GTK window plotting fringe index against fringe position.

    A straight line (index = slope * position + intercept) is fitted to the
    selected fringe positions and the slope is annotated on the plot;
    presumably 1/slope is the fringe period -- confirm with the caller.
    """
    def __init__(self, xdata, xlabel, ylabel, title):
        # xdata: 1D array of the selected fringe positions.
        self.popupwin=gtk.Window()
        self.popupwin.set_size_request(600,550)
        self.popupwin.set_position(gtk.WIN_POS_CENTER)
        self.popupwin.set_border_width(10)
        self.xdata = xdata
        vbox = gtk.VBox()
        self.fig=Figure(dpi=100)
        self.ax = self.fig.add_subplot(111)
        self.canvas = FigureCanvas(self.fig)
        self.main_figure_navBar = NavigationToolbar(self.canvas, self)
        self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
        self.ax.set_xlabel(xlabel, fontsize = 18)
        self.ax.set_ylabel(ylabel, fontsize = 18)
        self.ax.set_title(title, fontsize = 18)
        # Linear regression of fringe index (0, 1, 2, ...) on position.
        xi = np.arange(len(self.xdata))
        slope, intercept, r_value, p_value, std_err = stats.linregress(self.xdata,xi)
        fitline = slope*self.xdata+intercept
        # Red fitted line over blue data points.
        self.ax.plot(self.xdata, fitline, 'r-',self.xdata,xi, 'bo')
        self.ax.axis([self.xdata.min(),self.xdata.max(),xi.min()-1, xi.max()+1])
        self.ax.text(0.3, 0.9,'Slope = %.4f +- %.4f' % (slope, std_err),
                     horizontalalignment='center',
                     verticalalignment='center',
                     transform = self.ax.transAxes,
                     color='red')
        vbox.pack_start(self.main_figure_navBar, False, False, 0)
        vbox.pack_start(self.canvas, True, True, 2)
        self.popupwin.add(vbox)
        self.popupwin.connect("destroy", self.dest)
        self.popupwin.show_all()

    def dest(self,widget):
        """GTK "destroy" callback: close the pop-up window."""
        self.popupwin.destroy()
class PopUpImage(object):
    """Pop-up GTK window showing an (x, y) curve with interactive fitting.

    Mouse actions on the plot:
      * right click  -- fit a pseudo-Voigt around the clicked x and mark
        the fitted center (annotated as a period, in nm);
      * middle click -- overlay the normalized first derivative of y.
    """
    def __init__(self, xdata, ydata, xlabel, ylabel, title):
        # xdata, ydata: 1D arrays of the profile to display.
        self.popupwin=gtk.Window()
        self.popupwin.set_size_request(600,550)
        self.popupwin.set_position(gtk.WIN_POS_CENTER)
        self.popupwin.set_border_width(10)
        self.xdata = xdata
        self.ydata = ydata
        vbox = gtk.VBox()
        self.fig=Figure(dpi=100)
        self.ax = self.fig.add_subplot(111)
        self.canvas = FigureCanvas(self.fig)
        self.main_figure_navBar = NavigationToolbar(self.canvas, self)
        self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
        self.canvas.mpl_connect("button_press_event",self.on_press)
        self.ax.set_xlabel(xlabel, fontsize = 18)
        self.ax.set_ylabel(ylabel, fontsize = 18)
        self.ax.set_title(title, fontsize = 18)
        self.ax.plot(self.xdata, self.ydata, 'b-', lw=2)
        # Annotations and overlay artists added by on_press, removed by
        # clear_notes().
        self.textes = []
        self.plots = []
        vbox.pack_start(self.main_figure_navBar, False, False, 0)
        vbox.pack_start(self.canvas, True, True, 2)
        self.popupwin.add(vbox)
        self.popupwin.connect("destroy", self.dest)
        self.popupwin.show_all()

    def dest(self,widget):
        """GTK "destroy" callback: close the pop-up window."""
        self.popupwin.destroy()

    def on_press(self, event):
        """Matplotlib button-press callback: fit (right) or derivative (middle)."""
        if event.inaxes == self.ax and event.button==3:
            # Right click: fit a pseudo-Voigt around the clicked position.
            self.clear_notes()
            xc = event.xdata
            #***** Find the closest x value *****
            residuel = self.xdata - xc
            residuel = np.abs(residuel)
            j = np.argmin(residuel)
            #y = self.ydata[i-1:i+1]
            #yc= y.max()
            #j = np.where(self.ydata == yc)
            #j = j[0][0]
            xc= self.xdata[j]
            # Fit over a small window (3 samples either side) around xc.
            x_fit = self.xdata[j-3:j+3]
            y_fit = self.ydata[j-3:j+3]
            fitted_param, fitted_data = fit(x_fit, y_fit, xc, True)
            x_fit = np.linspace(x_fit.min(), x_fit.max(), 200)
            y_fit = psdVoigt(fitted_param, x_fit)
            # NOTE(review): the fitted center/stderr come from the parameter
            # set returned by fit(), which relies on lmfit mutating it in
            # place -- verify against the lmfit version used.
            period = fitted_param['xc'].value
            std_err= fitted_param['xc'].stderr
            # Mark the fitted curve and center, annotate the period.
            p = self.ax.plot(x_fit, y_fit,'r-')
            p2 = self.ax.axvline(period,color='green',lw=2)
            txt=self.ax.text(0.05, 0.9, 'Period = %.4f +- %.4f (nm)'%(period, std_err), transform = self.ax.transAxes, color='red')
            self.textes.append(txt)
            self.plots.append(p[0])
            self.plots.append(p2)
        elif event.inaxes == self.ax and event.button==2:
            # Middle click: overlay the normalized first derivative.
            dif = np.diff(self.ydata)
            dif = dif/dif.max()
            p3=self.ax.plot(dif,'r-')
            self.plots.append(p3[0])
        self.canvas.draw()

    def clear_notes(self):
        """Remove all annotations/overlays added by previous clicks."""
        if len(self.textes)>0:
            for t in self.textes:
                t.remove()
        if len(self.plots)>0:
            for p in self.plots:
                p.remove()
        self.textes = []
        self.plots = []
class MyMainWindow(gtk.Window):
def __init__(self):
super(MyMainWindow, self).__init__()
self.set_title("MCA Reciprocal space map processing. Version %s - last update on: %s"%(__version__,__date__))
self.set_size_request(1200,900)
self.set_position(gtk.WIN_POS_CENTER)
self.set_border_width(10)
self.toolbar = gtk.Toolbar()
self.toolbar.set_style(gtk.TOOLBAR_ICONS)
self.refreshtb = gtk.ToolButton(gtk.STOCK_REFRESH)
self.opentb = gtk.ToolButton(gtk.STOCK_OPEN)
self.sep = gtk.SeparatorToolItem()
self.aspecttb = gtk.ToolButton(gtk.STOCK_PAGE_SETUP)
self.quittb = gtk.ToolButton(gtk.STOCK_QUIT)
self.toolbar.insert(self.opentb, 0)
self.toolbar.insert(self.refreshtb, 1)
self.toolbar.insert(self.aspecttb, 2)
self.toolbar.insert(self.sep, 3)
self.toolbar.insert(self.quittb, 4)
self.tooltips = gtk.Tooltips()
self.tooltips.set_tip(self.refreshtb,"Reload data files")
self.tooltips.set_tip(self.opentb,"Open a folder containing HDF5 (*.h5) data files")
self.tooltips.set_tip(self.aspecttb,"Change the graph's aspect ratio")
self.tooltips.set_tip(self.quittb,"Quit the program")
self.opentb.connect("clicked", self.choose_folder)
self.refreshtb.connect("clicked",self.folder_update)
self.aspecttb.connect("clicked",self.change_aspect_ratio)
self.quittb.connect("clicked", gtk.main_quit)
self.graph_aspect = False #Flag to change the aspect ratio of the graph, False = Auto, True = equal
############################# BOXES ###############################################
vbox = gtk.VBox()
vbox.pack_start(self.toolbar,False,False,0)
hbox=gtk.HBox()
######################### TREE VIEW #############################################
self.sw = gtk.ScrolledWindow()
self.sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
hbox.pack_start(self.sw, False, False, 0)
self.store=[]
self.list_store = gtk.ListStore(str)
self.treeView = gtk.TreeView(self.list_store)
self.treeView.connect("row-activated",self.on_changed_rsm)
rendererText = gtk.CellRendererText()
self.TVcolumn = gtk.TreeViewColumn("RSM data files", rendererText, text=0)
self.TVcolumn.set_sort_column_id(0)
self.treeView.append_column(self.TVcolumn)
self.sw.add(self.treeView)
self.GUI_current_folder = self.DATA_current_folder = os.getcwd()
#******************************************************************
# Notebooks
#******************************************************************
self.notebook = gtk.Notebook()
self.page_GUI = gtk.HBox()
self.page_conversion = gtk.VBox()
self.page_XRDML = gtk.VBox()
######################################FIGURES####################33
#self.page_single_figure = gtk.HBox()
self.midle_panel = gtk.VBox()
self.rsm = ""
self.rsm_choosen = ""
self.my_notes = []
self.lines = []
self.points=[]
self.polygons=[]
self.fig=Figure(dpi=100)
## Draw line for arbitrary profiles
self.arb_lines_X = []
self.arb_lines_Y = []
self.arb_line_points = 0
#self.ax = self.fig.add_subplot(111)
self.ax = self.fig.add_axes([0.1,0.2,0.7,0.7])
self.fig.subplots_adjust(left=0.1,bottom=0.20, top=0.90)
self.vmin = 0
self.vmax = 1000
self.vmax_range = self.vmax
self.canvas = FigureCanvas(self.fig)
Fig_hbox = gtk.HBox()
self.Export_HQ_Image_btn = gtk.Button("Save HQ image")
self.Export_HQ_Image_btn.connect("clicked", self.Export_HQ_Image)
self.main_figure_navBar = NavigationToolbar(self.canvas, self)
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
#Global color bar
self.cax = self.fig.add_axes([0.85, 0.20, 0.03, 0.70])#left,bottom,width,height
#self.canvas.mpl_connect("motion_notify_event",self.on_motion)
self.canvas.mpl_connect("button_press_event",self.on_press)
#self.canvas.mpl_connect("button_release_event",self.on_release)
self.mouse_moved = False #If click without move: donot zoom the image
Fig_hbox.pack_start(self.Export_HQ_Image_btn, False, False, 0)
Fig_hbox.pack_start(self.main_figure_navBar, True,True, 0)
self.midle_panel.pack_start(Fig_hbox, False,False, 0)
self.midle_panel.pack_start(self.canvas, True,True, 2)
self.page_GUI.pack_start(self.midle_panel, True,True, 0)
#hbox.pack_start(self.midle_panel, True,True, 0)
########################################## RIGHT PANEL ###################
self.right_panel = gtk.VBox(False,0)
self.linear_scale_btn = gtk.ToggleButton("Linear scale")
self.linear_scale_btn.set_usize(30,0)
self.linear_scale_btn.connect("toggled",self.log_update)
self.log_scale=0
#self.wavelength_txt = gtk.Label("Energy (eV)")
##self.wavelength_txt.set_alignment(1,0.5)
#self.wavelength_field = gtk.Entry()
#self.wavelength_field.set_text("8333")
#self.wavelength_field.set_usize(30,0)
#self.lattice_const_txt = gtk.Label("Lattice constant (nm)")
#self.lattice_const_txt.set_alignment(1,0.5)
#self.lattice_const = gtk.Entry()
#self.lattice_const.set_text("0.5431")
#self.lattice_const.set_usize(30,0)
self.int_range_txt = gtk.Label("Integration range")
self.int_range_txt.set_alignment(1,0.5)
self.int_range = gtk.Entry()
self.int_range.set_text("0.05")
self.int_range.set_usize(30,0)
self.fitting_range_txt = gtk.Label("Fitting range")
self.fitting_range_txt.set_alignment(1,0.5)
self.fitting_range = gtk.Entry()
self.fitting_range.set_text("0.1")
self.fitting_range.set_usize(30,0)
# ********** Set the default values for configuration *************
self.plotXYprofiles_btn = gtk.RadioButton(None,"Plot X,Y profiles")
self.plotXYprofiles_btn.set_active(False)
self.arbitrary_profiles_btn = gtk.RadioButton(self.plotXYprofiles_btn,"Arbitrary profiles")
self.rectangle_profiles_btn = gtk.RadioButton(self.plotXYprofiles_btn,"ROI projection")
self.option_table = gtk.Table(4,3,False)#Pack the options
self.option_table.attach(self.linear_scale_btn, 0,1,0,1)
self.option_table.attach(self.plotXYprofiles_btn,0,1,1,2)
self.option_table.attach(self.arbitrary_profiles_btn,0,1,2,3)
self.option_table.attach(self.rectangle_profiles_btn,0,1,3,4)
# self.option_table.attach(self.wavelength_txt,1,2,0,1)
# self.option_table.attach(self.wavelength_field,2,3,0,1)
# self.option_table.attach(self.lattice_const_txt,1,2,1,2)
# self.option_table.attach(self.lattice_const, 2,3,1,2)
self.option_table.attach(self.int_range_txt, 1,2,0,1)
self.option_table.attach(self.int_range, 2,3,0,1)
self.option_table.attach(self.fitting_range_txt, 1,2,1,2)
self.option_table.attach(self.fitting_range, 2,3,1,2)
### Options for profile plots
self.profiles_log_btn = gtk.ToggleButton("Y-Log")
self.profiles_log_btn.connect("toggled",self.profiles_update)
self.profiles_export_data_btn = gtk.Button("Export data")
self.profiles_export_data_btn.connect("clicked",self.profiles_export)
self.profiles_option_box = gtk.HBox(False,0)
self.profiles_option_box.pack_start(self.profiles_log_btn, False, False, 0)
self.profiles_option_box.pack_start(self.profiles_export_data_btn, False, False, 0)
### Figure of profiles plot
self.profiles_fringes = []
self.fig_profiles = Figure()
self.profiles_ax1 = self.fig_profiles.add_subplot(211)
self.profiles_ax1.set_title("Qz profile", size=14)
self.profiles_ax2 = self.fig_profiles.add_subplot(212)
self.profiles_ax2.set_title("Qx profile", size=14)
self.profiles_canvas = FigureCanvas(self.fig_profiles)
self.profiles_canvas.set_size_request(450,50)
self.profiles_canvas.mpl_connect("button_press_event",self.profile_press)
self.profiles_navBar = NavigationToolbar(self.profiles_canvas, self)
self.cursor_pro1 = Cursor(self.profiles_ax1, color='k', linewidth=1, useblit=True)
self.cursor_pro2 = Cursor(self.profiles_ax2, color='k', linewidth=1, useblit=True)
#### Results of fitted curves
self.fit_results_table = gtk.Table(7,3, False)
title = gtk.Label("Fitted results:")
self.chi_title = gtk.Label("Qz profile")
self.tth_title = gtk.Label("Qx profile")
y0 = gtk.Label("y0:")
xc = gtk.Label("xc:")
A = gtk.Label("A:")
w = gtk.Label("FWHM:")
mu = gtk.Label("mu:")
y0.set_alignment(0,0.5)
xc.set_alignment(0,0.5)
A.set_alignment(0,0.5)
w.set_alignment(0,0.5)
mu.set_alignment(0,0.5)
self.Qz_fitted_y0 = gtk.Label()
self.Qz_fitted_xc = gtk.Label()
self.Qz_fitted_A = gtk.Label()
self.Qz_fitted_w = gtk.Label()
self.Qz_fitted_mu = gtk.Label()
self.Qx_fitted_y0 = gtk.Label()
self.Qx_fitted_xc = gtk.Label()
self.Qx_fitted_A = gtk.Label()
self.Qx_fitted_w = gtk.Label()
self.Qx_fitted_mu = gtk.Label()
self.fit_results_table.attach(title,0,3,0,1)
self.fit_results_table.attach(self.chi_title,1,2,1,2)
self.fit_results_table.attach(self.tth_title,2,3,1,2)
self.fit_results_table.attach(y0,0,1,2,3)
self.fit_results_table.attach(xc,0,1,3,4)
self.fit_results_table.attach(A,0,1,4,5)
self.fit_results_table.attach(w,0,1,5,6)
self.fit_results_table.attach(mu,0,1,6,7)
self.fit_results_table.attach(self.Qz_fitted_y0,1,2,2,3)
self.fit_results_table.attach(self.Qz_fitted_xc,1,2,3,4)
self.fit_results_table.attach(self.Qz_fitted_A,1,2,4,5)
self.fit_results_table.attach(self.Qz_fitted_w,1,2,5,6)
self.fit_results_table.attach(self.Qz_fitted_mu,1,2,6,7)
self.fit_results_table.attach(self.Qx_fitted_y0,2,3,2,3)
self.fit_results_table.attach(self.Qx_fitted_xc,2,3,3,4)
self.fit_results_table.attach(self.Qx_fitted_A,2,3,4,5)
self.fit_results_table.attach(self.Qx_fitted_w,2,3,5,6)
self.fit_results_table.attach(self.Qx_fitted_mu,2,3,6,7)
#### PACK the right panel
self.right_panel.pack_start(self.option_table, False, False, 0)
self.right_panel.pack_start(self.profiles_option_box,False,False,0)
self.right_panel.pack_start(self.profiles_navBar,False,False,0)
self.right_panel.pack_start(self.profiles_canvas,True,True,0)
self.right_panel.pack_start(self.fit_results_table, False, False, 0)
self.page_GUI.pack_end(self.right_panel,False, False,5)
#********************************************************************
# Conversion data SPEC to HDF page
#********************************************************************
self.conv_box = gtk.VBox()
self.box1 = gtk.HBox()
self.det_frame = gtk.Frame()
self.det_frame.set_label("Detector Vantec")
self.det_frame.set_label_align(0.5,0.5)
self.exp_frame = gtk.Frame()
self.exp_frame.set_label("Experiment parameters")
self.exp_frame.set_label_align(0.5,0.5)
self.conv_frame = gtk.Frame()
self.conv_frame.set_label("Data conversion: SPEC-HDF5")
self.conv_frame.set_label_align(0.5,0.5)
#self.conv_frame.set_alignment(0.5,0.5)
#********************************************************************
# Detector parameters
#********************************************************************
self.det_table = gtk.Table(6,2,False)
self.t1 = gtk.Label("Detector size (mm)")
self.t2 = gtk.Label("Number of channels")
self.t3 = gtk.Label("Center channel")
self.t4 = gtk.Label("Channels/Degree")
self.t5 = gtk.Label("ROI (from-to)")
self.t6 = gtk.Label("Orientation")
self.t1.set_alignment(0,0.5)
self.t2.set_alignment(0,0.5)
self.t3.set_alignment(0,0.5)
self.t4.set_alignment(0,0.5)
self.t5.set_alignment(0,0.5)
self.t6.set_alignment(0,0.5)
self.t1_entry = gtk.Entry()
self.t1_entry.set_text("50")
self.t2_entry = gtk.Entry()
self.t2_entry.set_text("2048")
self.t3_entry = gtk.Entry()
self.t3_entry.set_text("819.87")
self.t4_entry = gtk.Entry()
self.t4_entry.set_text("211.012")
self.small_box = gtk.HBox()
self.t5_label = gtk.Label("-")
self.t5_entry1 = gtk.Entry()
self.t5_entry1.set_text("40")
self.t5_entry2 = gtk.Entry()
self.t5_entry2.set_text("1300")
self.small_box.pack_start(self.t5_entry1,True, True,0)
self.small_box.pack_start(self.t5_label,True, True,0)
self.small_box.pack_start(self.t5_entry2,True, True,0)
self.t6_entry = gtk.combo_box_new_text()
self.t6_entry.append_text("Up (zero on the bottom)")
self.t6_entry.append_text("Down (zero on the top)")
self.t6_entry.set_active(1)
self.det_table.attach(self.t1, 0,1,0,1)
self.det_table.attach(self.t2, 0,1,1,2)
self.det_table.attach(self.t3, 0,1,2,3)
self.det_table.attach(self.t4, 0,1,3,4)
self.det_table.attach(self.t5, 0,1,4,5)
self.det_table.attach(self.t6, 0,1,5,6)
self.det_table.attach(self.t1_entry, 1,2,0,1)
self.det_table.attach(self.t2_entry, 1,2,1,2)
self.det_table.attach(self.t3_entry, 1,2,2,3)
self.det_table.attach(self.t4_entry, 1,2,3,4)
self.det_table.attach(self.small_box, 1,2,4,5)
self.det_table.attach(self.t6_entry, 1,2,5,6)
self.det_table_align = gtk.Alignment()
self.det_table_align.set_padding(15,10,10,10)
self.det_table_align.set(0.5, 0.5, 1.0, 1.0)
self.det_table_align.add(self.det_table)
self.det_frame.add(self.det_table_align)
#********************************************************************
# Experiment parameters
#********************************************************************
self.exp_table = gtk.Table(6,2,False)
self.e1 = gtk.Label("Substrate material:")
self.e1_other = gtk.Label("If other:")
self.e2 = gtk.Label("Energy (eV)")
self.e3 = gtk.Label("Attenuation coefficient file")
self.e4 = gtk.Label("Foil colunm name (in SPEC file)")
self.e5 = gtk.Label("Monitor colunm name (in SPEC file)")
self.e6 = gtk.Label("Reference monitor (for normalization)")
self.e1.set_alignment(0,0.5)
self.e1_other.set_alignment(1,0.5)
self.e2.set_alignment(0,0.5)
self.e3.set_alignment(0,0.5)
self.e4.set_alignment(0,0.5)
self.e5.set_alignment(0,0.5)
self.e6.set_alignment(0,0.5)
#self.e1_entry = gtk.Label("Si for now")
self.e1_entry = gtk.combo_box_new_text()
self.e1_entry.append_text("-- other")
self.e1_entry.append_text("Si")
self.e1_entry.append_text("Ge")
self.e1_entry.append_text("GaAs")
self.e1_entry.append_text("GaP")
self.e1_entry.append_text("GaSb")
self.e1_entry.append_text("InAs")
self.e1_entry.append_text("InP")
self.e1_entry.append_text("InSb")
self.e1_entry.set_active(1)
self.e1_entry_other = gtk.Entry()
self.e1_entry_other.set_text("")
self.e2_entry = gtk.Entry()
self.e2_entry.set_text("8333")
self.e3_box = gtk.HBox()
self.e3_path =gtk.Entry()
self.e3_browse = gtk.Button("Browse")
self.e3_browse.connect("clicked", self.select_file, self.e3_path, "A")
self.e3_box.pack_start(self.e3_path, False, False, 0)
self.e3_box.pack_start(self.e3_browse, False, False, 0)
self.e4_entry = gtk.Entry()
self.e4_entry.set_text("pfoil")
self.e5_entry = gtk.Entry()
self.e5_entry.set_text("vct3")
self.e6_entry = gtk.Entry()
self.e6_entry.set_text("1e6")
substrate_box1 = gtk.HBox()
substrate_box2 = gtk.HBox()
substrate_box1.pack_start(self.e1, False, False, 0)
substrate_box1.pack_start(self.e1_entry, False, False, 0)
substrate_box2.pack_start(self.e1_other, False, False, 0)
substrate_box2.pack_start(self.e1_entry_other, False, False, 0)
self.exp_table.attach(substrate_box1, 0,1,0,1)
self.exp_table.attach(self.e2, 0,1,1,2)
self.exp_table.attach(self.e3, 0,1,2,3)
self.exp_table.attach(self.e4, 0,1,3,4)
self.exp_table.attach(self.e5, 0,1,4,5)
self.exp_table.attach(self.e6, 0,1,5,6)
self.exp_table.attach(substrate_box2, 1,2,0,1)
self.exp_table.attach(self.e2_entry, 1,2,1,2)
self.exp_table.attach(self.e3_box, 1,2,2,3)
self.exp_table.attach(self.e4_entry, 1,2,3,4)
self.exp_table.attach(self.e5_entry, 1,2,4,5)
self.exp_table.attach(self.e6_entry, 1,2,5,6)
self.exp_table_align = gtk.Alignment()
self.exp_table_align.set_padding(15,10,10,10)
self.exp_table_align.set(0.5, 0.5, 1.0, 1.0)
self.exp_table_align.add(self.exp_table)
self.exp_frame.add(self.exp_table_align)
#********************************************************************
# Data conversion information
#********************************************************************
self.conv_table = gtk.Table(6,3,False)
self.c1 = gtk.Label("Spec file")
self.c2 = gtk.Label("MCA file")
self.c3 = gtk.Label("Destination folder")
self.c4 = gtk.Label("Scan number (from-to)")
self.c5 = gtk.Label("Description for each RSM (optional-separate by comma)")
self.c6 = gtk.Label("Problem of foil delay (foil[n]-->data[n+1])")
self.c1.set_alignment(0,0.5)
self.c2.set_alignment(0,0.5)
self.c3.set_alignment(0,0.5)
self.c4.set_alignment(0,0.5)
self.c5.set_alignment(0,0.5)
self.c6.set_alignment(0,0.5)
self.c1_entry1 = gtk.Entry()
self.c2_entry1 = gtk.Entry()
self.c3_entry1 = gtk.Entry()
self.c4_entry1 = gtk.Entry()
self.c5_entry1 = gtk.Entry()
self.c5_entry1.set_text("")
self.c6_entry = gtk.CheckButton()
self.c1_entry2 = gtk.Button("Browse SPEC")
self.c2_entry2 = gtk.Button("Browse MCA")
self.c3_entry2 = gtk.Button("Browse Folder")
self.c4_entry2 = gtk.Entry()
self.c1_entry2.connect("clicked", self.select_file, self.c1_entry1, "S")
self.c2_entry2.connect("clicked", self.select_file, self.c2_entry1, "M")
self.c3_entry2.connect("clicked", self.select_folder, self.c3_entry1, "D")
self.conv_table.attach(self.c1, 0,1,0,1)
self.conv_table.attach(self.c2, 0,1,1,2)
self.conv_table.attach(self.c3, 0,1,2,3)
self.conv_table.attach(self.c4, 0,1,3,4)
self.conv_table.attach(self.c5, 0,1,4,5)
self.conv_table.attach(self.c6, 0,1,5,6)
self.conv_table.attach(self.c1_entry1, 1,2,0,1)
self.conv_table.attach(self.c2_entry1, 1,2,1,2)
self.conv_table.attach(self.c3_entry1, 1,2,2,3)
self.conv_table.attach(self.c4_entry1, 1,2,3,4)
self.conv_table.attach(self.c5_entry1, 1,3,4,5)
self.conv_table.attach(self.c6_entry, 1,2,5,6)
self.conv_table.attach(self.c1_entry2, 2,3,0,1)
self.conv_table.attach(self.c2_entry2, 2,3,1,2)
self.conv_table.attach(self.c3_entry2, 2,3,2,3)
self.conv_table.attach(self.c4_entry2, 2,3,3,4)
self.conv_table_align = gtk.Alignment()
self.conv_table_align.set_padding(15,10,10,10)
self.conv_table_align.set(0.5, 0.5, 1.0, 1.0)
self.conv_table_align.add(self.conv_table)
self.conv_frame.add(self.conv_table_align)
#********************************************************************
# The RUN button
#********************************************************************
self.run_conversion = gtk.Button("Execute")
self.run_conversion.connect("clicked", self.spec2HDF)
self.run_conversion.set_size_request(50,30)
self.show_info = gtk.Label()
#********************************************************************
# Pack the frames
#********************************************************************
self.box1.pack_start(self.det_frame,padding=15)
self.box1.pack_end(self.exp_frame, padding =15)
self.conv_box.pack_start(self.box1,padding=15)
self.conv_box.pack_start(self.conv_frame,padding=5)
self.conv_box.pack_start(self.run_conversion, False,False,10)
self.conv_box.pack_start(self.show_info, False,False,10)
self.page_conversion.pack_start(self.conv_box,False, False,20)
#********************************************************************
# Conversion XRDML data to HDF
#********************************************************************
self.XRDML_conv_box = gtk.VBox()
self.Instrument_table = gtk.Table(1,4,True)
self.Inst_txt = gtk.Label("Instrument:")
self.Inst_txt.set_alignment(0,0.5)
self.Instrument = gtk.combo_box_new_text()
self.Instrument.append_text("Bruker")
self.Instrument.append_text("PANalytical")
self.Instrument.set_active(0)
self.Instrument_table.attach(self.Inst_txt,0,1,0,1)
self.Instrument_table.attach(self.Instrument, 1,2,0,1)
self.Instrument.connect("changed",self.Change_Lab_Instrument)
self.choosen_instrument = self.Instrument.get_active_text()
self.XRDML_table = gtk.Table(7,4,True)
self.XRDML_tooltip = gtk.Tooltips()
self.XRDML_substrate_txt = gtk.Label("Substrate material:")
self.XRDML_substrate_other_txt = gtk.Label("If other:")
self.XRDML_substrate_inplane_txt= gtk.Label("In-plane direction (i.e. 1 1 0) - optional")
self.XRDML_substrate_outplane_txt= gtk.Label("Out-of-plane direction (i.e. 0 0 1)-optional")
self.XRDML_reflection_txt = gtk.Label("Reflection (H K L) - optional:")
self.XRDML_energy_txt = gtk.Label("Energy (eV) - optional:")
self.XRDML_description_txt = gtk.Label("Description of the sample:")
self.XRDML_xrdml_file_txt = gtk.Label("Select RAW file:")
self.XRDML_destination_txt = gtk.Label("Select a destination folder:")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_txt, "Substrate material")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_other_txt, "The substrate material, i.e. Al, SiO2, CdTe, GaN,...")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_inplane_txt, "The substrate in-plane an out-of-plane direction - for calculation of the orientation matrix.")
self.XRDML_tooltip.set_tip(self.XRDML_reflection_txt, "H K L, separate by space, i.e. 2 2 4 (0 0 0 for a XRR map). This is used for offset correction.")
self.XRDML_tooltip.set_tip(self.XRDML_energy_txt, "If empty, the default Cu K_alpha_1 will be used.")
self.XRDML_tooltip.set_tip(self.XRDML_description_txt, "Description of the sample, this will be the name of the converted file. If empty, it will be named 'RSM.h5'")
self.XRDML_tooltip.set_tip(self.XRDML_xrdml_file_txt, "Select the data file recorded by the chosen equipment")
self.XRDML_tooltip.set_tip(self.XRDML_destination_txt, "Select a destination folder to store the converted file.")
self.XRDML_substrate_txt.set_alignment(0,0.5)
self.XRDML_substrate_other_txt.set_alignment(1,0.5)
self.XRDML_substrate_inplane_txt.set_alignment(0,0.5)
self.XRDML_substrate_outplane_txt.set_alignment(1,0.5)
self.XRDML_reflection_txt.set_alignment(0,0.5)
self.XRDML_energy_txt.set_alignment(0,0.5)
self.XRDML_description_txt.set_alignment(0,0.5)
self.XRDML_xrdml_file_txt.set_alignment(0,0.5)
self.XRDML_destination_txt.set_alignment(0,0.5)
self.XRDML_substrate = gtk.combo_box_new_text()
self.XRDML_substrate.append_text("-- other")
self.XRDML_substrate.append_text("Si")
self.XRDML_substrate.append_text("Ge")
self.XRDML_substrate.append_text("GaAs")
self.XRDML_substrate.append_text("GaN")
self.XRDML_substrate.append_text("GaP")
self.XRDML_substrate.append_text("GaSb")
self.XRDML_substrate.append_text("InAs")
self.XRDML_substrate.append_text("InP")
self.XRDML_substrate.append_text("InSb")
self.XRDML_substrate.append_text("Al2O3")
self.XRDML_substrate.set_active(0)
self.XRDML_substrate_other = gtk.Entry()
self.XRDML_substrate_other.set_text("")
self.XRDML_substrate_inplane = gtk.Entry()
self.XRDML_substrate_inplane.set_text("")
self.XRDML_substrate_outplane = gtk.Entry()
self.XRDML_substrate_outplane.set_text("")
self.XRDML_reflection = gtk.Entry()
self.XRDML_reflection.set_text("")
self.XRDML_energy = gtk.Entry()
self.XRDML_energy.set_text("")
self.XRDML_description = gtk.Entry()
self.XRDML_description.set_text("")
self.XRDML_xrdml_file_path = gtk.Entry()
self.XRDML_destination_path = gtk.Entry()
self.XRDML_xrdml_file_browse = gtk.Button("Browse RAW file")
self.XRDML_destination_browse= gtk.Button("Browse destination folder")
self.XRDML_xrdml_file_browse.connect("clicked", self.select_file, self.XRDML_xrdml_file_path, "S")
self.XRDML_destination_browse.connect("clicked", self.select_folder, self.XRDML_destination_path, "D")
self.XRDML_table.attach(self.XRDML_substrate_txt, 0,1,0,1)
self.XRDML_table.attach(self.XRDML_substrate, 1,2,0,1)
self.XRDML_table.attach(self.XRDML_substrate_other_txt, 2,3,0,1)
self.XRDML_table.attach(self.XRDML_substrate_other, 3,4,0,1)
self.XRDML_table.attach(self.XRDML_substrate_inplane_txt, 0,1,1,2)
self.XRDML_table.attach(self.XRDML_substrate_inplane, 1,2,1,2)
self.XRDML_table.attach(self.XRDML_substrate_outplane_txt, 2,3,1,2)
self.XRDML_table.attach(self.XRDML_substrate_outplane, 3,4,1,2)
self.XRDML_table.attach(self.XRDML_reflection_txt, 0,1,2,3)
self.XRDML_table.attach(self.XRDML_reflection, 1,2,2,3)
self.XRDML_table.attach(self.XRDML_energy_txt,0,1,3,4)
self.XRDML_table.attach(self.XRDML_energy, 1,2,3,4)
self.XRDML_table.attach(self.XRDML_description_txt, 0,1,4,5)
self.XRDML_table.attach(self.XRDML_description, 1,2,4,5)
self.XRDML_table.attach(self.XRDML_xrdml_file_txt, 0,1,5,6)
self.XRDML_table.attach(self.XRDML_xrdml_file_path, 1,2,5,6)
self.XRDML_table.attach(self.XRDML_xrdml_file_browse, 2,3,5,6)
self.XRDML_table.attach(self.XRDML_destination_txt, 0,1,6,7)
self.XRDML_table.attach(self.XRDML_destination_path, 1,2,6,7)
self.XRDML_table.attach(self.XRDML_destination_browse, 2,3,6,7)
#********************************************************************
# The RUN button
#********************************************************************
self.XRDML_run = gtk.Button("Execute")
self.XRDML_run.connect("clicked", self.Convert_Lab_Source)
self.XRDML_run.set_size_request(50,30)
self.XRDML_show_info = gtk.Label()
#********************************************************************
# Pack the XRDML options
#********************************************************************
self.XRDML_conv_box.pack_start(self.Instrument_table, False, False,5)
self.XRDML_conv_box.pack_start(self.XRDML_table, False, False, 10)
self.XRDML_conv_box.pack_start(self.XRDML_run, False, False, 5)
self.XRDML_conv_box.pack_start(self.XRDML_show_info, False,False,10)
self.page_XRDML.pack_start(self.XRDML_conv_box,False, False,20)
#********************************************************************
# Pack the notebook
#********************************************************************
self.notebook.append_page(self.page_GUI, gtk.Label("RSM GUI"))
self.notebook.append_page(self.page_conversion, gtk.Label("ESRF-MCA spec file (Vantec)"))
self.notebook.append_page(self.page_XRDML, gtk.Label("Lab instruments"))
hbox.pack_start(self.notebook)
vbox.pack_start(hbox,True,True,0)
############################### Sliders ######################################
#sld_box = gtk.Fixed()
sld_box = gtk.HBox(False,2)
self.vmin_txt = gtk.Label("Vmin")
self.vmin_txt.set_alignment(0,0.5)
#self.vmin_txt.set_justify(gtk.JUSTIFY_CENTER)
self.vmax_txt = gtk.Label("Vmax")
self.vmax_txt.set_alignment(0,0.5)
#self.vmax_txt.set_justify(gtk.JUSTIFY_CENTER)
self.sld_vmin = gtk.HScale()
self.sld_vmax = gtk.HScale()
self.sld_vmin.set_size_request(200,25)
self.sld_vmax.set_size_request(200,25)
self.sld_vmin.set_range(0,self.vmax)
self.sld_vmax.set_range(0,self.vmax)
self.sld_vmax.set_value(self.vmax)
self.sld_vmin.set_value(0)
self.sld_vmin.connect('value-changed',self.scale_update)
self.sld_vmax.connect('value-changed',self.scale_update)
vmax_spin_adj = gtk.Adjustment(self.vmax, 0, self.vmax_range, 0.5, 10.0, 0.0)
self.vmax_spin_btn = gtk.SpinButton(vmax_spin_adj,1,1)
self.vmax_spin_btn.set_numeric(True)
self.vmax_spin_btn.set_wrap(True)
self.vmax_spin_btn.set_size_request(80,-1)
#self.vmax_spin_btn.set_alignment(0,0.5)
self.vmax_spin_btn.connect('value-changed',self.scale_update_spin)
vmin_spin_adj = gtk.Adjustment(self.vmin, 0, self.vmax_range, 0.5, 10.0, 0.0)
self.vmin_spin_btn = gtk.SpinButton(vmin_spin_adj,1,1)
self.vmin_spin_btn.set_numeric(True)
self.vmin_spin_btn.set_wrap(True)
self.vmin_spin_btn.set_size_request(80,-1)
#self.vmax_spin_btn.set_alignment(0,0.5)
self.vmin_spin_btn.connect('value-changed',self.scale_update_spin)
sld_box.pack_start(self.vmin_txt,False,False,0)
sld_box.pack_start(self.sld_vmin,False,False,0)
sld_box.pack_start(self.vmin_spin_btn,False,False,0)
sld_box.pack_start(self.vmax_txt,False,False,0)
sld_box.pack_start(self.sld_vmax,False,False,0)
sld_box.pack_start(self.vmax_spin_btn,False,False,0)
#sld_box.pack_start(self.slider_reset_btn,False,False,0)
vbox.pack_start(sld_box,False,False,3)
self.add(vbox)
self.connect("destroy", gtk.main_quit)
self.show_all()
#########################################################################################################################
def format_coord(self, x, y):
#***** Add intensity information into the navigation toolbar *******************************
numrows, numcols = (self.gridder.data.T).shape
col,row = xu.analysis.line_cuts.getindex(x, y, self.gridder.xaxis, self.gridder.yaxis)
if col>=0 and col<numcols and row>=0 and row<numrows:
z = self.gridder.data.T[row,col]
return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
else:
return 'x=%1.4f, y=%1.4f'%(x, y)
def pro_format_coord(self,x,y):
return 'x=%.4f, y=%.1f'%(x,y)
    def init_image(self,log=False):
        """Rebuild the main pcolormesh image from the current gridder data.

        log -- when True, plot log10 of the intensity; vmin/vmax are assumed
        to already be in the matching (log or linear) scale (managed by
        scale_plot()).
        """
        # Clear both the image axes and the colorbar axes before replotting.
        self.ax.cla()
        self.cax.cla()
        #print "Initialize image ..."
        #
        #self.clevels = np.linspace(self.vmin, self.vmax, 100)
        if log:
            self.img = self.ax.pcolormesh(self.gridder.xaxis, self.gridder.yaxis, np.log10(self.gridder.data.T),vmin=self.vmin, vmax=self.vmax)
            #self.img = self.ax.contour(self.gridder.xaxis, self.gridder.yaxis, np.log10(self.gridder.data.T), self.clevels, vmin=self.vmin, vmax=self.vmax)
        else:
            self.img = self.ax.pcolormesh(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data.T,vmin=self.vmin, vmax=self.vmax)
            #self.img = self.ax.contour(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data.T, self.clevels, vmin=self.vmin, vmax=self.vmax)
        # Values below vmin are drawn fully transparent.
        self.img.cmap.set_under(alpha=0)
        self.ax.axis([self.gridder.xaxis.min(), self.gridder.xaxis.max(), self.gridder.yaxis.min(), self.gridder.yaxis.max()])
        #self.ax.set_aspect('equal')
        xlabel = r'$Q_x (nm^{-1})$'
        ylabel = r'$Q_z (nm^{-1})$'
        self.ax.set_xlabel(xlabel)
        self.ax.set_ylabel(ylabel)
        self.ax.yaxis.label.set_size(20)
        self.ax.xaxis.label.set_size(20)
        self.ax.set_title(self.rsm_description,fontsize=20)
        # Hook the custom status-bar formatter (shows z under the cursor).
        self.ax.format_coord = self.format_coord
        self.cb = self.fig.colorbar(self.img, cax = self.cax, format="%.1f")#format=fm
        if self.log_scale==1:
            self.cb.set_label(r'$Log_{10}\ (Intensity)\ [arb.\ units]$',fontsize=20)
        else:
            self.cb.set_label(r'$Intensity\ (Counts\ per\ second)$', fontsize=20)
        self.cb.locator = MaxNLocator(nbins=6)
        #self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
        #print "Image is initialized."
def change_aspect_ratio(self,w):
self.graph_aspect = not (self.graph_aspect)
if self.graph_aspect == True:
self.ax.set_aspect('equal')
else:
self.ax.set_aspect('auto')
self.canvas.draw()
    def on_changed_rsm(self,widget,row,col):
        """Tree-view row-activated callback: load the selected HDF5 scan.

        Reads intensity, Qx, Qy, Qz and the description from the first group
        of the file, regrids the data with a 2D gridder and replots.
        """
        #print "************Change RSM*************"
        gc.collect() #Clear unused variables to gain memory
        #************** Remind the structure of these HDF5 files:
        # ************* file=[scan_id={'eta'=[data], '2theta'=[data], 'intensity'=[data], 'description'='RSM 004 ...'}]
        self.clear_notes()
        #self.init_image()
        model = widget.get_model()
        self.rsm_choosen = model[row][0]
        self.rsm = join(self.GUI_current_folder,self.rsm_choosen)#file path
        self.rsm_info = h5.File(self.rsm,'r')#HDF5 object that collects all information of this scan
        #self.ax.set_title(self.rsm_choosen,fontsize=20)
        ### Data Loading ##
        # Only the first group (scan) of the file is used.
        groups = self.rsm_info.keys()
        scan = groups[0]
        self.scan = self.rsm_info[scan]
        self.data = self.scan.get('intensity').value
        self.Qx = self.scan.get('Qx').value
        self.Qy = self.scan.get('Qy').value
        self.Qz = self.scan.get('Qz').value
        self.rsm_description = self.scan.get('description').value
        self.rsm_info.close()
        #print "Data are successfully loaded."
        # Regrid the (Qx, Qz) scatter onto a regular grid of the same shape.
        self.gridder = xu.Gridder2D(self.data.shape[0],self.data.shape[1])
        #print "Gridder is calculated."
        # MM = self.data.max()
        # M = np.log10(MM)
        # data = flat_data(self.data,0,M)
        self.gridder(self.Qx, self.Qz, self.data)
        self.data = self.gridder.data.T
        self.vmin=self.data.min()
        self.vmax=self.data.max()
        #print "Starting scale_plot()"
        self.scale_plot()
        #self.slider_update()
    def scale_plot(self):
        """Replot the image in log or linear scale per the toggle button.

        ``self.log_scale`` (0 = linear, 1 = log) records the scale the current
        slider values are expressed in, so vmin/vmax are converted exactly
        once when switching.  Slider ranges are updated to the new data max.
        """
        #print "Scale_plot() is called."
        data = self.data.copy()
        #self.init_image()
        if self.linear_scale_btn.get_active():
            # Switching to (or staying in) log display.
            self.linear_scale_btn.set_label("--> Linear scale")
            data = np.log10(data)
            #print data.max()
            self.init_image(log=True)
            actual_vmin = self.sld_vmin.get_value()
            actual_vmax = self.sld_vmax.get_value()
            # Convert only if the previous state was linear.
            self.vmax = np.log10(actual_vmax) if self.log_scale == 0 else actual_vmax
            if actual_vmin == 0:
                self.vmin=0
            elif actual_vmin >0:
                self.vmin = np.log10(actual_vmin) if self.log_scale == 0 else actual_vmin
            self.vmax_range = data.max()
            self.log_scale = 1
            #log=True
        else:
            # Switching to (or staying in) linear display.
            self.linear_scale_btn.set_label("--> Log scale")
            self.init_image(log=False)
            #print "Calculating min max and update slider..."
            actual_vmin = self.sld_vmin.get_value()
            actual_vmax = self.sld_vmax.get_value()
            #print "Actual vmax: ",actual_vmax
            if self.log_scale == 1:
                self.vmax = np.power(10.,actual_vmax)
            else:
                self.vmax = actual_vmax
            self.vmax_range = data.max()
            if actual_vmin ==0:
                self.vmin = 0
            elif actual_vmin>0:
                if self.log_scale == 0:
                    self.vmin = actual_vmin
                elif self.log_scale == 1:
                    self.vmin = np.power(10,actual_vmin)
            self.log_scale = 0
            #log=False
        #print "Min max are calculated."
        self.sld_vmax.set_range(-6,self.vmax_range)
        self.sld_vmin.set_range(-6,self.vmax_range)
        #self.init_image(log)
        self.slider_update()
def log_update(self,widget):
self.scale_plot()
if self.log_scale==1:
self.cb.set_label(r'$Log_{10}\ (Counts\ per\ second)\ [arb.\ units]$',fontsize=18)
else:
self.cb.set_label(r'$Intensity\ (Counts\ per\ second)$', fontsize=18)
#self.slider_update()
def scale_update(self,widget):
#print "Scale_update() is called."
self.vmin = self.sld_vmin.get_value()
self.vmax = self.sld_vmax.get_value()
self.vmin_spin_btn.set_value(self.vmin)
self.vmax_spin_btn.set_value(self.vmax)
self.slider_update()
def scale_update_spin(self,widget):
#print "Spin_update() is called"
self.vmin = self.vmin_spin_btn.get_value()
self.vmax = self.vmax_spin_btn.get_value()
self.slider_update()
    def slider_update(self):
        """Push self.vmin/self.vmax into sliders, spin buttons and the image clim.

        NOTE(review): set_value() on the sliders fires their 'value-changed'
        handler (scale_update), which calls back into slider_update(); the
        recursion terminates because the values no longer change -- confirm
        before reordering these calls.
        """
        #print "slider_update() is called"
        #self.img.set_clim(self.vmin, self.vmax)
        self.sld_vmax.set_value(self.vmax)
        self.sld_vmin.set_value(self.vmin)
        # Finer spin-button steps in log display, coarser in linear.
        if self.linear_scale_btn.get_active():
            self.vmin_spin_btn.set_adjustment(gtk.Adjustment(self.vmin, 0, self.vmax_range, 0.1, 1.0, 0))
            self.vmax_spin_btn.set_adjustment(gtk.Adjustment(self.vmax, 0, self.vmax_range, 0.1, 1.0, 0))
        else:
            self.vmin_spin_btn.set_adjustment(gtk.Adjustment(self.vmin, 0, self.vmax_range, 10, 100, 0))
            self.vmax_spin_btn.set_adjustment(gtk.Adjustment(self.vmax, 0, self.vmax_range, 10, 100, 0))
        #self.vmax_spin_btn.update()
        self.img.set_clim(self.vmin, self.vmax)
        self.ax.relim()
        self.canvas.draw()
        #print "slider_update() stoped."
def choose_folder(self, w):
dialog = gtk.FileChooserDialog(title="Select a data folder",action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.GUI_current_folder)
response=dialog.run()
if response==gtk.RESPONSE_OK:
folder=dialog.get_filename()
folder = folder.decode('utf8')
folder_basename = folder.split("/")[-1]
#print folder_basename
self.store= [i for i in listdir(folder) if isfile(join(folder,i)) and i.endswith(".data") or i.endswith(".h5")]
self.GUI_current_folder = folder
#print store
if len(self.store)>0:
self.list_store.clear()
for i in self.store:
self.list_store.append([i])
self.TVcolumn.set_title(folder_basename)
else:
pass
else:
pass
dialog.destroy()
def folder_update(self, w):
folder = self.GUI_current_folder
if folder is not os.getcwd():
store= [i for i in listdir(folder) if isfile(join(folder,i)) and i.endswith(".data") or i.endswith(".h5")]
self.store=[]
self.list_store.clear()
for i in store:
self.list_store.append([i])
self.store.append(i)
    def arbitrary_line_cut(self, x, y):
        """Extract an intensity profile along the segment (x[0],y[0])-(x[1],y[1]).

        x, y -- two-element sequences in reciprocal-space (Q) coordinates.
        Returns (coor_X, coor_Y, data_X, data_Y); data_X and data_Y reference
        the SAME interpolated intensity array (one cut, two coordinate axes).
        """
        #**** num: integer - number of points to be extracted
        #**** convert Q coordinates to pixel coordinates
        x0, y0 = xu.analysis.line_cuts.getindex(x[0], y[0], self.gridder.xaxis, self.gridder.yaxis)
        x1, y1 = xu.analysis.line_cuts.getindex(x[1], y[1], self.gridder.xaxis, self.gridder.yaxis)
        num = int(np.hypot(x1-x0, y1-y0)) #number of points that will be plotted
        xi, yi = np.linspace(x0, x1, num), np.linspace(y0, y1, num)
        # Bilinear-ish interpolation of the gridded data along the pixel path.
        profiles_data_X = profiles_data_Y = scipy.ndimage.map_coordinates(self.gridder.data, np.vstack((xi,yi)))
        coor_X_export,coor_Y_export = np.linspace(x[0], x[1], num), np.linspace(y[0], y[1], num)
        #coor_X_export = np.sort(coor_X_export)
        #coor_Y_export = np.sort(coor_Y_export)
        return coor_X_export,coor_Y_export, profiles_data_X, profiles_data_Y
    def boundary_rectangles(self, x, y):
        """Build the ROI polygon around an arbitrary line cut.

        IN : x[0,1], y[0,1]: endpoints of the line cut (Q coordinates,
             arbitrary direction)
        OUT: roi_box    -- matplotlib Path of the ROI corners in PIXEL indices
             bound_rect -- [x_min, x_max, y_min, y_max] limits in Q units
        The ROI outline is also drawn on the main axes (white polygon).
        """
        x = np.asarray(x)
        y = np.asarray(y)
        alpha = np.arctan(abs((y[1]-y[0])/(x[1]-x[0]))) # inclined angle of the ROI w.r.t the horizontal line. Attention to the sign of alpha
        #print np.degrees(alpha)
        # Half-width of the integration band.
        T = self.largueur_int/2.
        # Widen horizontally for steep cuts, vertically for shallow ones.
        if np.degrees(alpha)>55.0:
            inc_x = 1
            inc_y = 0
        else:
            inc_x = 0
            inc_y = 1
        y1 = y + T*inc_y
        y2 = y - T*inc_y
        x1 = x + T*inc_x
        x2 = x - T*inc_x
        #These positions are in reciprocal space units. The boundary order will be: 1-2-2-1
        roi_rect = [[y1[0],x1[0]],[y2[0],x2[0]],[y2[1],x2[1]],[y1[1],x1[1]],[y1[0],x1[0]]]
        roi_rect = path.Path(roi_rect)
        #***************** Get the corresponding index of these points ***************************
        i1,j1 = xu.analysis.line_cuts.getindex(x1[0], y1[0], self.gridder.xaxis, self.gridder.yaxis)
        i2,j2 = xu.analysis.line_cuts.getindex(x2[0], y2[0], self.gridder.xaxis, self.gridder.yaxis)
        i3,j3 = xu.analysis.line_cuts.getindex(x2[1], y2[1], self.gridder.xaxis, self.gridder.yaxis)
        i4,j4 = xu.analysis.line_cuts.getindex(x1[1], y1[1], self.gridder.xaxis, self.gridder.yaxis)
        roi_box = [[j1,i1],[j2,i2],[j3,i3],[j4,i4],[j1,i1]]
        roi_box = path.Path(roi_box)
        #******* Calculate the limit boundary rectangle
        y_tmp = np.vstack((y1, y2))
        x_tmp = np.vstack((x1, x2))
        y_min = y_tmp.min()
        y_max = y_tmp.max()
        x_min = x_tmp.min()
        x_max = x_tmp.max()
        bound_rect = [x_min, x_max, y_min, y_max]
        bound_rect = np.asarray(bound_rect)
        # Draw the ROI outline; remember it so clear_notes() can remove it.
        contours = roi_rect.vertices
        p=self.ax.plot(contours[:,1], contours[:,0], linewidth=1.5, color='white')
        self.polygons.append(p[0])
        self.canvas.draw()
        return roi_box, bound_rect
def extract_roi_data(self, roi_box, bound_rect):
#***** Extraction of the ROI defined by the ROI box ******************
qx_min = bound_rect[0]
qx_max = bound_rect[1]
qz_min = bound_rect[2]
qz_max = bound_rect[3]
#***** Getting index of the boundary points in order to calculate the length of the extracted array
ixmin, izmin = xu.analysis.line_cuts.getindex(qx_min, qz_min, self.gridder.xaxis, self.gridder.yaxis)
ixmax, izmax = xu.analysis.line_cuts.getindex(qx_max, qz_max, self.gridder.xaxis, self.gridder.yaxis)
x_steps = ixmax - ixmin +1
z_steps = izmax - izmin +1
qx_coor = np.linspace(qx_min, qx_max, x_steps)
qz_coor = np.linspace(qz_min, qz_max, z_steps)
ROI = np.zeros(shape=(x_steps))
#****** Extract Qx line cuts ************************
for zi in range(izmin, izmax+1):
qx_int = self.gridder.data[ixmin:ixmax+1,zi]
#****** if the point is inside the ROI box: point = 0
inpoints = []
for i in range(ixmin,ixmax+1):
inpoint= roi_box.contains_point([zi,i])
inpoints.append(inpoint)
for b in range(len(inpoints)):
if inpoints[b]==False:
qx_int[b] = 0
ROI = np.vstack((ROI, qx_int))
ROI = np.delete(ROI, 0, 0) #Delete the first line which contains zeros
#****** Sum them up! Return Qx, Qz projection zones and Qx,Qz intensity
qx_ROI = ROI.sum(axis=0)/ROI.shape[0]
qz_ROI = ROI.sum(axis=1)/ROI.shape[1]
return qx_coor, qx_ROI, qz_coor, qz_ROI
    def plot_profiles(self, x, y, cross_line=True):
        """Extract, fit and plot Qx/Qz intensity profiles.

        cross_line=True : x, y are one-element sequences; horizontal and
        vertical cuts through (x[0], y[0]) are taken with xrayutilities.
        cross_line=False: x, y are two-element sequences defining an arbitrary
        line cut (or a rotated ROI when the rectangle button is active).
        Side effects: stores coordinates/data on self, draws both profile
        axes and fills the fitted-parameter text fields.
        """
        if cross_line:
            """Drawing lines where I want to plot profiles"""
            # ******** if this is not an arbitrary profile, x and y are not lists but just one individual point
            x=x[0]
            y=y[0]
            hline = self.ax.axhline(y, color='k', ls='--', lw=1)
            self.lines.append(hline)
            vline = self.ax.axvline(x, color='k', ls='--', lw=1)
            self.lines.append(vline)
            """Getting data to be plotted"""
            self.coor_X_export, self.profiles_data_X = xu.analysis.line_cuts.get_qx_scan(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data, y, qrange=self.largueur_int)
            self.coor_Y_export, self.profiles_data_Y = xu.analysis.line_cuts.get_qz_scan(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data, x, qrange=self.largueur_int)
            xc = x
            yc = y
            """ Fitting information """
            # Fit a window of +/- fitting_width (in Q) around the cut centre.
            ix,iy = xu.analysis.line_cuts.getindex(x, y, self.gridder.xaxis, self.gridder.yaxis)
            ix_left,iy = xu.analysis.line_cuts.getindex(x-self.fitting_width, y, self.gridder.xaxis, self.gridder.yaxis)
            qx_2_fit = self.coor_X_export[ix_left:ix*2-ix_left+1]
            qx_int_2_fit = self.profiles_data_X[ix_left:2*ix-ix_left+1]
            X_fitted_params, X_fitted_data = fit(qx_2_fit, qx_int_2_fit,xc, cross_line)
            ####################axX.plot(qx_2_fit, qx_fit_data, color='red',linewidth=2)
            ix,iy_down = xu.analysis.line_cuts.getindex(x, y-self.fitting_width, self.gridder.xaxis, self.gridder.yaxis)
            qz_2_fit = self.coor_Y_export[iy_down:iy*2-iy_down+1]
            qz_int_2_fit = self.profiles_data_Y[iy_down:iy*2-iy_down+1]
            Y_fitted_params, Y_fitted_data = fit(qz_2_fit, qz_int_2_fit,yc, cross_line)
            ####################axY.plot(qz_2_fit, qz_fit_data, color='red',linewidth=2)
        else:
            #**** extract arbitrary line cut
            #**** extract one single line cut:
            if not self.rectangle_profiles_btn.get_active():
                self.coor_X_export, self.coor_Y_export, self.profiles_data_X, self.profiles_data_Y = self.arbitrary_line_cut(x,y)
            else:
                roi_box,bound_rect = self.boundary_rectangles(x,y)
                self.coor_X_export, self.profiles_data_X, self.coor_Y_export, self.profiles_data_Y = self.extract_roi_data(roi_box, bound_rect)
            # Peak position estimated from the sorted coordinates at the
            # intensity maximum.
            tmpX = np.sort(self.coor_X_export)
            tmpY = np.sort(self.coor_Y_export)
            xc = tmpX[self.profiles_data_X.argmax()]
            yc = tmpY[self.profiles_data_Y.argmax()]
            """ Fitting information """
            X_fitted_params, X_fitted_data = fit(self.coor_X_export, self.profiles_data_X, xc, not cross_line)
            Y_fitted_params, Y_fitted_data = fit(self.coor_Y_export, self.profiles_data_Y, yc, not cross_line)
            qx_2_fit = self.coor_X_export
            qz_2_fit = self.coor_Y_export
        """ Plotting profiles """
        self.profiles_ax1.cla()
        self.profiles_ax2.cla()
        self.profiles_ax1.format_coord = self.pro_format_coord
        self.profiles_ax2.format_coord = self.pro_format_coord
        #self.cursor_pro1 = Cursor(self.profiles_ax1, color='k', linewidth=1, useblit=True)
        #self.cursor_pro2 = Cursor(self.profiles_ax2, color='k', linewidth=1, useblit=True)
        self.profiles_ax1.plot(self.coor_Y_export, self.profiles_data_Y, color='blue', lw=3)
        self.profiles_ax1.plot(qz_2_fit, Y_fitted_data, color='red', lw=1.5, alpha=0.8)
        self.profiles_ax2.plot(self.coor_X_export, self.profiles_data_X, color='blue', lw=3)
        self.profiles_ax2.plot(qx_2_fit, X_fitted_data, color='red', lw=1.5, alpha=0.8)
        self.profiles_ax1.set_title("Qz profile", size=14)
        self.profiles_ax2.set_title("Qx profile", size=14)
        self.profiles_canvas.draw()
        # Show the fitted results
        self.Qz_fitted_y0.set_text("%.4f"%Y_fitted_params['y0'].value)
        self.Qz_fitted_xc.set_text("%.4f"%Y_fitted_params['xc'].value)
        self.Qz_fitted_A.set_text("%.4f"%Y_fitted_params['A'].value)
        self.Qz_fitted_w.set_text("%.4f"%Y_fitted_params['w'].value)
        self.Qz_fitted_mu.set_text("%.4f"%Y_fitted_params['mu'].value)
        self.Qx_fitted_y0.set_text("%.4f"%X_fitted_params['y0'].value)
        self.Qx_fitted_xc.set_text("%.4f"%X_fitted_params['xc'].value)
        self.Qx_fitted_A.set_text("%.4f"%X_fitted_params['A'].value)
        self.Qx_fitted_w.set_text("%.4f"%X_fitted_params['w'].value)
        self.Qx_fitted_mu.set_text("%.4f"%X_fitted_params['mu'].value)
        self.profiles_refresh()
        self.canvas.draw()
def draw_pointed(self, x, y, finished=False):
#if len(self.lines)>0:
# self.clear_notes()
p=self.ax.plot(x,y,'ro')
self.points.append(p[0])
if finished:
l=self.ax.plot(self.arb_lines_X, self.arb_lines_Y, '--',linewidth=1.5, color='white')
self.lines.append(l[0])
self.canvas.draw()
def profiles_refresh(self):
""" """
if self.profiles_log_btn.get_active():
self.profiles_ax1.set_yscale('log')
self.profiles_ax2.set_yscale('log')
else:
self.profiles_ax1.set_yscale('linear')
self.profiles_ax2.set_yscale('linear')
self.profiles_canvas.draw()
#return
    def profiles_update(self, widget):
        # GTK callback for the profile log-scale toggle; delegates the work.
        self.profiles_refresh()
def profiles_export(self,widget):
""" Export X,Y profiles data in the same folder as the EDF image """
proX_fname = self.rsm.split(".")[0]+"_Qx_profile.dat"
proY_fname = self.rsm.split(".")[0]+"_Qz_profile.dat"
proX_export= np.vstack([self.coor_X_export, self.profiles_data_X])
proX_export=proX_export.T
proY_export= np.vstack([self.coor_Y_export, self.profiles_data_Y])
proY_export=proY_export.T
try:
np.savetxt(proX_fname, proX_export)
np.savetxt(proY_fname, proY_export)
self.popup_info('info','Data are successfully exported!')
except:
self.popup_info('error','ERROR! Data not exported!')
    def on_press(self, event):
        """Mouse-press dispatcher on the main axes.

        Right click (button 3) with the X/Y-profile mode: cross-line cuts.
        Left click (button 1) with arbitrary/rectangle mode: collect the two
        endpoints of a line cut.  Middle click (button 2): clear annotations.
        NOTE(review): the bare excepts also catch errors raised inside
        plot_profiles(), not only bad float input; and ``finished`` is unbound
        if arb_lines_X ever holds >2 points -- confirm intended.
        """
        #******************** Plot X,Y cross profiles ***************************************************
        if (event.inaxes == self.ax) and (event.button==3) and self.plotXYprofiles_btn.get_active():
            x = event.xdata
            y = event.ydata
            xx=[]
            yy=[]
            xx.append(x)
            yy.append(y)
            self.clear_notes()
            try:
                self.largueur_int = float(self.int_range.get_text())
                self.fitting_width = float(self.fitting_range.get_text())
                self.plot_profiles(xx,yy,cross_line=True)
            except:
                self.popup_info("error","Please check that you have entered all the parameters correctly !")
        #******************** Plot arbitrary profiles ***************************************************
        elif (event.inaxes == self.ax) and (event.button==1) and (self.arbitrary_profiles_btn.get_active() or self.rectangle_profiles_btn.get_active()):
            #self.clear_notes()
            try:
                self.largueur_int = float(self.int_range.get_text())
                self.fitting_width = float(self.fitting_range.get_text())
            except:
                self.popup_info("error","Please check that you have entered all the parameters correctly !")
            self.arb_line_points +=1
            #print "Number of points clicked: ",self.arb_line_points
            # A third click restarts the two-point selection.
            if self.arb_line_points>2:
                self.clear_notes()
                self.arb_line_points=1
            x = event.xdata
            y = event.ydata
            self.arb_lines_X.append(x)
            self.arb_lines_Y.append(y)
            if len(self.arb_lines_X)<2:
                finished=False
            elif len(self.arb_lines_X)==2:
                finished = True
            self.draw_pointed(x,y,finished)#If finished clicking, connect the two points by a line
            if finished:
                self.plot_profiles(self.arb_lines_X, self.arb_lines_Y, cross_line=False)
                self.arb_lines_X=[]
                self.arb_lines_Y=[]
            #self.canvas.draw()
        #******************** Clear cross lines in the main image ****************************************
        elif event.button==2:
            self.clear_notes()
    def profile_press(self, event):
        """Mouse handler on the profile axes for thickness-fringe analysis.

        Right click (button 3): mark a fringe position (green vline).
        Left click (button 1): open the fringe linear-regression popup using
        all marked positions.  Middle click (button 2): FFT popup of the
        profile under the cursor.
        """
        # Pick the data/labels matching the axes the event occurred in.
        if event.inaxes == self.profiles_ax1:
            draw_fringes = True
            ax = self.profiles_ax1
            X_data = self.coor_Y_export
            Y_data = self.profiles_data_Y
            xlabel = r'$Q_z (nm^{-1})$'
            title = "Linear regression of Qz fringes"
            title_FFT = "Fast Fourier Transform of Qz profiles"
            xlabel_FFT= "Period (nm)"
        elif event.inaxes == self.profiles_ax2:
            draw_fringes = True
            ax = self.profiles_ax2
            X_data = self.coor_X_export
            Y_data = self.profiles_data_X
            xlabel = r'$Q_x (nm^{-1})$'
            title = "Linear regression of Qx fringes"
            title_FFT = "Fast Fourier Transform of Qx profiles"
            xlabel_FFT= "Period (nm)"
        else:
            draw_fringes = False
        if draw_fringes and (event.button==1):
            if len(self.profiles_fringes)>0:
                self.profiles_fringes = np.asarray(self.profiles_fringes)
                self.profiles_fringes = np.sort(self.profiles_fringes)
                fringes_popup = PopUpFringes(self.profiles_fringes, xlabel, "Fringes order", title)
                self.profiles_fringes=[]
                self.clear_notes()
        elif draw_fringes and (event.button == 3):
            vline=ax.axvline(event.xdata, linewidth=2, color="green")
            self.lines.append(vline)
            self.profiles_fringes.append(event.xdata)
        elif draw_fringes and event.button == 2:
            XF,YF = Fourier(X_data, Y_data)
            popup_window=PopUpImage(XF, YF, xlabel_FFT, "Normalized intensity", title_FFT)
        self.profiles_canvas.draw()
        #plt.clf()
def clear_notes(self):
"""
print "Number of notes: ",len(self.my_notes)
print "Number of lines: ",len(self.lines)
print "Number of points: ",len(self.points)
print "Number of polygons: ",len(self.polygons)
"""
if len(self.my_notes)>0:
for txt in self.my_notes:
txt.remove()
if len(self.lines)>0:
for line in self.lines:
line.remove()
if len(self.points)>0:
for p in self.points:
p.remove()
if len(self.polygons)>0:
for p in self.polygons:
p.remove()
self.canvas.draw()
self.my_notes = []
#self.profiles_notes = []
self.lines=[]
self.points=[]
self.polygons=[]
self.arb_lines_X=[]
self.arb_lines_Y=[]
self.arb_line_points = 0
    def on_motion(self,event):
        """Rubber-band preview while choosing the second point of a line cut.

        NOTE(review): two apparent bugs here -- (1) clear_notes() empties
        self.arb_lines_X and resets arb_line_points to 0 BEFORE
        self.arb_lines_X[0] is read, so this branch raises IndexError on its
        first hit; (2) ax.plot returns a LIST of Line2D, and appending the
        list (not line[0]) makes clear_notes() fail on list.remove().
        A correct fix needs a redesign of the click-state handling -- confirm
        whether this handler is actually connected before changing it.
        """
        print "Mouse moved !"
        if event.inaxes == self.ax and self.arbitrary_profiles_btn.get_active() and self.arb_line_points==1:
            x = event.xdata
            y = event.ydata
            self.clear_notes()
            line = self.ax.plot([self.arb_lines_X[0], x], [self.arb_lines_Y[0],y], 'ro-')
            self.lines.append(line)
            self.canvas.draw()
def on_release(self, event):
if event.inaxes == self.ax:
if self.mouse_moved==True:
self.mouse_moved = False
def popup_info(self,info_type,text):
""" info_type = WARNING, INFO, QUESTION, ERROR """
if info_type.upper() == "WARNING":
mess_type = gtk.MESSAGE_WARNING
elif info_type.upper() == "INFO":
mess_type = gtk.MESSAGE_INFO
elif info_type.upper() == "ERROR":
mess_type = gtk.MESSAGE_ERROR
elif info_type.upper() == "QUESTION":
mess_type = gtk.MESSAGE_QUESTION
self.warning=gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT, mess_type, gtk.BUTTONS_CLOSE,text)
self.warning.run()
self.warning.destroy()
#********************************************************************
# Functions for the Spec-HDF5 data conversion
#********************************************************************
    def select_file(self,widget,path,label):
        """Open a file-chooser and store the selection.

        path  -- gtk.Entry that receives the chosen file name.
        label -- which attribute to set: "A" attenuation file, "S" spec/source
                 data file, "M" MCA file; anything else is ignored.
        """
        dialog = gtk.FileChooserDialog("Select file",None,gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        dialog.set_current_folder(self.DATA_current_folder)
        response = dialog.run()
        if response == gtk.RESPONSE_OK:
            file_choosen = dialog.get_filename()
            path.set_text(file_choosen)
            # Remember the folder so the next dialog opens in the same place.
            self.DATA_current_folder = os.path.dirname(file_choosen)
            if label == "A":
                self.attenuation_file = file_choosen.decode('utf8')
            elif label == "S":
                self.spec_file = file_choosen.decode('utf8')
            elif label == "M":
                self.mca_file = file_choosen.decode('utf8')
            else:
                pass
        dialog.destroy()
    def select_folder(self, widget, path, label):
        """Open a folder-chooser and store the selection.

        path  -- gtk.Entry that receives the chosen folder.
        label -- "D" stores the folder as the conversion destination
                 (self.des_folder); anything else is ignored.
        """
        dialog = gtk.FileChooserDialog(title="Select folder",action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        dialog.set_current_folder(self.DATA_current_folder)
        response=dialog.run()
        if response==gtk.RESPONSE_OK:
            folder=dialog.get_filename()
            path.set_text(folder)
            self.DATA_current_folder = folder.decode('utf8')
            if label == "D":
                self.des_folder = folder.decode('utf8')
            else:
                pass
        dialog.destroy()
def HKL2Q(self,H,K,L,a):
""" Q// est dans la direction [110], Qz // [001]"""
Qx = H*np.sqrt(2.)/a
Qy = K*np.sqrt(2.)/a
Qz = L/a
return [Qx, Qy, Qz]
def loadAmap(self,scanid,specfile,mapData,retard):
try:
psdSize = float(self.t1_entry.get_text())
Nchannels = int(self.t2_entry.get_text())
psdMin = int(self.t5_entry1.get_text())
psdMax = int(self.t5_entry2.get_text())
psd0 = float(self.t3_entry.get_text())
pixelSize = psdSize/Nchannels
pixelPerDeg = float(self.t4_entry.get_text())
distance = pixelSize * pixelPerDeg / np.tan(np.radians(1.0)) # sample-detector distance in mm
psdor = self.t6_entry.get_active() #psd orientation (up, down, in, out)
if psdor == 0:
psdor = 'z+'
elif psdor == 1:
psdor = 'z-'
else:
psdor = 'unknown'
energy = float(self.e2_entry.get_text())
filter_data = self.attenuation_file
monitor_col = self.e5_entry.get_text()
foil_col = self.e4_entry.get_text()
monitor_ref = float(self.e6_entry.get_text())
#****************** Calculation ************************
headers, scan_kappa = SP.ReadSpec(specfile,scanid)
Eta = scan_kappa['Eta']
print Eta.shape
tth = headers['P'][0]
omega = headers['P'][1]
tth = float(tth)
omega = float(omega)
print "Del: %.2f, Eta: %.2f"%(tth,omega)
#Si = xu.materials.Si
hxrd = xu.HXRD(self.substrate.Q(self.in_plane), self.substrate.Q(self.out_of_plane), en = energy)
hxrd.Ang2Q.init_linear(psdor,psd0, Nchannels, distance=distance, pixelwidth=pixelSize, chpdeg=pixelPerDeg)
HKL = hxrd.Ang2HKL(omega, tth)
HKL = np.asarray(HKL)
HKL = HKL.astype(int)
print "HKL = ",HKL
H=K=L=np.zeros(shape=(0,Nchannels))
for i in range(len(Eta)):
om=Eta[i]
q=hxrd.Ang2HKL(om,tth,mat=self.substrate,dettype='linear')
H = np.vstack((H,q[0]))
K = np.vstack((K,q[1]))
L = np.vstack((L,q[2]))
filtre_foil = scan_kappa[foil_col]
filtre = filtre_foil.copy()
monitor= scan_kappa[monitor_col]
foil_data = np.loadtxt(filter_data)
for f in xrange(foil_data.shape[0]):
coef = filtre_foil == f
filtre[coef] = foil_data[f,1]
#print filtre
mapData = mapData + 1e-6
if retard:
for i in range(len(filtre)-1):
mapData[i+1] = mapData[i+1]*filtre[i]
else:
for i in range(len(filtre)):
mapData[i] = mapData[i]*filtre[i]
for i in range(len(monitor)):
mapData[i] = mapData[i]*monitor_ref/monitor[i]
mapData = mapData[:,psdMin:psdMax]
H = H[:,psdMin:psdMax]
K = K[:,psdMin:psdMax]
L = L[:,psdMin:psdMax]
########## Correction d'offset ###############
x,y=np.unravel_index(np.argmax(mapData),mapData.shape)
H_sub = H[x,y]
K_sub = K[x,y]
L_sub = L[x,y]
H_offset = HKL[0] - H_sub
K_offset = HKL[1] - K_sub
L_offset = HKL[2] - L_sub
H = H + H_offset
K = K + K_offset
L = L + L_offset
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
Q = self.HKL2Q(H, K, L, a)
return Q,mapData
except:
self.popup_info("warning", "Please make sure that you have correctly entered the all parameters.")
return None,None
    def gtk_waiting(self):
        # Drain the GTK event queue so the UI stays responsive while a long
        # conversion runs on the main thread.
        while gtk.events_pending():
            gtk.main_iteration()
def Change_Lab_Instrument(self, widget):
    """Combo-box callback: remember the selected instrument and relabel
    the file-chooser widgets to match that instrument's file format.

    Args:
        widget: the GTK widget that emitted the signal (unused).
    """
    self.choosen_instrument = self.Instrument.get_active_text()
    print "I choose ",self.choosen_instrument
    if self.choosen_instrument == "Bruker":
        # Bruker diffractometers deliver binary RAW files
        self.XRDML_xrdml_file_txt.set_text("Select RAW file: ")
        self.XRDML_xrdml_file_browse.set_label("Browse RAW file")
    elif self.choosen_instrument == "PANalytical":
        # PANalytical diffractometers deliver XRDML files
        self.XRDML_xrdml_file_txt.set_text("Select XRDML file: ")
        self.XRDML_xrdml_file_browse.set_label("Browse XRDML file")
def Convert_Lab_Source(self, widget):
    """Read the conversion parameters from the UI, set up the
    xrayutilities experiment geometry, and dispatch to the converter
    matching the chosen instrument (Bruker RAW or PANalytical XRDML).

    Args:
        widget: the GTK widget that emitted the signal (unused).
    """
    print "Instrument chosen: ",self.choosen_instrument
    energy = self.XRDML_energy.get_text()
    if energy == "":
        energy = 8048  # default beam energy if field left empty -- presumably Cu K-alpha in eV, TODO confirm
    else:
        energy = float(energy)
    # wavelength from energy; /10 presumably converts Angstrom -> nm -- verify
    self.lam = xu.lam2en(energy)/10
    HKL = self.XRDML_reflection.get_text()
    if HKL == "":
        # no reference reflection given: skip the offset correction later
        self.offset_correction = False
    else:
        self.offset_correction = True
        HKL = HKL.split()
        HKL = np.asarray([int(i) for i in HKL])
        self.HKL = HKL
    substrate = self.XRDML_substrate.get_active_text()
    if substrate == "-- other":
        substrate = self.XRDML_substrate_other.get_text()
    # NOTE(review): exec of a user-typed string; if only attribute names are
    # expected, getattr(xu.materials, substrate) would be safer -- confirm inputs
    command = "self.substrate = xu.materials."+substrate
    exec(command)
    in_plane = self.XRDML_substrate_inplane.get_text()
    out_of_plane = self.XRDML_substrate_outplane.get_text()
    if in_plane != "" and out_of_plane != "":
        # user supplied a full sample orientation (in-plane + out-of-plane)
        in_plane = in_plane.split()
        self.in_plane = np.asarray([int(i) for i in in_plane])
        out_of_plane = out_of_plane.split()
        self.out_of_plane = np.asarray([int(i) for i in out_of_plane])
        self.has_orientation_matrix = True
        self.experiment = xu.HXRD(self.substrate.Q(self.in_plane),self.substrate.Q(self.out_of_plane), en=energy)
    else:
        # fall back to the default (1,1,0)/(0,0,1) orientation
        self.has_orientation_matrix = False
        self.experiment = xu.HXRD(self.substrate.Q(1,1,0),self.substrate.Q(0,0,1), en=energy)
    if self.choosen_instrument == "Bruker":
        self.Bruker2HDF()
    elif self.choosen_instrument == "PANalytical":
        self.XRDML2HDF()
def XRDML2HDF(self):
    """Convert a PANalytical XRDML reciprocal-space map to an HDF5 file.

    Reads the XRDML scan referenced by ``self.spec_file``, converts the
    angular data to H,K,L (using the orientation matrix when available,
    otherwise a symmetric-geometry approximation), optionally applies an
    offset correction anchored at the intensity maximum, and writes the
    intensity plus Qx/Qy/Qz datasets into ``<description>.h5`` in the
    destination folder. Errors are reported through a warning popup.
    """
    try:
        xrdml_file = self.spec_file
        a = self.substrate._geta1()[0]  # lattice parameter in Angstrom
        a = a/10.  # presumably Angstrom -> nm, consistent with self.lam -- TODO confirm
        description = self.XRDML_description.get_text()
        self.XRDML_show_info.set_text("Reading XRDML data ...")
        self.gtk_waiting()
        dataFile = xu.io.XRDMLFile(xrdml_file)
        scan = dataFile.scan
        omega_exp = scan['Omega']
        tth_exp = scan['2Theta']
        data = scan['detector']
        if self.has_orientation_matrix:
            omega, tth, psd = xu.io.getxrdml_map(xrdml_file)
            [qx, qy, qz] = self.experiment.Ang2Q(omega, tth)
            mapData = psd.reshape(data.shape)
            # BUGFIX: H previously used qy (duplicating K) while qx was
            # computed but never used; H must come from qx.
            H = qx.reshape(data.shape)
            K = qy.reshape(data.shape)
            L = qz.reshape(data.shape)
        else:
            # no orientation matrix: symmetric coplanar approximation
            mapData = data
            psi = omega_exp - tth_exp/2.
            Qmod = 2.*np.sin(np.radians(tth_exp/2.))/self.lam
            Qx = Qmod * np.sin(np.radians(psi))
            Qz = Qmod * np.cos(np.radians(psi))
            # in-plane component split equally between H and K (default
            # [110]/[001] orientation assumed by Convert_Lab_Source)
            H = K = Qx*a/np.sqrt(2.0)
            L = Qz*a
        ########## Offset correction ###############
        if self.offset_correction:
            # anchor: shift H,K,L so the intensity maximum sits at self.HKL
            x, y = np.unravel_index(np.argmax(mapData), mapData.shape)
            H_sub = H[x, y]
            K_sub = K[x, y]
            L_sub = L[x, y]
            H_offset = self.HKL[0] - H_sub
            K_offset = self.HKL[1] - K_sub
            L_offset = self.HKL[2] - L_sub
            H = H + H_offset
            K = K + K_offset
            L = L + L_offset
        Q = self.HKL2Q(H, K, L, a)
        self.XRDML_show_info.set_text("XRDML data are successfully loaded.")
        self.gtk_waiting()
        if description == "":
            description = "XRDML_Map"
        h5file = description+".h5"
        info = "\nSaving file: %s"%(h5file)
        self.XRDML_show_info.set_text(info)
        self.gtk_waiting()
        h5file = join(self.des_folder, h5file)
        if os.path.isfile(h5file):
            # remove a pre-existing file; os.remove is portable and, unlike
            # the previous os.system("rm -f %s"), safe for paths with spaces
            os.remove(h5file)
        h5file = h5.File(h5file, "w")
        s = h5file.create_group(description)
        s.create_dataset('intensity', data=mapData, compression='gzip', compression_opts=9)
        s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
        s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
        s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
        s.create_dataset('description', data=description)
        h5file.close()
        self.popup_info("info","Data conversion completed!")
    except Exception:
        # report the failure to the user instead of crashing the GUI
        exc_type, exc_value, exc_traceback = sys.exc_info()
        self.popup_info("warning", "ERROR: %s"%str(exc_value))
def Bruker2HDF(self):
    """Convert a Bruker RAW reciprocal-space map to an HDF5 file.

    Converts the RAW file to UXD, computes H,K,L via the experiment's
    Ang2HKL, optionally applies an offset correction anchored at the
    intensity maximum, and saves intensity + Qx/Qy/Qz datasets to HDF5.
    Errors are reported through a warning popup.
    """
    try:
        raw_file = self.spec_file
        # local import: Bruker helpers only needed for this code path
        from MCA_GUI.Bruker import convert_raw_to_uxd,get_Bruker
        uxd_file = raw_file.split(".")[0]+".uxd"
        convert_raw_to_uxd(raw_file, uxd_file)
        description = self.XRDML_description.get_text()
        self.XRDML_show_info.set_text("Reading Raw data ...")
        self.gtk_waiting()
        a = self.substrate._geta1()[0] #in Angstrom
        a = a/10.  # presumably Angstrom -> nm -- TODO confirm
        dataset = get_Bruker(uxd_file)
        theta = dataset['omega']
        dTheta = dataset['tth']
        Qhkl = self.experiment.Ang2HKL(theta, dTheta)
        Qx,Qy,Qz = Qhkl[0],Qhkl[1],Qhkl[2]
        ########## Correction d'offset ###############
        if self.offset_correction:
            # shift H,K,L so the intensity maximum lands on self.HKL
            x,y=np.unravel_index(np.argmax(dataset['data']),dataset['data'].shape)
            Hsub = Qhkl[0][x,y]
            Ksub = Qhkl[1][x,y]
            Lsub = Qhkl[2][x,y]
            Qx = Qhkl[0]+self.HKL[0]-Hsub
            Qy = Qhkl[1]+self.HKL[1]-Ksub
            Qz = Qhkl[2]+self.HKL[2]-Lsub
        Q = self.HKL2Q(Qx, Qy, Qz, a)
        self.XRDML_show_info.set_text("Raw data are successfully loaded.")
        self.gtk_waiting()
        if description == "":
            no_description = True
            description = "RSM"  # default output name when none given
        else:
            no_description = False
        h5file = description+".h5"
        info = "\nSaving file: %s"%(h5file)
        self.XRDML_show_info.set_text(info)
        self.gtk_waiting()
        h5file = join(self.des_folder,h5file)
        if os.path.isfile(h5file):
            # NOTE(review): shell removal breaks for paths with spaces;
            # os.remove would be safer -- confirm before changing
            del_file = "rm -f %s"%h5file
            os.system(del_file)
        h5file = h5.File(h5file,"w")
        s = h5file.create_group(description)
        s.create_dataset('intensity', data=dataset['data'], compression='gzip', compression_opts=9)
        s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
        s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
        s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
        s.create_dataset('description', data=description)
        h5file.close()
        self.popup_info("info","Data conversion completed!")
    except:
        # bare except: reports any failure via popup instead of crashing
        exc_type, exc_value, exc_traceback = sys.exc_info()
        self.popup_info("warning", "ERROR: %s"%str(exc_value))
def spec2HDF(self,widget):
    """Convert a range of spec/MCA scans into one HDF5 file per scan.

    Reads scans [c4_entry1, c4_entry2] from the spec/MCA file pair,
    computes Q via self.loadAmap for each map, and writes intensity +
    Qx/Qy/Qz datasets. Errors are reported through a warning popup.

    Args:
        widget: the GTK widget that emitted the signal (unused).
    """
    try:
        specfile = self.spec_file
        mcafile = self.mca_file
        scan_beg = int(self.c4_entry1.get_text())
        scan_end = int(self.c4_entry2.get_text())
        substrate = self.e1_entry.get_active_text()
        if substrate == "-- other":
            substrate = self.e1_entry_other.get_text()
        # NOTE(review): exec of a user-typed string; getattr(xu.materials,
        # substrate) would be safer if only attribute names are expected
        command = "self.substrate = xu.materials."+substrate
        exec(command)
        scanid = range(scan_beg, scan_end+1)
        self.show_info.set_text("Reading MCA data ...")
        self.gtk_waiting()
        allMaps = SP.ReadMCA2D_complete(mcafile)
        description = self.c5_entry1.get_text()
        retard = self.c6_entry.get_active()
        total = len(allMaps)
        total_maps_loaded = "Number of map(s) loaded: %d"%total
        self.show_info.set_text(total_maps_loaded)
        self.gtk_waiting()
        # optional comma-separated list of per-scan output names
        if description == "":
            no_description = True
        else:
            description = description.split(",")
            no_description = False
        for i in range(len(allMaps)):
            scannumber = scanid[i]
            scan_name = "Scan_%d"%scannumber
            if no_description:
                h5file = scan_name+".h5"
                d = scan_name
            else:
                h5file = description[i].strip()+".h5"
                d = description[i].strip()
            info = "\nSaving file N# %d/%d: %s"%(i+1,total,h5file)
            out_info = total_maps_loaded + info
            self.show_info.set_text(out_info)
            self.gtk_waiting()
            h5file = join(self.des_folder,h5file)
            if os.path.isfile(h5file):
                # NOTE(review): shell removal breaks for paths with spaces
                del_file = "rm -f %s"%h5file
                os.system(del_file)
            h5file = h5.File(h5file,"w")
            Q,mapdata = self.loadAmap(scannumber, specfile, allMaps[i], retard)
            s = h5file.create_group(scan_name)
            s.create_dataset('intensity', data=mapdata, compression='gzip', compression_opts=9)
            s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
            s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
            s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
            s.create_dataset('description', data=d)
            h5file.close()
        self.popup_info("info","Data conversion completed!")
    except:
        # bare except: reports any failure via popup instead of crashing
        exc_type, exc_value, exc_traceback = sys.exc_info()
        self.popup_info("warning", "ERROR: %s"%str(exc_value))
def Export_HQ_Image(self, widget):
    """Export the current reciprocal-space map as a high-quality image.

    Opens a save dialog (png/jpg/pdf/ps/eps filters), then re-renders the
    gridded data with large labels and a colorbar and saves the figure to
    the chosen filename.

    Args:
        widget: the GTK widget that emitted the signal (unused).
    """
    dialog = gtk.FileChooserDialog(title="Save image", action=gtk.FILE_CHOOSER_ACTION_SAVE, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
    # default file name derived from the selected RSM, falling back to "Img"
    filename = self.rsm_choosen.split(".")[0] if self.rsm_choosen != "" else "Img"
    dialog.set_current_name(filename+".png")
    #dialog.set_filename(filename)
    dialog.set_current_folder(self.GUI_current_folder)
    filtre = gtk.FileFilter()
    filtre.set_name("images")
    filtre.add_pattern("*.png")
    filtre.add_pattern("*.jpg")
    filtre.add_pattern("*.pdf")
    filtre.add_pattern("*.ps")
    filtre.add_pattern("*.eps")
    dialog.add_filter(filtre)
    filtre = gtk.FileFilter()
    filtre.set_name("Other")
    filtre.add_pattern("*")
    dialog.add_filter(filtre)
    response = dialog.run()
    if response==gtk.RESPONSE_OK:
        #self.fig.savefig(dialog.get_filename())
        xlabel = r'$Q_x (nm^{-1})$'
        ylabel = r'$Q_z (nm^{-1})$'
        fig = plt.figure(figsize=(10,8),dpi=100)
        ax = fig.add_axes([0.12,0.2,0.7,0.7])
        cax = fig.add_axes([0.85,0.2,0.03,0.7])  # colorbar axes
        clabel = r'$Intensity\ (Counts\ per\ second)$'
        fmt = "%d"
        if self.linear_scale_btn.get_active():
            # data are displayed as log10 when the button is active
            clabel = r'$Log_{10}\ (Intensity)\ [arb.\ units]$'
            fmt = "%.2f"
        data = self.gridder.data.T
        data = flat_data(data, self.vmin, self.vmax, self.linear_scale_btn.get_active())
        img = ax.contourf(self.gridder.xaxis, self.gridder.yaxis, data, 100, vmin=self.vmin*1.1, vmax=self.vmax)
        cb = fig.colorbar(img,cax=cax, format=fmt)
        cb.set_label(clabel, fontsize=20)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        ax.yaxis.label.set_size(20)
        ax.xaxis.label.set_size(20)
        ax.set_title(self.rsm_description,fontsize=20)
        fig.savefig(dialog.get_filename())
        plt.close()
    dialog.destroy()
if __name__=="__main__":
    # Build the main window and hand control to the GTK main loop.
    MyMainWindow()
    gtk.main()
| gpl-2.0 |
ElDeveloper/qiita | qiita_db/meta_util.py | 2 | 20723 | r"""
Util functions (:mod: `qiita_db.meta_util`)
===========================================
..currentmodule:: qiita_db.meta_util
This module provides utility functions that use the ORM objects. ORM objects
CANNOT import from this file.
Methods
-------
..autosummary::
:toctree: generated/
get_lat_longs
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os import stat, rename
from os.path import join, relpath, basename
from time import strftime, localtime
import matplotlib.pyplot as plt
import matplotlib as mpl
from base64 import b64encode
from urllib.parse import quote
from io import BytesIO
from datetime import datetime
from collections import defaultdict, Counter
from tarfile import open as topen, TarInfo
from hashlib import md5
from re import sub
from json import loads, dump, dumps
from qiita_db.util import create_nested_path
from qiita_core.qiita_settings import qiita_config, r_client
from qiita_core.configuration_manager import ConfigurationManager
import qiita_db as qdb
def _get_data_fpids(constructor, object_id):
    """Small function for getting filepath IDS associated with data object

    Parameters
    ----------
    constructor : a subclass of BaseData
        E.g., RawData, PreprocessedData, or ProcessedData
    object_id : int
        The ID of the data object

    Returns
    -------
    set of int
    """
    with qdb.sql_connection.TRN:
        data_object = constructor(object_id)
        # collect only the filepath id from each (id, path, type) tuple
        fpids = set()
        for fpid, _, _ in data_object.get_filepaths():
            fpids.add(fpid)
        return fpids
def validate_filepath_access_by_user(user, filepath_id):
    """Validates if the user has access to the filepath_id

    Parameters
    ----------
    user : User object
        The user we are interested in
    filepath_id : int
        The filepath id

    Returns
    -------
    bool
        If the user has access or not to the filepath_id

    Notes
    -----
    Admins have access to all files so True is always returned
    """
    TRN = qdb.sql_connection.TRN
    with TRN:
        if user.level == "admin":
            # admins have access all files
            return True
        # one query resolves which kind of object (artifact / sample info /
        # prep info / analysis) the filepath is attached to
        sql = """SELECT
            (SELECT array_agg(artifact_id)
             FROM qiita.artifact_filepath
             WHERE filepath_id = {0}) AS artifact,
            (SELECT array_agg(study_id)
             FROM qiita.sample_template_filepath
             WHERE filepath_id = {0}) AS sample_info,
            (SELECT array_agg(prep_template_id)
             FROM qiita.prep_template_filepath
             WHERE filepath_id = {0}) AS prep_info,
            (SELECT array_agg(analysis_id)
             FROM qiita.analysis_filepath
             WHERE filepath_id = {0}) AS analysis""".format(filepath_id)
        TRN.add(sql)
        arid, sid, pid, anid = TRN.execute_fetchflatten()
        # artifacts
        if arid:
            # [0] cause we should only have 1
            artifact = qdb.artifact.Artifact(arid[0])
            if artifact.visibility == 'public':
                # TODO: https://github.com/biocore/qiita/issues/1724
                # raw-data types may be public yet have downloads restricted
                if artifact.artifact_type in ['SFF', 'FASTQ', 'FASTA',
                                              'FASTA_Sanger',
                                              'per_sample_FASTQ']:
                    study = artifact.study
                    has_access = study.has_access(user, no_public=True)
                    if (not study.public_raw_download and not has_access):
                        return False
                return True
            else:
                study = artifact.study
                if study:
                    # let's take the visibility via the Study
                    return artifact.study.has_access(user)
                else:
                    # artifact belongs to an analysis, not a study
                    analysis = artifact.analysis
                    return analysis in (
                        user.private_analyses | user.shared_analyses)
        # sample info files
        elif sid:
            # the visibility of the sample info file is given by the
            # study visibility
            # [0] cause we should only have 1
            return qdb.study.Study(sid[0]).has_access(user)
        # prep info files
        elif pid:
            # the prep access is given by it's artifacts, if the user has
            # access to any artifact, it should have access to the prep
            # [0] cause we should only have 1
            pt = qdb.metadata_template.prep_template.PrepTemplate(
                pid[0])
            a = pt.artifact
            # however, the prep info file could not have any artifacts attached
            # , in that case we will use the study access level
            if a is None:
                return qdb.study.Study(pt.study_id).has_access(user)
            else:
                if (a.visibility == 'public' or a.study.has_access(user)):
                    return True
                else:
                    # walk the processing tree: any accessible descendant
                    # grants access to the prep info file
                    for c in a.descendants.nodes():
                        if ((c.visibility == 'public' or
                             c.study.has_access(user))):
                            return True
                return False
        # analyses
        elif anid:
            # [0] cause we should only have 1
            aid = anid[0]
            analysis = qdb.analysis.Analysis(aid)
            return analysis in (
                user.private_analyses | user.shared_analyses)
        # filepath not attached to anything the user could own
        return False
def update_redis_stats():
    """Generate the system stats and save them in redis

    Returns
    -------
    list of str
        artifact filepaths that are not present in the file system
    """
    STUDY = qdb.study.Study
    # per-visibility counters for studies and samples
    number_studies = {'public': 0, 'private': 0, 'sandbox': 0}
    number_of_samples = {'public': 0, 'private': 0, 'sandbox': 0}
    num_studies_ebi = 0
    num_samples_ebi = 0
    number_samples_ebi_prep = 0
    stats = []           # (fp_type, size, 'YYYY-MM') per existing file
    missing_files = []   # paths registered in the DB but absent on disk
    per_data_type_stats = Counter()
    for study in STUDY.iter():
        st = study.sample_template
        if st is None:
            # study without sample information contributes nothing
            continue
        # counting samples submitted to EBI-ENA
        len_samples_ebi = sum([esa is not None
                               for esa in st.ebi_sample_accessions.values()])
        if len_samples_ebi != 0:
            num_studies_ebi += 1
            num_samples_ebi += len_samples_ebi
        samples_status = defaultdict(set)
        for pt in study.prep_templates():
            pt_samples = list(pt.keys())
            pt_status = pt.status
            if pt_status == 'public':
                per_data_type_stats[pt.data_type()] += len(pt_samples)
            samples_status[pt_status].update(pt_samples)
            # counting experiments (samples in preps) submitted to EBI-ENA
            number_samples_ebi_prep += sum([
                esa is not None
                for esa in pt.ebi_experiment_accessions.values()])
        # counting studies
        if 'public' in samples_status:
            number_studies['public'] += 1
        elif 'private' in samples_status:
            number_studies['private'] += 1
        else:
            # note that this is a catch all for other status; at time of
            # writing there is status: awaiting_approval
            number_studies['sandbox'] += 1
        # counting samples; note that some of these lines could be merged with
        # the block above but I decided to split it in 2 for clarity
        if 'public' in samples_status:
            number_of_samples['public'] += len(samples_status['public'])
        if 'private' in samples_status:
            number_of_samples['private'] += len(samples_status['private'])
        if 'sandbox' in samples_status:
            number_of_samples['sandbox'] += len(samples_status['sandbox'])
        # processing filepaths
        for artifact in study.artifacts():
            for adata in artifact.filepaths:
                try:
                    s = stat(adata['fp'])
                except OSError:
                    # file registered in the DB but gone from disk
                    missing_files.append(adata['fp'])
                else:
                    stats.append(
                        (adata['fp_type'], s.st_size, strftime('%Y-%m',
                         localtime(s.st_mtime))))
    num_users = qdb.util.get_count('qiita.qiita_user')
    num_processing_jobs = qdb.util.get_count('qiita.processing_job')
    lat_longs = dumps(get_lat_longs())
    # aggregate file sizes per filetype per month
    summary = {}
    all_dates = []
    # these are some filetypes that are too small to plot alone so we'll merge
    # in other
    group_other = {'html_summary', 'tgz', 'directory', 'raw_fasta', 'log',
                   'biom', 'raw_sff', 'raw_qual', 'qza', 'html_summary_dir',
                   'qza', 'plain_text', 'raw_barcodes'}
    for ft, size, ym in stats:
        if ft in group_other:
            ft = 'other'
        if ft not in summary:
            summary[ft] = {}
        if ym not in summary[ft]:
            summary[ft][ym] = 0
            all_dates.append(ym)
        summary[ft][ym] += size
    all_dates = sorted(set(all_dates))
    # sorting summaries: cumulative storage per filetype over time
    ordered_summary = {}
    for dt in summary:
        new_list = []
        current_value = 0
        for ad in all_dates:
            if ad in summary[dt]:
                current_value += summary[dt][ad]
            new_list.append(current_value)
        ordered_summary[dt] = new_list
    plot_order = sorted([(k, ordered_summary[k][-1]) for k in ordered_summary],
                        key=lambda x: x[1])
    # helper function to generate y axis, modified from:
    # http://stackoverflow.com/a/1094933
    def sizeof_fmt(value, position):
        number = None
        for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
            if abs(value) < 1024.0:
                number = "%3.1f%s" % (value, unit)
                break
            value /= 1024.0
        if number is None:
            number = "%.1f%s" % (value, 'Yi')
        return number
    # render the cumulative-storage plot and embed it as a base64 data URI
    all_dates_axis = range(len(all_dates))
    plt.locator_params(axis='y', nbins=10)
    plt.figure(figsize=(20, 10))
    for k, v in plot_order:
        plt.plot(all_dates_axis, ordered_summary[k], linewidth=2, label=k)
    plt.xticks(all_dates_axis, all_dates)
    plt.legend()
    plt.grid()
    ax = plt.gca()
    ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(sizeof_fmt))
    plt.xticks(rotation=90)
    plt.xlabel('Date')
    plt.ylabel('Storage space per data type')
    plot = BytesIO()
    plt.savefig(plot, format='png')
    plot.seek(0)
    img = 'data:image/png;base64,' + quote(b64encode(plot.getbuffer()))
    time = datetime.now().strftime('%m-%d-%y %H:%M:%S')
    portal = qiita_config.portal
    # making sure per_data_type_stats has some data so hmset doesn't fail
    if per_data_type_stats == {}:
        per_data_type_stats['No data'] = 0
    # (key, value, redis setter) triples; dict values go through hmset
    vals = [
        ('number_studies', number_studies, r_client.hmset),
        ('number_of_samples', number_of_samples, r_client.hmset),
        ('per_data_type_stats', dict(per_data_type_stats), r_client.hmset),
        ('num_users', num_users, r_client.set),
        ('lat_longs', (lat_longs), r_client.set),
        ('num_studies_ebi', num_studies_ebi, r_client.set),
        ('num_samples_ebi', num_samples_ebi, r_client.set),
        ('number_samples_ebi_prep', number_samples_ebi_prep, r_client.set),
        ('img', img, r_client.set),
        ('time', time, r_client.set),
        ('num_processing_jobs', num_processing_jobs, r_client.set)]
    for k, v, f in vals:
        redis_key = '%s:stats:%s' % (portal, k)
        # important to "flush" variables to avoid errors
        r_client.delete(redis_key)
        f(redis_key, v)
    # preparing vals to insert into DB
    vals = dumps(dict([x[:-1] for x in vals]))
    sql = """INSERT INTO qiita.stats_daily (stats, stats_timestamp)
             VALUES (%s, NOW())"""
    qdb.sql_connection.perform_as_transaction(sql, [vals])
    return missing_files
def get_lat_longs():
    """Retrieve the latitude and longitude of all the public samples in the DB

    Returns
    -------
    list of [float, float]
        The latitude and longitude for each sample in the database
    """
    with qdb.sql_connection.TRN:
        # getting all the public studies
        studies = qdb.study.Study.get_by_status('public')
        results = []
        if studies:
            # we are going to create multiple union selects to retrieve the
            # latitude and longitude of all available studies. Note that
            # UNION in PostgreSQL automatically removes duplicates
            sql_query = """
                SELECT {0}, CAST(sample_values->>'latitude' AS FLOAT),
                       CAST(sample_values->>'longitude' AS FLOAT)
                FROM qiita.sample_{0}
                WHERE sample_values->>'latitude' != 'NaN' AND
                      sample_values->>'longitude' != 'NaN' AND
                      isnumeric(sample_values->>'latitude') AND
                      isnumeric(sample_values->>'longitude')"""
            sql = [sql_query.format(s.id) for s in studies]
            sql = ' UNION '.join(sql)
            qdb.sql_connection.TRN.add(sql)
            # duplicates were already removed by the UNION above
            results = qdb.sql_connection.TRN.execute_fetchindex()
        return results
def generate_biom_and_metadata_release(study_status='public'):
    """Generate a list of biom/meatadata filepaths and a tgz of those files

    Parameters
    ----------
    study_status : str, optional
        The study status to search for. Note that this should always be set
        to 'public' but having this exposed helps with testing. The other
        options are 'private' and 'sandbox'
    """
    studies = qdb.study.Study.get_by_status(study_status)
    qiita_config = ConfigurationManager()
    working_dir = qiita_config.working_dir
    portal = qiita_config.portal
    bdir = qdb.util.get_db_files_base_dir()
    time = datetime.now().strftime('%m-%d-%y %H:%M:%S')
    data = []
    for s in studies:
        # [0] latest is first, [1] only getting the filepath
        sample_fp = relpath(s.sample_template.get_filepaths()[0][1], bdir)
        for a in s.artifacts(artifact_type='BIOM'):
            if a.processing_parameters is None or a.visibility != study_status:
                continue
            merging_schemes, parent_softwares = a.merging_scheme
            software = a.processing_parameters.command.software
            software = '%s v%s' % (software.name, software.version)
            for x in a.filepaths:
                # only ship biom files; 'only-16s' tables are excluded
                if x['fp_type'] != 'biom' or 'only-16s' in x['fp']:
                    continue
                fp = relpath(x['fp'], bdir)
                for pt in a.prep_templates:
                    categories = pt.categories()
                    platform = ''
                    target_gene = ''
                    if 'platform' in categories:
                        platform = ', '.join(
                            set(pt.get_category('platform').values()))
                    if 'target_gene' in categories:
                        target_gene = ', '.join(
                            set(pt.get_category('target_gene').values()))
                    # keep the first non-qiime prep filepath
                    for _, prep_fp in pt.get_filepaths():
                        if 'qiime' not in prep_fp:
                            break
                    prep_fp = relpath(prep_fp, bdir)
                    # format: (biom_fp, sample_fp, prep_fp, qiita_artifact_id,
                    #          platform, target gene, merging schemes,
                    #          artifact software/version,
                    #          parent sofware/version)
                    data.append((fp, sample_fp, prep_fp, a.id, platform,
                                 target_gene, merging_schemes, software,
                                 parent_softwares))
    # writing text and tgz file
    ts = datetime.now().strftime('%m%d%y-%H%M%S')
    tgz_dir = join(working_dir, 'releases')
    create_nested_path(tgz_dir)
    # build under a "-building" name, then rename atomically when complete
    tgz_name = join(tgz_dir, '%s-%s-building.tgz' % (portal, study_status))
    tgz_name_final = join(tgz_dir, '%s-%s.tgz' % (portal, study_status))
    txt_lines = [
        "biom fp\tsample fp\tprep fp\tqiita artifact id\tplatform\t"
        "target gene\tmerging scheme\tartifact software\tparent software"]
    with topen(tgz_name, "w|gz") as tgz:
        for biom_fp, sample_fp, prep_fp, aid, pform, tg, ms, asv, psv in data:
            txt_lines.append("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (
                biom_fp, sample_fp, prep_fp, aid, pform, tg, ms, asv, psv))
            tgz.add(join(bdir, biom_fp), arcname=biom_fp, recursive=False)
            tgz.add(join(bdir, sample_fp), arcname=sample_fp, recursive=False)
            tgz.add(join(bdir, prep_fp), arcname=prep_fp, recursive=False)
        # add the index text file describing every entry in the tarball
        info = TarInfo(name='%s-%s-%s.txt' % (portal, study_status, ts))
        txt_hd = BytesIO()
        txt_hd.write(bytes('\n'.join(txt_lines), 'ascii'))
        txt_hd.seek(0)
        info.size = len(txt_hd.read())
        txt_hd.seek(0)
        tgz.addfile(tarinfo=info, fileobj=txt_hd)
    with open(tgz_name, "rb") as f:
        md5sum = md5()
        for c in iter(lambda: f.read(4096), b""):
            md5sum.update(c)
    rename(tgz_name, tgz_name_final)
    vals = [
        ('filepath', tgz_name_final[len(working_dir):], r_client.set),
        ('md5sum', md5sum.hexdigest(), r_client.set),
        ('time', time, r_client.set)]
    for k, v, f in vals:
        redis_key = '%s:release:%s:%s' % (portal, study_status, k)
        # important to "flush" variables to avoid errors
        r_client.delete(redis_key)
        f(redis_key, v)
def generate_plugin_releases():
    """Generate releases for plugins

    For every active command with a post_processing_cmd, dump the archived
    feature values per merging scheme to JSON, run the plugin's
    post-processing script on them, and tar the results into
    releases/archive/archive.tgz (with its md5 stored in redis).
    """
    ARCHIVE = qdb.archive.Archive
    qiita_config = ConfigurationManager()
    working_dir = qiita_config.working_dir
    commands = [c for s in qdb.software.Software.iter(active=True)
                for c in s.commands if c.post_processing_cmd is not None]
    tnow = datetime.now()
    ts = tnow.strftime('%m%d%y-%H%M%S')
    tgz_dir = join(working_dir, 'releases', 'archive')
    create_nested_path(tgz_dir)
    # timestamped subdirectory holding this release's per-scheme outputs
    tgz_dir_release = join(tgz_dir, ts)
    create_nested_path(tgz_dir_release)
    for cmd in commands:
        cmd_name = cmd.name
        mschemes = [v for _, v in ARCHIVE.merging_schemes().items()
                    if cmd_name in v]
        for ms in mschemes:
            # sanitize the scheme name for use as a directory name
            ms_name = sub('[^0-9a-zA-Z]+', '', ms)
            ms_fp = join(tgz_dir_release, ms_name)
            create_nested_path(ms_fp)
            pfp = join(ms_fp, 'archive.json')
            archives = {k: loads(v)
                        for k, v in ARCHIVE.retrieve_feature_values(
                            archive_merging_scheme=ms).items()
                        if v != ''}
            with open(pfp, 'w') as f:
                dump(archives, f)
            # now let's run the post_processing_cmd
            ppc = cmd.post_processing_cmd
            # concatenate any other parameters into a string
            params = ' '.join(["%s=%s" % (k, v) for k, v in
                               ppc['script_params'].items()])
            # append archives file and output dir parameters
            params = ("%s --fp_archive=%s --output_dir=%s" % (
                params, pfp, ms_fp))
            ppc_cmd = "%s %s %s" % (
                ppc['script_env'], ppc['script_path'], params)
            p_out, p_err, rv = qdb.processing_job._system_call(ppc_cmd)
            p_out = p_out.rstrip()
            if rv != 0:
                raise ValueError('Error %d: %s' % (rv, p_out))
            p_out = loads(p_out)
    # tgz-ing all files
    tgz_name = join(tgz_dir, 'archive-%s-building.tgz' % ts)
    tgz_name_final = join(tgz_dir, 'archive.tgz')
    with topen(tgz_name, "w|gz") as tgz:
        tgz.add(tgz_dir_release, arcname=basename(tgz_dir_release))
    # getting the release md5
    with open(tgz_name, "rb") as f:
        md5sum = md5()
        for c in iter(lambda: f.read(4096), b""):
            md5sum.update(c)
    # rename only once the tarball is complete, so readers never see a
    # half-written archive.tgz
    rename(tgz_name, tgz_name_final)
    vals = [
        ('filepath', tgz_name_final[len(working_dir):], r_client.set),
        ('md5sum', md5sum.hexdigest(), r_client.set),
        ('time', tnow.strftime('%m-%d-%y %H:%M:%S'), r_client.set)]
    for k, v, f in vals:
        redis_key = 'release-archive:%s' % k
        # important to "flush" variables to avoid errors
        r_client.delete(redis_key)
        f(redis_key, v)
| bsd-3-clause |
anonymouscontributor/cnr | cnr/LossFunctions.py | 1 | 28917 | '''
A collection of LossFunction classes
@date: May 7, 2015
'''
import numpy as np
import os, ctypes, uuid
from _ctypes import dlclose
from subprocess import call
from matplotlib import pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
from scipy.linalg import orth, eigh
from scipy.integrate import nquad
from scipy.stats import uniform, gamma
from cvxopt import matrix, spmatrix, solvers
from .Domains import nBox, UnionOfDisjointnBoxes, DifferenceOfnBoxes
class LossFunction(object):
    """ Base class for LossFunctions """

    def val(self, points):
        """ Returns values of the loss function at the specified points """
        raise NotImplementedError

    def val_grid(self, x, y):
        """ Computes the value of the loss on rectangular grid (for now assume 2dim) """
        grid_pts = np.array([[xi, yi] for xi in x for yi in y])
        return self.val(grid_pts).reshape((x.shape[0], y.shape[0]))

    def plot(self, points):
        """ Creates a 3D plot of the density via triangulation of
            the density value at the points """
        if points.shape[1] != 2:
            raise Exception('Can only plot functions in dimension 2.')
        # restrict plotting to points actually contained in the domain
        inside_pts = points[self.domain.iselement(points)]
        zvals = self.val(inside_pts)
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.plot_trisurf(inside_pts[:,0], inside_pts[:,1], zvals,
                        cmap=plt.get_cmap('jet'), linewidth=0.2)
        ax.set_xlabel('$s_1$'), ax.set_ylabel('$s_2$'), ax.set_zlabel('l$(z)$')
        ax.set_title('Loss')
        plt.show()
        return fig

    def grad(self, points):
        """ Returns gradient of the loss function at the specified points """
        raise NotImplementedError

    def Hessian(self, points):
        """ Returns Hessian of the loss function at the specified points """
        raise NotImplementedError

    def proj_gradient(self, x, step):
        """ Returns next iterate of a projected gradient step """
        return self.domain.project(x - step*self.grad(x))
class ZeroLossFunction(LossFunction):
    """ An zero loss function in n dimensions (for coding consistency) """

    def __init__(self, domain):
        """ ZeroLossFunction with l(s) = 0. """
        self.domain = domain
        self.desc = 'Zero'

    def val(self, points):
        """ The zero loss is 0 at every query point. """
        return np.zeros(points.shape[0])

    def max(self):
        """ Maximum over the domain is trivially 0. """
        return 0

    def min(self):
        """ Minimum over the domain is trivially 0. """
        return 0

    def grad(self, points):
        """ Gradient of the zero loss vanishes everywhere. """
        return np.zeros_like(points)

    def __add__(self, lossfunc):
        """ Add a loss function object to the ZeroLossFunction """
        # 0 + l = l, so simply hand back the other loss
        return lossfunc

    def norm(self, p):
        """ Every p-norm of the zero function is 0. """
        return 0

    def gen_ccode(self):
        """ C-code stub: a loss that is identically 0.0. """
        return ['double f(int n, double args[n]){\n',
                '    double loss = 0.0;\n']
class AffineLossFunction(LossFunction):
    """ An affine loss function in n dimensions """

    def __init__(self, domain, a, b):
        """ AffineLossFunction of the form l(s) = <a,s> + b,
            where a is a vector in R^n and b is a scalar. """
        if not len(a) == domain.n:
            raise Exception('Dimension of a must be dimension of domain!')
        self.domain = domain
        self.a, self.b = np.array(a), b
        self.desc = 'Affine'

    def val(self, points):
        """ Values <a, s> + b at the given points (N x n array). """
        return np.dot(points, self.a) + self.b

    def max(self, grad=False):
        """ Compute the maximum of the loss function over the domain.
            If grad is True, also return the Euclidean norm of the
            (constant) gradient a. """
        # max over a box is b + sum_i max(a_i*lo_i, a_i*hi_i); BUGFIX: the
        # previous code multiplied the *whole* vector a with each bound pair
        # instead of pairing a_i with bound i (compare the zip in norm()).
        if isinstance(self.domain, nBox):
            M = self.b + np.sum([np.maximum(ai*bnd[0], ai*bnd[1])
                                 for ai, bnd in zip(self.a, self.domain.bounds)])
        elif isinstance(self.domain, UnionOfDisjointnBoxes):
            M = self.b + np.max([np.sum([np.maximum(ai*bnd[0], ai*bnd[1])
                                         for ai, bnd in zip(self.a, nbox.bounds)])
                                 for nbox in self.domain.nboxes])
        elif isinstance(self.domain, DifferenceOfnBoxes):
            M = self.b + np.sum([np.maximum(ai*bnd[0], ai*bnd[1])
                                 for ai, bnd in zip(self.a, self.domain.outer.bounds)])
        else:
            raise Exception(('Sorry, for now only nBox, UnionOfDisjointnBoxes and DifferenceOfnBoxes '
                             + 'are supported for computing minimum and maximum of AffineLossFunctions'))
        if grad:
            return M, np.linalg.norm(self.a, 2)
        else:
            return M

    def min(self, argmin=False):
        """ Compute the minimum of the loss function over the domain
            (attained at a vertex, since the loss is affine). If argmin is
            True, also return a minimizing vertex. """
        if (isinstance(self.domain, nBox) or isinstance(self.domain, UnionOfDisjointnBoxes)
                or isinstance(self.domain, DifferenceOfnBoxes)):
            verts = self.domain.vertices()
            vals = self.val(verts)
            idx = np.argmin(vals)
            if argmin:
                return vals[idx], verts[idx]
            else:
                return vals[idx]
            # NOTE: a dead, unreachable "return np.min(...)" was removed here
        else:
            raise Exception(('Sorry, for now only nBox, UnionOfDisjointnBoxes and DifferenceOfnBoxes '
                             + 'are supported for computing minimum and maximum of AffineLossFunctions'))

    def grad(self, points):
        """ Returns the gradient of the AffineLossFunction (equal at all points) """
        return np.repeat(np.array(self.a, ndmin=2), points.shape[0], axis=0)

    def __add__(self, affine2):
        """ Add two AffineLossFunction objects (assumes that both functions
            are defined over the same domain. """
        if isinstance(affine2, AffineLossFunction):
            return AffineLossFunction(self.domain, self.a + affine2.a, self.b + affine2.b)
        else:
            raise Exception('So far can only add two affine loss functions!')

    def norm(self, p, **kwargs):
        """ Computes the p-Norm of the loss function over the domain """
        if np.isinf(p):
            # NOTE(review): returns max l rather than max |l|; correct only
            # if the loss is nonnegative on the domain -- confirm callers
            return self.max()
        if isinstance(self.domain, nBox):
            nboxes = [self.domain]
        elif isinstance(self.domain, UnionOfDisjointnBoxes):
            nboxes = self.domain.nboxes
        else:
            raise Exception('Sorry, so far only nBox and UnionOfDisjointnBoxes are supported!')
        if p == 1:
            # volume * (value at the box midpoint); BUGFIX: exponent typo
            # 0.5**sum(...) replaced by the intended 0.5*sum(...)
            # (valid as the L1 norm only when the loss is nonnegative)
            return np.sum([nbox.volume*(self.b + 0.5*np.sum([a*(bnd[1]+bnd[0]) for a, bnd in zip(self.a, nbox.bounds)]))
                           for nbox in nboxes])
        else:
            # general p: integrate |l|^p numerically via compiled C code
            ccode = ['#include <math.h>\n\n',
                     'double a[{}] = {{{}}};\n\n'.format(self.domain.n, ','.join(str(ai) for ai in self.a)),
                     'double f(int n, double args[n]){\n',
                     '   int i;\n',
                     '   double loss = {};\n'.format(self.b),
                     '   for (i=0; i<{}; i++){{\n'.format(self.domain.n),
                     '     loss += a[i]*args[i];}\n',
                     '   return pow(fabs(loss), {});\n'.format(p),
                     '   }']
            ranges = [nbox.bounds for nbox in nboxes]
            return ctypes_integrate(ccode, ranges, **kwargs)**(1/p)

    def gen_ccode(self):
        """ C-code fragment evaluating the affine loss at args[0..n-1]. """
        return ['double a[{}] = {{{}}};\n'.format(self.domain.n, ','.join(str(a) for a in self.a)),
                'double f(int n, double args[n]){\n',
                '   double nu = *(args + {});\n'.format(self.domain.n),
                '   int i;\n',
                '   double loss = {};\n'.format(self.b),
                '   for (i=0; i<{}; i++){{\n'.format(self.domain.n),
                '     loss += a[i]*(*(args + i));\n',
                '   }\n']
class QuadraticLossFunction(LossFunction):
    """ Loss given by l(s) = 0.5 (s-mu)'Q(s-mu) + c, with Q>0 and c>= 0.
        This assumes that mu is inside the domain! """

    def __init__(self, domain, mu, Q, c):
        # validate that mu and Q are dimensionally consistent with the domain
        if not domain.n == len(mu):
            raise Exception('Dimension of mu must be dimension of domain!')
        if not domain.n == Q.shape[0]:
            raise Exception('Dimension of Q must be dimension of domain!')
        if not Q.shape[0] == Q.shape[1]:
            raise Exception('Matrix Q must be square!')
        self.domain, self.mu, self.Q, self.c = domain, mu, Q, c
        self.desc = 'Quadratic'
        # implement computation of Lipschitz constant. Since gradient is
        # linear, we can just look at the norm on the vertices
        # self.L = self.computeL()

    def val(self, points):
        """ Evaluate 0.5 (s-mu)'Q(s-mu) + c row-wise for an array of points. """
        x = points - self.mu
        return 0.5*np.sum(np.dot(x,self.Q)*x, axis=1) + self.c

    def max(self, grad=False):
        """ Compute the maximum of the loss function over the domain. """
        if grad:
            raise NotImplementedError
        try:
            # return the cached maximum if it was computed on an earlier call
            return self.bounds['max']
        except (KeyError, AttributeError) as e:
            if (isinstance(self.domain, nBox) or isinstance(self.domain, UnionOfDisjointnBoxes)
                or isinstance(self.domain, DifferenceOfnBoxes)):
                if not isPosDef(self.Q):
                    # non-convex case: fall back to evaluating on a dense grid
                    M = np.max(self.val(self.domain.grid(10000))) # this is a hack
                else:
                    # convex case: the max over a box-like domain is at a vertex
                    M = np.max(self.val(self.domain.vertices()))
                try:
                    self.bounds['max'] = M
                except AttributeError:
                    # first call: the cache dict does not exist yet
                    self.bounds = {'max':M}
                return self.bounds['max']
            else:
                raise Exception(('Sorry, for now only nBox, UnionOfDisjointnBoxes and DifferenceOfnBoxes '
                                 + 'are supported for computing minimum and maximum of AffineLossFunctions'))

    def min(self, argmin=False, **kwargs):
        """ Compute the minimum of the loss function over the domain.
            If argmin is True also return the minimizing point. """
        if isPosDef(self.Q):
            if self.domain.iselement(np.array(self.mu, ndmin=2)):
                # unconstrained minimum mu lies in the domain: value is c
                minval, smin = self.c, self.mu
            elif (isinstance(self.domain, nBox) or isinstance(self.domain, UnionOfDisjointnBoxes)):
                # convex QP over box(es): solve exactly with cvxopt
                minval, smin = self.find_boxmin(argmin=True)
            else:
                # fall back to evaluating on a dense grid
                grid = self.domain.grid(50000)
                vals = self.val(grid)
                idxmin = np.argmin(vals)
                minval, smin = vals[idxmin], grid[idxmin]
        else:
            # non-convex case: grid search
            grid = self.domain.grid(50000)
            vals = self.val(grid)
            idxmin = np.argmin(vals)
            minval, smin = vals[idxmin], grid[idxmin]
        if argmin:
            return minval, smin
        else:
            return minval

    def find_boxmin(self, argmin=False):
        """ Minimize the (convex) quadratic over (a union of) boxes by
            solving one box-constrained QP per box with cvxopt and keeping
            the best solution. Requires isPosDef(self.Q). """
        if isinstance(self.domain, nBox):
            bounds = [self.domain.bounds]
        elif isinstance(self.domain, UnionOfDisjointnBoxes):
            bounds = [nbox.bounds for nbox in self.domain.nboxes]
        else:
            raise Exception('Boxmin only works on nBox or UnionOfDisjointnBoxes')
        n = self.domain.n
        # cvxopt QP form: minimize 0.5 x'Px + q'x s.t. Gx <= h; the constant
        # terms dropped here are re-added by evaluating val() below
        P = matrix(self.Q, tc='d')
        q = matrix(-np.dot(self.Q, self.mu), tc='d')
        # G encodes the per-coordinate lower/upper bound constraints
        G = spmatrix([-1,1]*n, np.arange(2*n), np.repeat(np.arange(n), 2), tc='d')
        hs = [matrix(np.array([-1,1]*n)*np.array(bound).flatten(), tc='d') for bound in bounds]
        solvers.options['show_progress'] = False
        results = [solvers.qp(P, q, G, h) for h in hs]
        smins = np.array([np.array(res['x']).flatten() for res in results])
        vals = self.val(smins)
        idxmin = np.argmin(vals)
        if argmin:
            return vals[idxmin], smins[idxmin]
        else:
            return vals[idxmin]

    def grad(self, points):
        """ Gradient Q(s-mu), evaluated row-wise at the given points. """
        return np.transpose(np.dot(self.Q, np.transpose(points - self.mu)))

    def Hessian(self, points):
        """ The Hessian is the constant matrix Q; replicate it per point. """
        return np.array([self.Q,]*points.shape[0])

    def __add__(self, quadloss):
        """ Sum of two quadratics, recompleted into the (mu, Q, c) form. """
        Qtilde = self.Q + quadloss.Q
        # btilde collects the linear coefficients of both quadratics
        btilde = - np.dot(self.mu, self.Q) - np.dot(quadloss.mu, quadloss.Q)
        # complete the square: the new center solves Qtilde*mutilde = -btilde
        mutilde = -np.linalg.solve(Qtilde, btilde)
        # print('min eval of Q: {}, ||mutilde||: {}'.format(np.min(np.abs(np.linalg.eigvalsh(Qtilde))), np.linalg.norm(mutilde,2)))
        ctilde = 0.5*(2*self.c + np.dot(self.mu, np.dot(self.Q, self.mu))
                      + 2*quadloss.c + np.dot(quadloss.mu, np.dot(quadloss.Q, quadloss.mu))
                      + np.dot(btilde, mutilde))
        return QuadraticLossFunction(self.domain, mutilde, Qtilde, ctilde)

    def norm(self, p, **kwargs):
        """ Computes the p-Norm of the loss function over the domain """
        nboxes = None
        if isinstance(self.domain, nBox):
            nboxes = [self.domain]
        elif isinstance(self.domain, UnionOfDisjointnBoxes):
            nboxes = self.domain.nboxes
        elif isinstance(self.domain, DifferenceOfnBoxes):
            # NOTE(review): this compares the 'inner' attribute itself to 1;
            # presumably it should be len(self.domain.inner) == 1 (a hollow
            # box with a single inner box) -- verify against the domain class.
            if (self.domain.inner) == 1:
                nboxes = self.domain.to_nboxes()
        if nboxes is None:
            raise Exception('Sorry, so far only nBox, UnionOfDisjointnBoxes and hollowboxes are supported!')
        if np.isinf(p):
            # infinity norm is just the maximum over the domain
            return self.max()
        else:
            # generate C code computing |0.5 (s-mu)'Q(s-mu) + c|^p and
            # integrate it numerically over each box
            ccode = ['#include <math.h>\n\n',
                     'double Q[{}][{}] = {{{}}};\n'.format(self.domain.n, self.domain.n, ','.join(str(q) for row in self.Q for q in row)),
                     'double mu[{}] = {{{}}};\n'.format(self.domain.n, ','.join(str(m) for m in self.mu)),
                     'double c = {};\n\n'.format(self.c),
                     'double f(int n, double args[n]){\n',
                     '    double nu = *(args + {});\n'.format(self.domain.n),
                     '    int i,j;\n',
                     '    double loss = c;\n',
                     '    for (i=0; i<{}; i++){{\n'.format(self.domain.n),
                     '        for (j=0; j<{}; j++){{\n'.format(self.domain.n),
                     '            loss += 0.5*Q[i][j]*(args[i]-mu[i])*(args[j]-mu[j]);\n',
                     '        }\n',
                     '    }\n',
                     '    return pow(fabs(loss), {});\n'.format(p),
                     '    }']
            ranges = [nbox.bounds for nbox in nboxes]
            return ctypes_integrate(ccode, ranges, **kwargs)**(1/p)

    def gen_ccode(self):
        """ Generate C source lines evaluating this quadratic loss; the
            closing return statement is appended by the caller. """
        return ['double Q[{}][{}] = {{{}}};\n'.format(self.domain.n, self.domain.n, ','.join(str(q) for row in self.Q for q in row)),
                'double mu[{}] = {{{}}};\n'.format(self.domain.n, ','.join(str(m) for m in self.mu)),
                'double c = {};\n\n'.format(self.c),
                'double f(int n, double args[n]){\n',
                '    double nu = *(args + {});\n'.format(self.domain.n),
                '    int i,j;\n',
                '    double loss = c;\n',
                '    for (i=0; i<{}; i++){{\n'.format(self.domain.n),
                '        for (j=0; j<{}; j++){{\n'.format(self.domain.n),
                '            loss += 0.5*Q[i][j]*(args[i]-mu[i])*(args[j]-mu[j]);\n',
                '        }\n',
                '    }\n']
class PolynomialLossFunction(LossFunction):
    """ A polynomial loss function in n dimensions of arbitrary order,
        represented in the basis of monomials """

    def __init__(self, domain, coeffs, exponents, partials=False):
        """ Construct a PolynomialLossFunction that is the sum of M monomials.
            coeffs is an M-dimensional array containing the coefficients of the
            monomials, and exponents is a list of n-tuples of length M, with
            the i-th tuple containing the exponents of the n variables in the monomial.
            For example, the polynomial l(x) = 3*x_1^3 + 2*x_1*x_3 + x2^2 + 2.5*x_2*x_3 + x_3^3
            in dimension n=3 is constructed using
            coeffs = [3, 2, 1, 2.5, 1] and
            exponents = [(3,0,0), (1,0,1), (0,2,0), (0,1,1), (0,0,3)]
        """
        if not len(coeffs) == len(exponents):
            raise Exception('Size of coeffs must be size of exponents along first axis!')
        if not len(exponents[0]) == domain.n:
            raise Exception('Dimension of elements of coeffs must be dimension of domain!')
        self.domain = domain
        # remove zero coefficients (can be generated by differentiation)
        # NOTE(review): despite the comment above, zero coefficients are NOT
        # filtered out here (compute_partials does its own filtering) --
        # confirm whether filtering was intended at this point.
        self.coeffs, self.exponents = coeffs, exponents
        self.m = len(self.coeffs)
        # map exponent-tuple -> coefficient, used for evaluation and merging
        self.polydict = {exps:coeff for coeff,exps in zip(self.coeffs,self.exponents)}
        self.desc = 'Polynomial'
        if partials:
            # optionally precompute the partial derivatives (used by grad)
            self.partials = self.compute_partials()

    def val(self, points):
        """ Evaluate the polynomial row-wise at the given points. """
        # monoms[k,i] is the k-th monomial evaluated at the i-th point
        # (product over the coordinate axis of points**exponents)
        monoms = np.array([points**exps for exps in self.polydict.keys()]).prod(2)
        return 0 + np.sum([monom*coeff for monom,coeff in zip(monoms, self.polydict.values())], axis=0)

    def max(self, grad=False):
        """ Compute the maximum of the loss function over the domain. If grad=True
            also compute the maximum 2-norm of the gradient over the domain.
            For now resort to stupid gridding. """
        grid = self.domain.grid(10000)
        M = np.max(self.val(grid)) # this is a hack
        if grad:
            return M, np.max(np.array([np.linalg.norm(self.grad(grid), 2, axis=1)])) # this is a hack
        else:
            return M

    def min(self, argmin=False):
        """ Compute the maximum of the loss function over the domain.
            For now resort to stupid gridding. """
        grid = self.domain.grid(50000)
        vals = self.val(grid)
        idxmin = np.argmin(vals)
        minval, smin = vals[idxmin], grid[idxmin]
        if argmin:
            return minval, smin
        else:
            return minval

    def grad(self, points):
        """ Computes the gradient of the PolynomialLossFunction at the specified points """
        try:
            partials = self.partials
        except AttributeError:
            # partials were not precomputed in __init__; compute and cache now
            self.partials = self.compute_partials()
            partials = self.partials
        return np.array([partials[i].val(points) for i in range(self.domain.n)]).T

    def compute_partials(self):
        """ Build the n partial derivatives, each again a polynomial (or a
            ZeroLossFunction when the derivative vanishes identically). """
        partials = []
        for i in range(self.domain.n):
            # differentiate each monomial w.r.t. coordinate i; monomials with
            # exponent 0 in coordinate i drop out
            coeffs = np.array([coeff*expon[i] for coeff,expon in zip(self.coeffs, self.exponents) if expon[i]!=0])
            exponents = [expon[0:i]+(expon[i]-1,)+expon[i+1:] for expon in self.exponents if expon[i]!=0]
            if len(coeffs) == 0:
                partials.append(ZeroLossFunction(self.domain))
            else:
                partials.append(PolynomialLossFunction(self.domain, coeffs, exponents))
        return partials

    def __add__(self, poly2):
        """ Add two PolynomialLossFunction objects (assumes that both polynomials
            are defined over the same domain. """
        # merge the exponent->coefficient dicts, summing shared monomials
        newdict = self.polydict.copy()
        for exps, coeff in poly2.polydict.items():
            try:
                newdict[exps] = newdict[exps] + coeff
            except KeyError:
                newdict[exps] = coeff
        return PolynomialLossFunction(self.domain, list(newdict.values()), list(newdict.keys()))

    def __mul__(self, scalar):
        """ Multiply a PolynomialLossFunction object with a scalar """
        return PolynomialLossFunction(self.domain, scalar*np.array(self.coeffs), self.exponents)

    def __rmul__(self, scalar):
        """ Multiply a PolynomialLossFunction object with a scalar """
        return self.__mul__(scalar)

    def norm(self, p, **kwargs):
        """ Computes the p-Norm of the loss function over the domain """
        if isinstance(self.domain, nBox):
            nboxes = [self.domain]
        elif isinstance(self.domain, UnionOfDisjointnBoxes):
            nboxes = self.domain.nboxes
        else:
            raise Exception('Sorry, so far only nBox and UnionOfDisjointnBoxes are supported!')
        if np.isinf(p):
            # infinity norm is just the maximum over the domain
            return self.max()
        else:
            # generate C code computing |sum_k c_k * prod_j args_j^e_kj|^p
            # and integrate it numerically over each box
            ccode = ['#include <math.h>\n\n',
                     'double c[{}] = {{{}}};\n'.format(self.m, ','.join(str(coeff) for coeff in self.coeffs)),
                     'double e[{}] = {{{}}};\n\n'.format(self.m*self.domain.n, ','.join(str(xpnt) for xpntgrp in self.exponents for xpnt in xpntgrp)),
                     'double f(int n, double args[n]){\n',
                     '    double nu = *(args + {});\n'.format(self.domain.n),
                     '    int i,j;\n',
                     '    double mon;\n',
                     '    double loss = 0.0;\n',
                     '    for (i=0; i<{}; i++){{\n'.format(self.m),
                     '        mon = 1.0;\n',
                     '        for (j=0; j<{}; j++){{\n'.format(self.domain.n),
                     '            mon = mon*pow(args[j], e[i*{}+j]);\n'.format(self.domain.n),
                     '        }\n',
                     '        loss += c[i]*mon;}\n',
                     '    return pow(fabs(loss), {});\n'.format(p),
                     '    }']
            ranges = [nbox.bounds for nbox in nboxes]
            return ctypes_integrate(ccode, ranges, **kwargs)**(1/p)

    def gen_ccode(self):
        """ Generate C source lines evaluating this polynomial loss; the
            closing return statement is appended by the caller. """
        return ['double c[{}] = {{{}}};\n'.format(self.m, ','.join(str(coeff) for coeff in self.coeffs)),
                'double e[{}] = {{{}}};\n\n'.format(self.m*self.domain.n, ','.join(str(xpnt) for xpntgrp in self.exponents for xpnt in xpntgrp)),
                'double f(int n, double args[n]){\n',
                '    double nu = *(args + {});\n'.format(self.domain.n),
                '    int i,j;\n',
                '    double mon;\n',
                '    double loss = 0.0;\n',
                '    for (i=0; i<{}; i++){{\n'.format(self.m),
                '        mon = 1.0;\n',
                '        for (j=0; j<{}; j++){{\n'.format(self.domain.n),
                '            mon = mon*pow(args[j], e[i*{}+j]);\n'.format(self.domain.n),
                '        }\n',
                '        loss += c[i]*mon;\n',
                '    }\n']
#######################################################################
# Some helper functions
#######################################################################
def ctypes_integrate(ccode, ranges, tmpfolder='libs/', **kwargs):
    """ Compile the C source lines `ccode` into a shared library and
        numerically integrate the exported function f over each of the
        (hyper-)rectangles in `ranges`, returning the sum of the integrals.

        NOTE(review): the '-o ...dylib' output and gcc invocation assume a
        macOS-like toolchain ('.so' would be the Linux convention) -- confirm
        the intended platform. kwargs are accepted but not forwarded here.
    """
    # unique temp name so concurrent calls do not clobber each other
    tmpfile = '{}{}'.format(tmpfolder, str(uuid.uuid4()))
    with open(tmpfile+'.c', 'w') as file:
        file.writelines(ccode)
    call(['gcc', '-shared', '-o', tmpfile+'.dylib', '-fPIC', tmpfile+'.c'])
    lib = ctypes.CDLL(tmpfile+'.dylib')
    lib.f.restype = ctypes.c_double
    # signature follows scipy's ctypes-callback convention for nquad:
    # double f(int n, double args[n])
    lib.f.argtypes = (ctypes.c_int, ctypes.c_double)
    result = np.sum([nquad(lib.f, rng)[0] for rng in ranges])
    dlclose(lib._handle) # this is to release the lib, so we can import the new version
    try:
        os.remove(tmpfile+'.c') # clean up
        os.remove(tmpfile+'.dylib') # clean up
    except FileNotFoundError: pass
    return result
def create_random_gammas(covs, Lbnd, dist=uniform()):
    """ Creates a random scaling factor gamma for each of the covariance matrices
        in the array like 'covs', based on the Lipschitz bound Lbnd. Here dist is a
        'frozen' scipy.stats probability distribution supported on [0,1].

        Each gamma is drawn uniformly-at-dist in [0, gammamax], where gammamax
        is computed from the smallest eigenvalue of the covariance matrix. """
    # compute upper bound for scaling
    gammas = np.zeros(covs.shape[0])
    for i, cov in enumerate(covs):
        # smallest eigenvalue of the (symmetric) covariance matrix.
        # fix: replaces scipy.linalg.eigh(cov, eigvals=(0,0), eigvals_only=True),
        # whose 'eigvals' keyword was removed in SciPy 1.9; np.linalg.eigvalsh
        # returns eigenvalues in ascending order so index 0 is the minimum.
        lambdamin = np.linalg.eigvalsh(cov)[0]
        gammamax = np.sqrt(lambdamin*np.e)*Lbnd
        gammas[i] = gammamax*dist.rvs(1)
    return gammas
def create_random_Sigmas(n, N, L, M, dist=gamma(2, scale=2)):
    """ Creates N random nxn covariance matrices s.t. the Lipschitz
        constants are uniformly bounded by L. Here dist is a 'frozen'
        scipy.stats probability distribution supported on R+ """
    # smallest admissible eigenvalue: anything below this would violate the
    # uniform Lipschitz bound L (given the density bound M)
    lambdamin = ((2*np.pi)**n*np.e*L**2/M**2)**(-1.0/(n+1))
    Sigmas = []
    for _ in range(N):
        # random orthonormal frame from an orthogonalized random matrix
        frame = orth(np.random.uniform(size=(n, n)))
        # eigenvalues drawn from dist, shifted to stay above lambdamin
        evals = lambdamin + dist.rvs(n)
        # Sigma = sum_k evals[k] * v_k v_k^T with v_k the k-th row of frame,
        # assembled in one shot as (frame^T * evals) @ frame
        Sigmas.append(np.dot(frame.T * evals, frame))
    return np.array(Sigmas)
def create_random_Q(domain, mu, L, M, pd=True, H=0, dist=uniform()):
    """ Creates random symmetric nxn matrix s.t. the Lipschitz constant of the
        resulting quadratic function is bounded by L. Here M is the uniform
        bound on the maximal loss. If pd is True, then the matrix is pos.
        definite and H is a lower bound on the eigenvalues of Q. Finally, dist
        is a 'frozen' scipy.stats probability distribution supported on [0,1]. """
    n = domain.n
    # largest admissible eigenvalue, limited by both the Lipschitz bound L and
    # the loss bound M; Dmu measures how far mu can be from points of the domain
    Dmu = domain.compute_Dmu(mu)
    lambdamax = np.min((L/Dmu, 2*M/Dmu**2))
    # random orthonormal frame from an orthogonalized random matrix
    frame = orth(np.random.uniform(size=(n, n)))
    if pd:
        # positive definite: eigenvalues lie in [H, lambdamax]
        evals = H + (lambdamax-H)*dist.rvs(n)
    else:
        # indefinite: attach random +/- signs to the scaled eigenvalues
        evals = np.random.choice((-1, 1), size=n)*lambdamax*dist.rvs(n)
    # Q = frame @ diag(evals) @ frame^T, assembled as (frame * evals) @ frame^T
    return np.dot(frame*evals, frame.T), lambdamax
def create_random_Cs(covs, dist=uniform()):
    """ Creates a random offset C for each of the covariance matrices in the
        array-like 'covs'. Here dist is a 'frozen' scipy.stats probability
        distribution supported on [0,1].

        Each offset is pmax + x with x ~ dist, where pmax is the peak value of
        the Gaussian density with that covariance, so C[i] >= pmax always. """
    C = np.zeros(covs.shape[0])
    for i, cov in enumerate(covs):
        # peak of the multivariate normal density with covariance cov
        pmax = ((2*np.pi)**covs.shape[1]*np.linalg.det(cov))**(-0.5)
        # fix: the original ignored the documented `dist` parameter and always
        # sampled np.random.uniform(pmax, 1+pmax); honoring dist is identical
        # in distribution for the default dist=uniform() on [0,1]
        C[i] = pmax + dist.rvs(1)
    return C
def random_AffineLosses(dom, L, T, d=2):
    """ Creates T random L-Lipschitz AffineLossFunction over domain dom,
        and returns uniform bound M. For now sample the a-vector uniformly
        from the n-ball. Uses random samples of Beta-like distributions as
        described in the function sample_Bnrd. """
    lossfuncs, maxima = [], []
    for avec in sample_Bnrd(dom.n, L, d, T):
        func = AffineLossFunction(dom, avec, 0)
        fmin, fmax = func.min(), func.max()
        # shift the offset so the loss is non-negative over the domain
        func.b = - fmin
        lossfuncs.append(func)
        maxima.append(fmax - fmin)
    return lossfuncs, np.max(maxima)
def sample_Bnrd(n, r, d, N):
    """ Draw N independent samples from the B_n(r,d) distribution
        discussed in:
        'R. Harman and V. Lacko. On decompositional algorithms for uniform
        sampling from n-spheres and n-balls. Journal of Multivariate
        Analysis, 101(10):2297 - 2304, 2010.'
    """
    # radial part: sqrt of a Beta(n/2, d/2) sample scales the radius
    radial = np.sqrt(np.random.beta(n/2, d/2, size=N))
    # directional part: standard normals projected onto the unit sphere
    gauss = np.random.randn(N, n)
    unit = gauss/np.linalg.norm(gauss, 2, axis=1)[:, np.newaxis]
    return r*radial[:, np.newaxis]*unit
def random_QuadraticLosses(dom, mus, L, M, pd=True, H=0, dist=uniform()):
    """ Creates T random L-Lipschitz QuadraticLossFunctions over the domain dom.
        Here mus is an iterable of means (of length T), L and M are uniform bounds
        on Lipschitz constant and dual norm of the losses. H is the strong convexity
        parameter (here this is a lower bound on the evals of Q). dist is a 'frozen'
        scipy distribution object from which to draw the values of the evals.
    """
    lossfuncs, maxima, lambdamaxs = [], [], []
    for mu in mus:
        Q, lambdamax = create_random_Q(dom, mu, L, M, pd, H, dist)
        # shift by the (negative of the) minimum so the loss is non-negative
        offset = -QuadraticLossFunction(dom, mu, Q, 0).min()
        func = QuadraticLossFunction(dom, mu, Q, offset)
        lossfuncs.append(func)
        maxima.append(func.max())
        lambdamaxs.append(lambdamax)
    return lossfuncs, np.max(maxima), np.max(lambdamaxs)
def isPosDef(Q):
    """ Return True if the numpy array Q is positive definite (i.e. it has a
        Cholesky factorization), and False otherwise. """
    try:
        np.linalg.cholesky(Q)
    except np.linalg.LinAlgError:
        # factorization fails exactly when Q is not positive definite
        return False
    return True
def random_PolynomialLosses(dom, T, M, L, m_max, exponents, dist=uniform(), high_ratio=False):
    """ Creates T random L-Lipschitz PolynomialLossFunctions uniformly bounded
        (in dual norm) by M, with Lipschitz constant uniformly bounded by L.
        Here exponents is a (finite) set of possible exponents.
        This is a brute force implementation and horribly inefficient.
    """
    lossfuncs = []
    while len(lossfuncs) < T:
        # sampling weights over the candidate exponents: uniform when
        # high_ratio is set, otherwise biased towards the larger exponents
        if high_ratio:
            weights = np.ones(len(exponents))
        else:
            weights = np.linspace(1, 10, len(exponents))
        # draw a random number (between 2 and m_max-1) of random monomials
        expon = [tuple(np.random.choice(exponents, size=dom.n, p=weights/np.sum(weights)))
                 for i in range(np.random.choice(np.arange(2,m_max)))]
        if high_ratio:
            # coefficient magnitude scaled by the largest exponent of the monomial
            coeffs = np.array([uniform(scale=np.max(expo)).rvs(1) for expo in expon]).flatten()
        else:
            coeffs = dist.rvs(len(expon))
        lossfunc = PolynomialLossFunction(dom, coeffs, expon)
        # range and gradient bound of the candidate (computed by gridding)
        Ml, Ll = lossfunc.max(grad=True)
        ml = lossfunc.min()
        # rescale so that both the range bound M and the Lipschitz bound L hold
        if (Ml-ml)>0:
            scaling = dist.rvs()*np.minimum(M/(Ml-ml), L/Ll)
        else:
            scaling = 1
        # shift by -ml (adding a constant polynomial) so the loss is
        # non-negative, then apply the scaling
        lossfuncs.append(scaling*(lossfunc + PolynomialLossFunction(dom, [-ml], [(0,)*dom.n])))
    return lossfuncs
| mit |
BDannowitz/polymath-progression-blog | jlab-hackathon/loadCode.py | 1 | 3811 | #!/usr/bin/env python
#
#
# ex_toy4
#
# Building on toy3 example, this adds drift distance
# information to the pixel color. It also adds a
# random z-vertex position in addition to the phi
# angle.
#
# The network is defined with 2 branches to calculate
# the phi and z. They share a common input layer and
# initial Dense layer then implement their own dense
# layers.
#
# Another difference from toy3 is that a final dense
# layer with a single neuron is added to each of the
# branches to calculate phi(z) parameters directly
# rather than doing that outside of the network. To
# help this, the weights feeding that last neuron are
# set to fixed weights (bin centers) and are marked
# as non-trainable.
#
import os
import sys
import gzip
import pandas as pd
import numpy as np
import math
# If running on Google Colaboratory you can uncomment the
# following and modify to use your Google Drive space.
#from google.colab import drive
#drive.mount('/content/gdrive')
#workdir = '/content/gdrive/My Drive/work/2019.03.26.trackingML/eff100_inverted'
#os.chdir( workdir )
# Dimensions (in pixels) of each raw input image; must match the data files
# read by generate_arrays_from_file below.
width = 36
height = 100
# Open labels files so we can get number of samples and pass the
# data frames to the generators later
traindf = pd.read_csv('TRAIN/track_parms.csv')
validdf = pd.read_csv('VALIDATION/track_parms.csv')
# NOTE(review): BS (the batch size) is not defined anywhere in this file --
# it must be defined before this point or these lines raise NameError; confirm.
# Also note the float division: Keras' steps_per_epoch expects an integer.
STEP_SIZE_TRAIN = len(traindf)/BS
STEP_SIZE_VALID = len(validdf)/BS
#-----------------------------------------------------
# generate_arrays_from_file
#-----------------------------------------------------
# Create generator to read in images and labels
# (used for both training and validation samples)
def generate_arrays_from_file( path, labelsdf ):
    """ Generator yielding (images, labels) training batches forever.

        Reads width*height-byte raw grayscale images from
        `path`/images.raw.gz, normalizes them to [0,1], and pairs image idx
        with row idx of `labelsdf` (so the labels CSV must be in the same
        order as the images -- TODO confirm). Each yield is
        (np.array of BS images, {'phi_output': ..., 'z_output': ...}),
        keyed by the network's two output-layer names. The file is re-opened
        and re-read from the start whenever the end is reached.
        Relies on the module-level globals width, height and BS. """
    images_path = path+'/images.raw.gz'
    print( 'generator created for: ' + images_path)
    batch_input      = []
    batch_labels_phi = []
    batch_labels_z   = []
    idx    = 0
    ibatch = 0
    while True:  # loop forever, re-reading images from same file
        with gzip.open(images_path) as f:
            while True:  # loop over images in file
                # Read in one image
                # (renamed from `bytes`, which shadowed the builtin)
                raw = f.read(width*height)
                if len(raw) != (width*height): break  # EOF: break out so the file is re-opened
                data = np.frombuffer(raw, dtype='B', count=width*height)
                pixels = np.reshape(data, [width, height, 1], order='F')
                # fix: np.float was removed in NumPy 1.24; builtin float is
                # the equivalent (float64) dtype
                pixels_norm = np.transpose(pixels.astype(float) / 255., axes=(1, 0, 2) )
                # Labels
                phi = labelsdf.phi[idx]
                z   = labelsdf.z[idx]
                idx += 1
                # Add to batch and check if it is time to yield
                batch_input.append( pixels_norm )
                batch_labels_phi.append( phi )
                batch_labels_z.append( z )
                if len(batch_input) == BS :
                    ibatch += 1
                    # Since we are training multiple loss functions we must
                    # pass the labels back as a dictionary whose keys match
                    # the layer their corresponding values are being applied
                    # to.
                    labels_dict = {
                        'phi_output' : np.array(batch_labels_phi ),
                        'z_output'   : np.array(batch_labels_z   ),
                    }
                    yield ( np.array(batch_input), labels_dict )
                    batch_input      = []
                    batch_labels_phi = []
                    batch_labels_z   = []
        # end of file: restart the label index so images and labels stay
        # aligned on the next pass (the `with` block already closed the
        # file, so the original redundant f.close() was dropped)
        idx = 0
#===============================================================================

# Create training generator
# (generators are lazy: no file I/O happens until Keras pulls the first batch)
train_generator = generate_arrays_from_file('TRAIN', traindf)
| gpl-2.0 |
bachiraoun/fullrmc | Constraints/PairDistributionConstraints.py | 1 | 71823 | """
PairDistributionConstraints contains classes for all constraints related
to experimental pair distribution functions.
.. inheritance-diagram:: fullrmc.Constraints.PairDistributionConstraints
:parts: 1
"""
# standard libraries imports
from __future__ import print_function
import itertools, inspect, copy, os, re
# external libraries imports
import numpy as np
from pdbparser.Utilities.Database import is_element_property, get_element_property
from pdbparser.Utilities.Collection import get_normalized_weighting
# fullrmc imports
from ..Globals import INT_TYPE, FLOAT_TYPE, PI, PRECISION, LOGGER
from ..Globals import str, long, unicode, bytes, basestring, range, xrange, maxint
from ..Core.Collection import is_number, is_integer, get_path
from ..Core.Collection import reset_if_collected_out_of_date, get_real_elements_weight
from ..Core.Collection import get_caller_frames
from ..Core.Constraint import Constraint, ExperimentalConstraint
from ..Core.pairs_histograms import multiple_pairs_histograms_coords, full_pairs_histograms_coords
from ..Constraints.Collection import ShapeFunction
class PairDistributionConstraint(ExperimentalConstraint):
"""
Controls the total reduced pair distribution function (pdf) of atomic
configuration noted as G(r). The pair distribution function is directly
calculated from powder diffraction experimental data. It is obtained
from the experimentally determined total-scattering structure
function S(Q), by a Sine Fourier transform according to.
.. math::
G(r) = \\frac{2}{\\pi} \\int_{0}^{\\infty} Q [S(Q)-1]sin(Qr)dQ \n
S(Q) = 1+ \\frac{1}{Q} \\int_{0}^{\\infty} G(r) sin(Qr) dr
Theoretically G(r) oscillates about zero. Also :math:`G(r) \\rightarrow 0`
when :math:`r \\rightarrow \\infty` and :math:`G(r) \\rightarrow 0` when
:math:`r \\rightarrow 0` with a slope of :math:`-4\\pi\\rho_{0}`
where :math:`\\rho_{0}` is the number density of the material. \n
Model wise, G(r) is computed after calculating the so called Pair
Correlation Function noted as g(r). The relation between G(r) and g(r)
is given by\n
.. math::
G(r) = 4 \\pi r (\\rho_{r} - \\rho_{0})
= 4 \\pi \\rho_{0} r (g(r)-1)
= \\frac{R(r)}{r} - 4 \\pi r \\rho_{0}
:math:`\\rho_{r}` is the number density fluctuation at distance :math:`r`.
The computation of g(r) is straightforward from an atomistic model and it
is given by :math:`g(r)=\\rho_{r} / \\rho_{0}`.\n
The radial distribution function noted :math:`R(r)` is a very important
function because it describes directly the system's structure.
:math:`R(r)dr` gives the number of atoms in an annulus of thickness
dr at distance r from another atom. Therefore, the coordination number,
or the number of neighbors within the distances interval :math:`[a,b]`
is given by :math:`\\int_{a}^{b} R(r) dr`\n
Finally, g(r) is calculated after binning all pair atomic distances into
a weighted histograms of values :math:`n(r)` from which local number
densities are computed as the following:
.. math::
g(r) = \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{\\rho_{i,j}(r)}{\\rho_{0}}
= \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{n_{i,j}(r) / v(r)}{N_{i,j} / V}
Where:\n
:math:`Q` is the momentum transfer. \n
:math:`r` is the distance between two atoms. \n
:math:`\\rho_{i,j}(r)` is the pair density function of atoms i and j. \n
:math:`\\rho_{0}` is the average number density of the system. \n
:math:`w_{i,j}` is the relative weighting of atom types i and j. \n
:math:`R(r)` is the radial distribution function (rdf). \n
:math:`N` is the total number of atoms. \n
:math:`V` is the volume of the system. \n
:math:`n_{i,j}(r)` is the number of atoms i neighbouring j at a distance r. \n
:math:`v(r)` is the annulus volume at distance r and of thickness dr. \n
:math:`N_{i,j}` is the total number of atoms i and j in the system. \n
+----------------------------------------------------------------------+
|.. figure:: pair_distribution_constraint_plot_method.png |
| :width: 530px |
| :height: 400px |
| :align: left |
+----------------------------------------------------------------------+
:Parameters:
#. experimentalData (numpy.ndarray, string): Experimental data as
numpy.ndarray or string path to load data using numpy.loadtxt.
#. dataWeights (None, numpy.ndarray): Weights array of the same number
of points of experimentalData used in the constraint's standard
error computation. Therefore particular fitting emphasis can be
put on different data points that might be considered as more or less
important in order to get a reasonable and plausible modal.\n
If None is given, all data points are considered of the same
importance in the computation of the constraint's standard error.\n
If numpy.ndarray is given, all weights must be positive and all
zeros weighted data points won't contribute to the total
constraint's standard error. At least a single weight point is
required to be non-zeros and the weights array will be automatically
scaled upon setting such as the the sum of all the weights
is equal to the number of data points.
#. weighting (string): The elements weighting scheme. It must be any
atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius) defined
in pdbparser database. In case of xrays or neutrons experimental
weights, one can simply set weighting to 'xrays' or 'neutrons'
and the value will be automatically adjusted to respectively
'atomicNumber' and 'neutronCohb'. If attribute values are
missing in the pdbparser database, atomic weights must be
given in atomsWeight dictionary argument.
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
atoms element and values are custom weights. If None is given
or partially given, missing elements weighting will be fully set
given weighting scheme.
#. scaleFactor (number): A normalization scale factor used to normalize
the computed data to the experimental ones.
#. adjustScaleFactor (list, tuple): Used to adjust fit or guess
the best scale factor during stochastic engine runtime.
It must be a list of exactly three entries.\n
#. The frequency in number of generated moves of finding the best
scale factor. If 0 frequency is given, it means that the scale
factor is fixed.
#. The minimum allowed scale factor value.
#. The maximum allowed scale factor value.
#. shapeFuncParams (None, numpy.ndarray, dict): The shape function is
subtracted from the total G(r). It must be used when non-periodic
boundary conditions are used to take into account the atomic
density drop and to correct for the :math:`\\rho_{0}` approximation.
The shape function can be set to None which means unsused, or set
as a constant shape given by a numpy.ndarray or computed
from all atoms and updated every 'updateFreq' accepted moves.
If dict is given the following keywords can be given, otherwise
default values will be automatically set.\n
* **rmin (number) default (0.00) :** The minimum distance in
:math:`\\AA` considered upon building the histogram prior to
computing the shape function. If not defined, rmin will be
set automatically to 0.
* **rmax (None, number) default (None) :** The maximum distance
in :math:`\\AA` considered upon building the histogram prior
to computing the shape function. If not defnined, rmax will
be automatically set to :math:`maximum\ box\ length + 10\\AA`
at engine runtime.
* **dr (number) default (0.5) :** The bin size in :math:`\\AA`
considered upon building the histogram prior to computing the
shape function. If not defined, it will be automatically set
to 0.5.
* **qmin (number) default (0.001) :** The minimum reciprocal
distance q in :math:`\\AA^{-1}` considered to compute the
shape function. If not defined, it will be automatically
set to 0.001.
* **qmax (number) default (0.75) :** The maximum reciprocal
distance q in :math:`\\AA^{-1}` considered to compute the
shape function. If not defined, it will be automatically
set to 0.75.
* **dq (number) default (0.005) :** The reciprocal distance bin
size in :math:`\\AA^{-1}` considered to compute the shape
function. If not defined, it will be automatically
set to 0.005.
* **updateFreq (integer) default (1000) :** The frequency of
recomputing the shape function in number of accpeted moves.
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
artificial wrinkles, among others the main reason is because
G(r) is computed by applying a sine Fourier transform to the
experimental structure factor S(q). Therefore window function is
used to best imitate the numerical artefacts in the experimental
data.
#. limits (None, tuple, list): The distance limits to compute the
histograms. If None is given, the limits will be automatically
set the the min and max distance of the experimental data.
Otherwise, a tuple of exactly two items where the first is the
minimum distance or None and the second is the maximum distance
or None.
**NB**: If adjustScaleFactor first item (frequency) is 0, the scale factor
will remain untouched and the limits minimum and maximum won't be checked.
.. code-block:: python
# import fullrmc modules
from fullrmc.Engine import Engine
from fullrmc.Constraints.PairDistributionConstraints import PairDistributionConstraint
# create engine
ENGINE = Engine(path='my_engine.rmc')
# set pdb file
ENGINE.set_pdb('system.pdb')
# create and add constraint
PDC = PairDistributionConstraint(experimentalData="pdf.dat", weighting="atomicNumber")
ENGINE.add_constraints(PDC)
"""
def __init__(self, experimentalData, dataWeights=None, weighting="atomicNumber",
             atomsWeight=None, scaleFactor=1.0, adjustScaleFactor=(0, 0.8, 1.2),
             shapeFuncParams=None, windowFunction=None, limits=None):
    """ Initialize the pair distribution constraint.

        See the class docstring for the full description of every
        parameter; all of them are simply forwarded to the corresponding
        set_* methods (or to the parent ExperimentalConstraint). """
    # initialize constraint
    super(PairDistributionConstraint, self).__init__(experimentalData=experimentalData, dataWeights=dataWeights, scaleFactor=scaleFactor, adjustScaleFactor=adjustScaleFactor)
    # set elements weighting
    self.set_weighting(weighting)
    # set atomsWeight
    self.set_atoms_weight(atomsWeight)
    # set window function
    self.set_window_function(windowFunction)
    # set shape function parameters
    self.set_shape_function_parameters(shapeFuncParams)
    # set frame data: register this constraint's private attributes so the
    # engine knows what to persist per frame / per runtime step
    FRAME_DATA = [d for d in self.FRAME_DATA]
    FRAME_DATA.extend(['_PairDistributionConstraint__bin',
                       '_PairDistributionConstraint__minimumDistance',
                       '_PairDistributionConstraint__maximumDistance',
                       '_PairDistributionConstraint__shellCenters',
                       '_PairDistributionConstraint__edges',
                       '_PairDistributionConstraint__histogramSize',
                       '_PairDistributionConstraint__experimentalDistances',
                       '_PairDistributionConstraint__experimentalPDF',
                       '_PairDistributionConstraint__shellVolumes',
                       '_PairDistributionConstraint__elementsPairs',
                       '_PairDistributionConstraint__weighting',
                       '_PairDistributionConstraint__atomsWeight',
                       '_PairDistributionConstraint__weightingScheme',
                       '_PairDistributionConstraint__windowFunction',
                       '_elementsWeight',
                       '_shapeFuncParams',
                       '_shapeUpdateFreq',
                       '_shapeArray', ] )
    RUNTIME_DATA = [d for d in self.RUNTIME_DATA]
    RUNTIME_DATA.extend( ['_shapeArray'] )
    # object.__setattr__ is used to bypass the attribute handling of the
    # Constraint base class -- presumably it overrides __setattr__; confirm
    object.__setattr__(self, 'FRAME_DATA', tuple(FRAME_DATA) )
    object.__setattr__(self, 'RUNTIME_DATA', tuple(RUNTIME_DATA) )
def _codify_update__(self, name='constraint', addDependencies=True):
    """ Generate python code that re-applies this constraint's current
        settings by calling the set_* methods on an existing instance
        bound to `name`. Returns (dependencies, code) where code is a
        single newline-joined string. """
    dependencies = []
    code = []
    if addDependencies:
        code.extend(dependencies)
    # serialize numpy arrays to plain lists so the generated code is portable
    dw = self.dataWeights
    if dw is not None:
        dw = list(dw)
    code.append("dw = {dw}".format(dw=dw))
    sfp = self._shapeFuncParams
    if isinstance(sfp, np.ndarray):
        sfp = list(sfp)
    code.append("sfp = {sfp}".format(sfp=sfp))
    wf = self.windowFunction
    if isinstance(wf, np.ndarray):
        # re-create the window function as a numpy array in the generated code
        code.append("wf = np.array({wf})".format(wf=list(wf)))
    else:
        code.append("wf = {wf}".format(wf=wf))
    # one setter call per runtime-adjustable property
    code.append("{name}.set_used({val})".format(name=name, val=self.used))
    code.append("{name}.set_scale_factor({val})".format(name=name, val=self.scaleFactor))
    code.append("{name}.set_adjust_scale_factor({val})".format(name=name, val=self.adjustScaleFactor))
    code.append("{name}.set_data_weights(dw)".format(name=name))
    code.append("{name}.set_atoms_weight({val})".format(name=name, val=self.atomsWeight))
    code.append("{name}.set_window_function(wf)".format(name=name))
    code.append("{name}.set_shape_function_parameters(sfp)".format(name=name))
    code.append("{name}.set_limits({val})".format(name=name, val=self.limits))
    # return
    return dependencies, '\n'.join(code)
    def _codify__(self, engine, name='constraint', addDependencies=True):
        """Codify the creation of this constraint and its addition to the
        given engine variable name. Returns (dependencies, code) where
        dependencies is a list of import statements and code is an
        executable python string that rebuilds the constraint from its
        current experimental data and settings.
        """
        assert isinstance(name, basestring), LOGGER.error("name must be a string")
        assert re.match('[a-zA-Z_][a-zA-Z0-9_]*$', name) is not None, LOGGER.error("given name '%s' can't be used as a variable name"%name)
        klass = self.__class__.__name__
        dependencies = ['import numpy as np','from fullrmc.Constraints import {klass}s'.format(klass=klass)]
        code = []
        if addDependencies:
            code.extend(dependencies)
        # experimental data serialized as two plain lists then re-assembled
        x = list(self.experimentalData[:,0])
        y = list(self.experimentalData[:,1])
        code.append("x = {x}".format(x=x))
        code.append("y = {y}".format(y=y))
        code.append("d = np.transpose([x,y]).astype(np.float32)")
        # data weights serialized as a list (or None)
        dw = self.dataWeights
        if dw is not None:
            dw = list(dw)
        code.append("dw = {dw}".format(dw=dw))
        # shape function parameters: a constant ndarray is serialized as a list
        sfp = self._shapeFuncParams
        if isinstance(sfp, np.ndarray):
            sfp = list(sfp)
        code.append("sfp = {sfp}".format(sfp=sfp))
        # window function: an ndarray must be reconstructed with np.array
        wf = self.windowFunction
        if isinstance(wf, np.ndarray):
            code.append("wf = np.array({wf})".format(wf=list(wf)))
        else:
            code.append("wf = {wf}".format(wf=wf))
        # NOTE(review): the backslash continuations below are INSIDE the string
        # literal, so the leading spaces of the continuation lines become part
        # of the generated code string. Do not re-indent these lines.
        code.append("{name} = {klass}s.{klass}\
             (experimentalData=d, dataWeights=dw, weighting='{weighting}', atomsWeight={atomsWeight}, \
             scaleFactor={scaleFactor}, adjustScaleFactor={adjustScaleFactor}, \
             shapeFuncParams=sfp, windowFunction=wf, limits={limits})".format(name=name, klass=klass,
                weighting=self.weighting, atomsWeight=self.atomsWeight,
                scaleFactor=self.scaleFactor, adjustScaleFactor=self.adjustScaleFactor,
                shapeFuncParams=sfp, limits=self.limits))
        code.append("{engine}.add_constraints([{name}])".format(engine=engine, name=name))
        # return
        return dependencies, '\n'.join(code)
    def _on_collector_reset(self):
        """Atoms-collector reset hook. Nothing to do for this constraint."""
        pass
    def _update_shape_array(self):
        """(Re)compute the shape function array from the current engine
        configuration and dump it to the repository. Called at runtime
        initialization and then every shapeUpdateFreq accepted moves."""
        rmin = self._shapeFuncParams['rmin']
        rmax = self._shapeFuncParams['rmax']
        dr   = self._shapeFuncParams['dr' ]
        qmin = self._shapeFuncParams['qmin']
        qmax = self._shapeFuncParams['qmax']
        dq   = self._shapeFuncParams['dq' ]
        if rmax is None:
            # rmax was not given: derive it from the system size
            if self.engine.isPBC:
                # periodic: largest box vector length plus a 10 A margin
                a = self.engine.boundaryConditions.get_a()
                b = self.engine.boundaryConditions.get_b()
                c = self.engine.boundaryConditions.get_c()
                rmax = FLOAT_TYPE( np.max([a,b,c]) + 10 )
            else:
                # infinite boundary conditions: twice the maximum distance
                # from the geometric center, plus a 10 A margin
                coordsCenter = np.sum(self.engine.realCoordinates, axis=0)/self.engine.realCoordinates.shape[0]
                coordinates  = self.engine.realCoordinates-coordsCenter
                distances    = np.sqrt( np.sum(coordinates**2, axis=1) )
                maxDistance  = 2.*np.max(distances)
                rmax = FLOAT_TYPE( maxDistance + 10 )
                LOGGER.warn("@%s Better set shape function rmax with infinite boundary conditions. Here value is automatically set to %s"%(self.engine.usedFrame, rmax))
        shapeFunc  = ShapeFunction(engine    = self.engine,
                                   weighting = self.__weighting,
                                   qmin=qmin, qmax=qmax, dq=dq,
                                   rmin=rmin, rmax=rmax, dr=dr)
        self._shapeArray = shapeFunc.get_Gr_shape_function( self.shellCenters )
        # free the helper immediately; it holds references to engine data
        del shapeFunc
        # dump to repository
        self._dump_to_repository({'_shapeArray': self._shapeArray})
    def _reset_standard_error(self):
        """Recompute the total G(r) from the current constraint data and
        reset the standard error accordingly. No-op when data has not been
        computed yet."""
        # recompute squared deviation
        if self.data is not None:
            totalPDF = self.__get_total_Gr(self.data, rho0=self.engine.numberDensity)
            self.set_standard_error(self.compute_standard_error(modelData = totalPDF))
def _runtime_initialize(self):
if self._shapeFuncParams is None:
self._shapeArray = None
elif isinstance(self._shapeFuncParams, np.ndarray):
self._shapeArray = self._shapeFuncParams[self.limitsIndexStart:self.limitsIndexEnd+1]
elif isinstance(self._shapeFuncParams, dict) and self._shapeArray is None:
self._update_shape_array()
# reset standard error
self._reset_standard_error()
# set last shape update flag
self._lastShapeUpdate = self.engine.accepted
def _runtime_on_step(self):
""" Update shape function when needed. and update engine total """
if self._shapeUpdateFreq and self._shapeFuncParams is not None:
if (self._lastShapeUpdate != self.engine.accepted) and not (self.engine.accepted%self._shapeUpdateFreq):
# reset shape array
self._update_shape_array()
# reset standard error
self._reset_standard_error()
# update engine chiSquare
oldTotalStandardError = self.engine.totalStandardError
self.engine.update_total_standard_error()
LOGGER.info("@%s Constraint '%s' shape function updated, engine total standard error updated from %.6f to %.6f" %(self.engine.usedFrame, self.__class__.__name__, oldTotalStandardError, self.engine.totalStandardError))
self._lastShapeUpdate = self.engine.accepted
    @property
    def bin(self):
        """ Experimental data distances bin (spacing between consecutive
        experimental distance points)."""
        return self.__bin
    @property
    def minimumDistance(self):
        """ Experimental data minimum distances."""
        return self.__minimumDistance
    @property
    def maximumDistance(self):
        """ Experimental data maximum distances."""
        return self.__maximumDistance
    @property
    def histogramSize(self):
        """ Histogram size (number of distance shells between the limits)."""
        return self.__histogramSize
    @property
    def experimentalDistances(self):
        """ Experimental distances array."""
        return self.__experimentalDistances
    @property
    def shellCenters(self):
        """ Shells center array."""
        return self.__shellCenters
    @property
    def shellVolumes(self):
        """ Shells volume array."""
        return self.__shellVolumes
    @property
    def experimentalPDF(self):
        """ Experimental pair distribution function data."""
        return self.__experimentalPDF
    @property
    def elementsPairs(self):
        """ Elements pairs."""
        return self.__elementsPairs
    @property
    def weighting(self):
        """ Elements weighting definition."""
        return self.__weighting
    @property
    def atomsWeight(self):
        """Custom atoms weight"""
        return self.__atomsWeight
    @property
    def weightingScheme(self):
        """ Elements weighting scheme."""
        return self.__weightingScheme
    @property
    def windowFunction(self):
        """ Window function."""
        return self.__windowFunction
    @property
    def shapeArray(self):
        """ Shape function data array (None until computed or given)."""
        return self._shapeArray
    @property
    def shapeUpdateFreq(self):
        """Shape function update frequency in accepted moves (0 disables
        runtime updating)."""
        return self._shapeUpdateFreq
    def _set_weighting_scheme(self, weightingScheme):
        """To be only used internally by PairDistributionConstraint"""
        # direct assignment: no validation and no repository dump performed here
        self.__weightingScheme = weightingScheme
    @property
    def _experimentalX(self):
        """For internal use only to interface
        ExperimentalConstraint.get_constraints_properties"""
        return self.__experimentalDistances
    @property
    def _experimentalY(self):
        """For internal use only to interface
        ExperimentalConstraint.get_constraints_properties"""
        return self.__experimentalPDF
    @property
    def _modelX(self):
        """For internal use only to interface
        ExperimentalConstraint.get_constraints_properties"""
        return self.__shellCenters
    def listen(self, message, argument=None):
        """
        Listens to any message sent from the Broadcaster.

        :Parameters:
            #. message (object): Any python object to send to constraint's
               listen method.
            #. argument (object): Any type of argument to pass to the listeners.
        """
        if message in ("engine set","update pdb","update molecules indexes","update elements indexes","update names indexes"):
            if self.engine is not None:
                # recompute pairs and weighting from the new engine composition
                self.__elementsPairs = sorted(itertools.combinations_with_replacement(self.engine.elements,2))
                self._elementsWeight = get_real_elements_weight(elements=self.engine.elements, weightsDict=self.__atomsWeight, weighting=self.__weighting)
                self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight)
                for k in self.__weightingScheme:
                    self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
            else:
                # no engine: invalidate all composition-derived state
                self.__elementsPairs = None
                self._elementsWeight = None
                self.__weightingScheme = None
            # dump to repository
            self._dump_to_repository({'_PairDistributionConstraint__elementsPairs' : self.__elementsPairs,
                                      '_PairDistributionConstraint__weightingScheme': self.__weightingScheme,
                                      '_elementsWeight': self._elementsWeight})
            self.reset_constraint() # ADDED 2017-JAN-08
        elif message in("update boundary conditions",):
            # cell change invalidates all computed histograms
            self.reset_constraint()
def set_shape_function_parameters(self, shapeFuncParams):
"""
Set the shape function. The shape function can be set to None which
means unsused, or set as a constant shape given by a numpy.ndarray or
computed from all atoms and updated every 'updateFreq' accepted moves.
The shape function is subtracted from the total G(r). It must be used
when non-periodic boundary conditions are used to take into account
the atomic density drop and to correct for the :math:`\\rho_{0}`
approximation.
:Parameters:
#. shapeFuncParams (None, numpy.ndarray, dict): The shape function
is subtracted from the total G(r). It must be used when
non-periodic boundary conditions are used to take into account
the atomic density drop and to correct for the :math:`\\rho_{0}`
approximation. The shape function can be set to None which means
unsused, or set as a constant shape given by a numpy.ndarray or
computed from all atoms and updated every 'updateFreq' accepted
moves. If dict is given the following keywords can be given,
otherwise default values will be automatically set.\n
* **rmin (number) default (0.00) :** The minimum distance in
:math:`\\AA` considered upon building the histogram prior to
computing the shape function. If not defined, rmin will be
set automatically to 0.
* **rmax (None, number) default (None) :** The maximum distance
in :math:`\\AA` considered upon building the histogram prior
to computing the shape function. If not defnined, rmax will
be automatically set to :math:`maximum\ box\ length + 10\\AA`
at engine runtime.
* **dr (number) default (0.5) :** The bin size in :math:`\\AA`
considered upon building the histogram prior to computing the
shape function. If not defined, it will be automatically set
to 0.5.
* **qmin (number) default (0.001) :** The minimum reciprocal
distance q in :math:`\\AA^{-1}` considered to compute the
shape function. If not defined, it will be automatically
set to 0.001.
* **qmax (number) default (0.75) :** The maximum reciprocal
distance q in :math:`\\AA^{-1}` considered to compute the
shape function. If not defined, it will be automatically
set to 0.75.
* **dq (number) default (0.005) :** The reciprocal distance bin
size in :math:`\\AA^{-1}` considered to compute the shape
function. If not defined, it will be automatically
set to 0.005.
* **updateFreq (integer) default (1000) :** The frequency of
recomputing the shape function in number of accpeted moves.
"""
self._shapeArray = None
if shapeFuncParams is None:
self._shapeFuncParams = None
self._shapeUpdateFreq = 0
elif isinstance(shapeFuncParams, dict):
rmin = FLOAT_TYPE( shapeFuncParams.get('rmin',0.00 ) )
rmax = shapeFuncParams.get('rmax',None )
dr = FLOAT_TYPE( shapeFuncParams.get('dr' ,0.5 ) )
qmin = FLOAT_TYPE( shapeFuncParams.get('qmin',0.001) )
qmax = FLOAT_TYPE( shapeFuncParams.get('qmax',0.75 ) )
dq = FLOAT_TYPE( shapeFuncParams.get('dq' ,0.005) )
self._shapeFuncParams = {'rmin':rmin, 'rmax':rmax, 'dr':dr,
'qmin':qmin, 'qmax':qmax, 'dq':dq }
self._shapeUpdateFreq = INT_TYPE( shapeFuncParams.get('updateFreq',1000) )
else:
assert isinstance(shapeFuncParams, (list,tuple,np.ndarray)), LOGGER.error("shapeFuncParams must be None, numpy.ndarray or a dictionary")
try:
shapeArray = np.array(shapeFuncParams)
except:
raise LOGGER.error("constant shapeFuncParams must be numpy.ndarray castable")
assert len(shapeFuncParams.shape) == 1, LOGGER.error("numpy.ndarray shapeFuncParams must be of dimension 1")
assert shapeFuncParams.shape[0] == self.experimentalData.shape[0], LOGGER.error("numpy.ndarray shapeFuncParams must have the same experimental data length")
for n in shapeFuncParams:
assert is_number(n), LOGGER.error("numpy.ndarray shapeFuncParams must be numbers")
self._shapeFuncParams = shapeFuncParams.astype(FLOAT_TYPE)
self._shapeUpdateFreq = 0
# dump to repository
self._dump_to_repository({'_shapeFuncParams': self._shapeFuncParams,
'_shapeUpdateFreq': self._shapeUpdateFreq})
def set_weighting(self, weighting):
"""
Set elements weighting. It must be a valid entry of pdbparser atom's
database.
:Parameters:
#. weighting (string): The elements weighting scheme. It must be
any atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius)
defined in pdbparser database. In case of xrays or neutrons
experimental weights, one can simply set weighting to 'xrays'
or 'neutrons' and the value will be automatically adjusted to
respectively 'atomicNumber' and 'neutronCohb'. If attribute
values are missing in the pdbparser database, atomic weights
must be given in atomsWeight dictionary argument.
"""
assert self.engine is None, LOGGER.error("Engine is already set. Reseting weighting is not allowed") # ADDED 2018-11-20
if weighting.lower() in ["xrays","x-rays","xray","x-ray"]:
LOGGER.fixed("'%s' weighting is set to atomicNumber"%weighting)
weighting = "atomicNumber"
elif weighting.lower() in ["neutron","neutrons"]:
LOGGER.fixed("'%s' weighting is set to neutronCohb"%weighting)
weighting = "neutronCohb"
assert is_element_property(weighting),LOGGER.error( "weighting is not a valid pdbparser atoms database entry")
assert weighting != "atomicFormFactor", LOGGER.error("atomicFormFactor weighting is not allowed")
self.__weighting = weighting
def set_atoms_weight(self, atomsWeight):
"""
Custom set atoms weight. This is the way to customize setting atoms
weights different than the given weighting scheme.
:Parameters:
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
atoms element and values are custom weights. If None is given
or partially given, missing elements weighting will be fully set
given weighting scheme.
"""
if atomsWeight is None:
AW = {}
else:
assert isinstance(atomsWeight, dict),LOGGER.error("atomsWeight must be None or a dictionary")
AW = {}
for k in atomsWeight:
assert isinstance(k, basestring),LOGGER.error("atomsWeight keys must be strings")
try:
val = FLOAT_TYPE(atomsWeight[k])
except:
raise LOGGER.error( "atomsWeight values must be numerical")
AW[k]=val
# set atomsWeight
self.__atomsWeight = AW
# reset weights
if self.engine is None:
self.__elementsPairs = None
self._elementsWeight = None
self.__weightingScheme = None
else:
isNormalFrame, isMultiframe, isSubframe = self.engine.get_frame_category(frame=self.engine.usedFrame)
self.__elementsPairs = sorted(itertools.combinations_with_replacement(self.engine.elements,2))
self._elementsWeight = get_real_elements_weight(elements=self.engine.elements, weightsDict=self.__atomsWeight, weighting=self.__weighting)
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight)
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
if isSubframe:
repo = self.engine._get_repository()
assert repo is not None, LOGGER.error("Repository is not defined, not allowed to set atoms weight for a subframe.")
mframe = self.engine.usedFrame.split(os.sep)[0]
LOGGER.usage("set_atoms_weight for '%s' subframe. This is going to automatically propagate to all '%s' multiframe subframes."%(self.engine.usedFrame,mframe))
for subfrm in self.engine.frames[mframe]['frames_name']:
frame = os.path.join(mframe,subfrm)
if frame != self.engine.usedFrame:
elements = repo.pull(relativePath=os.path.join(frame,'_Engine__elements'))
nAtomsPerElement = repo.pull(relativePath=os.path.join(frame,'_Engine__numberOfAtomsPerElement'))
elementsWeight = repo.pull(relativePath=os.path.join(frame,'constraints',self.constraintName,'_elementsWeight'))
elementsPairs = sorted(itertools.combinations_with_replacement(elements,2))
elementsWeight = get_real_elements_weight(elements=elements, weightsDict=self.__atomsWeight, weighting=self.__weighting)
weightingScheme = get_normalized_weighting(numbers=nAtomsPerElement, weights=elementsWeight)
for k in self.__weightingScheme:
weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
# dump to repository
self._dump_to_repository({'_PairDistributionConstraint__elementsPairs' : elementsPairs,
'_PairDistributionConstraint__weightingScheme': weightingScheme,
'_PairDistributionConstraint__atomsWeight' : self.__atomsWeight,
'_elementsWeight': elementsWeight},
frame=frame)
else:
assert isNormalFrame, LOGGER.error("Not allowed to set_atoms_weight for multiframe")
# dump to repository
self._dump_to_repository({'_PairDistributionConstraint__elementsPairs' : self.__elementsPairs,
'_PairDistributionConstraint__weightingScheme': self.__weightingScheme,
'_PairDistributionConstraint__atomsWeight' : self.__atomsWeight,
'_elementsWeight': self._elementsWeight})
def set_window_function(self, windowFunction, frame=None):
"""
Set convolution window function.
:Parameters:
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
artificial wrinkles, among others the main reason is because
G(r) is computed by applying a sine Fourier transform to the
experimental structure factor S(q). Therefore window function is
used to best imitate the numerical artefacts in the experimental
data.
#. frame (None, string): Target frame name. If None, engine used
frame is used. If multiframe is given, all subframes will be
targeted. If subframe is given, all other multiframe subframes
will be targeted.
"""
if windowFunction is not None:
assert isinstance(windowFunction, np.ndarray), LOGGER.error("windowFunction must be a numpy.ndarray")
assert windowFunction.dtype.type is FLOAT_TYPE, LOGGER.error("windowFunction type must be %s"%FLOAT_TYPE)
assert len(windowFunction.shape) == 1, LOGGER.error("windowFunction must be of dimension 1")
assert len(windowFunction) <= self.experimentalData.shape[0], LOGGER.error("windowFunction length must be smaller than experimental data")
# normalize window function
windowFunction /= np.sum(windowFunction)
# check window size
# set windowFunction
self.__windowFunction = windowFunction
# dump to repository
usedIncluded, frame, allFrames = get_caller_frames(engine=self.engine,
frame=frame,
subframeToAll=True,
caller="%s.%s"%(self.__class__.__name__,inspect.stack()[0][3]) )
if usedIncluded:
self.__windowFunction = windowFunction
for frm in allFrames:
self._dump_to_repository({'_PairDistributionConstraint__windowFunction': self.__windowFunction}, frame=frm)
    def set_experimental_data(self, experimentalData):
        """
        Set constraint's experimental data.

        :Parameters:
            #. experimentalData (numpy.ndarray, string): The experimental
               data as numpy.ndarray or string path to load data using
               numpy.loadtxt function.
        """
        # get experimental data
        super(PairDistributionConstraint, self).set_experimental_data(experimentalData=experimentalData)
        # distances bin deduced from the first two experimental points
        # (check_experimental_data guarantees all bins are coherent)
        self.__bin = FLOAT_TYPE(self.experimentalData[1,0] - self.experimentalData[0,0])
        # dump to repository
        self._dump_to_repository({'_PairDistributionConstraint__bin': self.__bin})
        # set limits
        self.set_limits(self.limits)
    def set_limits(self, limits):
        """
        Set the histogram computation limits.

        :Parameters:
            #. limits (None, tuple, list): Distance limits to bound
               experimental data and compute histograms.
               If None is given, the limits will be automatically
               set the the min and max distance of the experimental data.
               Otherwise, a tuple of exactly two items where the first is the
               minimum distance or None and the second is the maximum distance
               or None.
        """
        # updates limitsIndexStart / limitsIndexEnd on the parent class
        self._ExperimentalConstraint__set_limits(limits)
        # set minimumDistance, maximumDistance (half a bin beyond the edge points)
        self.__minimumDistance = FLOAT_TYPE(self.experimentalData[self.limitsIndexStart,0] - self.__bin/2. )
        self.__maximumDistance = FLOAT_TYPE(self.experimentalData[self.limitsIndexEnd ,0] + self.__bin/2. )
        self.__shellCenters = np.array([self.experimentalData[idx,0] for idx in range(self.limitsIndexStart,self.limitsIndexEnd +1)],dtype=FLOAT_TYPE)
        # set histogram edges: one edge below every point plus the final upper edge
        edges = [self.experimentalData[idx,0] - self.__bin/2. for idx in range(self.limitsIndexStart,self.limitsIndexEnd +1)]
        edges.append( self.experimentalData[self.limitsIndexEnd ,0] + self.__bin/2. )
        self.__edges = np.array(edges, dtype=FLOAT_TYPE)
        # set histogram size
        self.__histogramSize = INT_TYPE( len(self.__edges)-1 )
        # set shell centers and volumes (spherical shell volume between edges)
        self.__shellVolumes = FLOAT_TYPE(4.0/3.)*PI*((self.__edges[1:])**3 - self.__edges[0:-1]**3)
        # set experimental distances and pdf restricted to the limits
        self.__experimentalDistances = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd +1,0]
        self.__experimentalPDF = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd +1,1]
        # dump to repository
        self._dump_to_repository({'_PairDistributionConstraint__minimumDistance' : self.__minimumDistance,
                                  '_PairDistributionConstraint__maximumDistance' : self.__maximumDistance,
                                  '_PairDistributionConstraint__shellCenters' : self.__shellCenters,
                                  '_PairDistributionConstraint__edges' : self.__edges,
                                  '_PairDistributionConstraint__histogramSize' : self.__histogramSize,
                                  '_PairDistributionConstraint__shellVolumes' : self.__shellVolumes,
                                  '_PairDistributionConstraint__experimentalDistances': self.__experimentalDistances,
                                  '_PairDistributionConstraint__experimentalPDF' : self.__experimentalPDF})
        # set used dataWeights clipped to the same limits
        self._set_used_data_weights(limitsIndexStart=self.limitsIndexStart, limitsIndexEnd=self.limitsIndexEnd )
        # reset constraint
        self.reset_constraint()
def check_experimental_data(self, experimentalData):
"""
Check whether experimental data is correct.
:Parameters:
#. experimentalData (object): Experimental data to check.
:Returns:
#. result (boolean): Whether it is correct or not.
#. message (str): Checking message that explains whats's
wrong with the given data.
"""
if not isinstance(experimentalData, np.ndarray):
return False, "experimentalData must be a numpy.ndarray"
if experimentalData.dtype.type is not FLOAT_TYPE:
return False, "experimentalData type must be %s"%FLOAT_TYPE
if len(experimentalData.shape) !=2:
return False, "experimentalData must be of dimension 2"
if experimentalData.shape[1] !=2:
return False, "experimentalData must have only 2 columns"
# check distances order
inOrder = (np.array(sorted(experimentalData[:,0]), dtype=FLOAT_TYPE)-experimentalData[:,0])<=PRECISION
if not np.all(inOrder):
return False, "experimentalData distances are not sorted in order"
if experimentalData[0][0]<0:
return False, "experimentalData distances min value is found negative"
bin = experimentalData[1,0] -experimentalData[0,0]
bins = experimentalData[1:,0]-experimentalData[0:-1,0]
for b in bins:
if np.abs(b-bin)>PRECISION:
return False, "experimentalData distances bins are found not coherent"
# data format is correct
return True, ""
def compute_standard_error(self, modelData):
"""
Compute the standard error (StdErr) as the squared deviations
between model computed data and the experimental ones.
.. math::
StdErr = \\sum \\limits_{i}^{N} W_{i}(Y(X_{i})-F(X_{i}))^{2}
Where:\n
:math:`N` is the total number of experimental data points. \n
:math:`W_{i}` is the data point weight. It becomes equivalent to 1 when dataWeights is set to None. \n
:math:`Y(X_{i})` is the experimental data point :math:`X_{i}`. \n
:math:`F(X_{i})` is the computed from the model data :math:`X_{i}`. \n
:Parameters:
#. modelData (numpy.ndarray): The data to compare with the
experimental one and compute the squared deviation.
:Returns:
#. standardError (number): The calculated constraint's
standardError.
"""
# compute difference
diff = self.__experimentalPDF-modelData
# return squared deviation
if self._usedDataWeights is None:
return np.add.reduce((diff)**2)
else:
return np.add.reduce(self._usedDataWeights*((diff)**2))
def update_standard_error(self):
""" Compute and set constraint's standardError."""
# set standardError
totalPDF = self.get_constraint_value()["total"]
self.set_standard_error(self.compute_standard_error(modelData = totalPDF))
    def __get_total_Gr(self, data, rho0):
        """Fast path to compute the total G(r) from histogram data upon
        fitting. The fitted scale factor gets computed and applied, the
        shape function is subtracted and the window function is convolved.

        :Parameters:
            #. data (dict): histograms dictionary with 'intra' and 'inter' keys.
            #. rho0 (number): atomic number density used to normalize G(r).
        """
        # update shape function if needed
        # initialize Gr array
        Gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
        for pair in self.__elementsPairs:
            # get weighting scheme (pair key may be stored in either order)
            wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
            if wij is None:
                wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
            # get number of atoms per element
            ni = self.engine.numberOfAtomsPerElement[pair[0]]
            nj = self.engine.numberOfAtomsPerElement[pair[1]]
            # get index of element
            idi = self.engine.elements.index(pair[0])
            idj = self.engine.elements.index(pair[1])
            # get Nij
            if idi == idj:
                # same-element pair: Nij counts unordered distinct pairs
                Nij = ni*(ni-1)/2.0
                Dij = FLOAT_TYPE( Nij/self.engine.volume )
                nij = data["intra"][idi,idj,:]+data["inter"][idi,idj,:]
                Gr += wij*nij/Dij
            else:
                Nij = ni*nj
                # histograms are stored per ordered (i,j) pair: sum both orders
                Dij = FLOAT_TYPE( Nij/self.engine.volume )
                nij = data["intra"][idi,idj,:]+data["intra"][idj,idi,:] + data["inter"][idi,idj,:]+data["inter"][idj,idi,:]
                Gr += wij*nij/Dij
        # Divide by shells volume
        Gr /= self.shellVolumes
        # compute total G(r)
        #rho0 = self.engine.numberDensity #(self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
        Gr = (4.*PI*self.__shellCenters*rho0)*(Gr-1)
        # remove shape function
        if self._shapeArray is not None:
            Gr -= self._shapeArray
        # multiply by scale factor (fitted against the experimental PDF)
        self._fittedScaleFactor = self.get_adjusted_scale_factor(self.experimentalPDF, Gr, self._usedDataWeights)
        if self._fittedScaleFactor != 1:
            Gr *= FLOAT_TYPE(self._fittedScaleFactor)
        # apply multiframe prior and weight
        Gr = self._apply_multiframe_prior(Gr)
        # convolve total with window function
        if self.__windowFunction is not None:
            Gr = np.convolve(Gr, self.__windowFunction, 'same')
        # return array
        return Gr
    def _get_constraint_value(self, data, applyMultiframePrior=True):
        """Compute constraint data (intra, inter, total, total_no_window)
        from the given histograms. The scale factor is applied; the
        multiframe prior is applied only when applyMultiframePrior is True.

        :Parameters:
            #. data (dict): histograms dictionary with 'intra' and 'inter' keys.
            #. applyMultiframePrior (boolean): whether to apply subframe
               weight and prior to the total.
        """
        # http://erice2011.docking.org/upload/Other/Billinge_PDF/03-ReadingMaterial/BillingePDF2011.pdf page 6
        #import time
        #startTime = time.clock()
        #if self._shapeFuncParams is not None and self._shapeArray is None:
        #    self.__set_shape_array()
        output = {}
        # pre-allocate per-pair output arrays
        for pair in self.__elementsPairs:
            output["rdf_intra_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
            output["rdf_inter_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
            output["rdf_total_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
        gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
        for pair in self.__elementsPairs:
            # get weighting scheme (pair key may be stored in either order)
            wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
            if wij is None:
                wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
            # get number of atoms per element
            ni = self.engine.numberOfAtomsPerElement[pair[0]]
            nj = self.engine.numberOfAtomsPerElement[pair[1]]
            # get index of element
            idi = self.engine.elements.index(pair[0])
            idj = self.engine.elements.index(pair[1])
            # get Nij
            if idi == idj:
                # same-element pair: Nij counts unordered distinct pairs
                Nij = FLOAT_TYPE( ni*(ni-1)/2.0 )
                output["rdf_intra_%s-%s" % pair] += data["intra"][idi,idj,:]
                output["rdf_inter_%s-%s" % pair] += data["inter"][idi,idj,:]
            else:
                # histograms stored per ordered (i,j) pair: sum both orders
                Nij = FLOAT_TYPE( ni*nj )
                output["rdf_intra_%s-%s" % pair] += data["intra"][idi,idj,:] + data["intra"][idj,idi,:]
                output["rdf_inter_%s-%s" % pair] += data["inter"][idi,idj,:] + data["inter"][idj,idi,:]
            # compute g(r)
            nij = output["rdf_intra_%s-%s" % pair] + output["rdf_inter_%s-%s" % pair]
            dij = nij/self.__shellVolumes
            Dij = Nij/self.engine.volume
            gr += wij*dij/Dij
            # calculate intensityFactor
            intensityFactor = (self.engine.volume*wij)/(Nij*self.__shellVolumes)
            # divide by factor
            output["rdf_intra_%s-%s" % pair] *= intensityFactor
            output["rdf_inter_%s-%s" % pair] *= intensityFactor
            output["rdf_total_%s-%s" % pair] = output["rdf_intra_%s-%s" % pair] + output["rdf_inter_%s-%s" % pair]
            ## compute g(r) equivalent to earlier gr += wij*dij/Dij
            #gr += output["rdf_total_%s-%s" % pair]
        # compute total G(r)
        rho0 = self.engine.numberDensity #(self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
        output["total_no_window"] = (4.*PI*self.__shellCenters*rho0) * (gr-1)
        # remove shape function
        if self._shapeArray is not None:
            output["total_no_window"] -= self._shapeArray
        # multiply by scale factor
        if self.scaleFactor != 1:
            output["total_no_window"] *= self.scaleFactor
        # apply multiframe prior and weight
        if applyMultiframePrior:
            output["total_no_window"] = self._apply_multiframe_prior(output["total_no_window"])
        # convolve total with window function
        if self.__windowFunction is not None:
            output["total"] = np.convolve(output["total_no_window"], self.__windowFunction, 'same')
        else:
            output["total"] = output["total_no_window"]
        #t = time.clock()-startTime
        #print("%.7f(s) --> %.7f(Ms)"%(t, 1000000*t))
        return output
def get_constraint_value(self, applyMultiframePrior=True):
"""
Compute all partial Pair Distribution Functions (PDFs).
:Parameters:
#. applyMultiframePrior (boolean): Whether to apply subframe weight
and prior to the total. This will only have an effect when used
frame is a subframe and in case subframe weight and prior is
defined.
:Returns:
#. PDFs (dictionary): The PDFs dictionnary, where keys are the
element wise intra and inter molecular PDFs and values are the
computed PDFs.
"""
if self.data is None:
LOGGER.warn("data must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.data, applyMultiframePrior=applyMultiframePrior)
def get_constraint_original_value(self):
"""
Compute all partial Pair Distribution Functions (PDFs).
:Returns:
#. PDFs (dictionary): The PDFs dictionnary, where keys are the
element wise intra and inter molecular PDFs and values are
the computed PDFs.
"""
if self.originalData is None:
LOGGER.warn("originalData must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.originalData)
    @reset_if_collected_out_of_date
    def compute_data(self, update=True):
        """ Compute constraint's data.

        :Parameters:
            #. update (boolean): whether to update constraint data and
               standard error with new computation. If data is computed and
               updated by another thread or process while the stochastic
               engine is running, this might lead to a state alteration of
               the constraint which will lead to a no additional accepted
               moves in the run

        :Returns:
            #. data (dict): constraint data dictionary
            #. standardError (float): constraint standard error
        """
        # full intra- and inter-molecular distance histograms over all atoms
        intra,inter = full_pairs_histograms_coords( boxCoords        = self.engine.boxCoordinates,
                                                    basis            = self.engine.basisVectors,
                                                    isPBC            = self.engine.isPBC,
                                                    moleculeIndex    = self.engine.moleculesIndex,
                                                    elementIndex     = self.engine.elementsIndex,
                                                    numberOfElements = self.engine.numberOfElements,
                                                    minDistance      = self.__minimumDistance,
                                                    maxDistance      = self.__maximumDistance,
                                                    histSize         = self.__histogramSize,
                                                    bin              = self.__bin,
                                                    ncores           = self.engine._runtime_ncores )
        # create data and compute standard error
        data     = {"intra":intra, "inter":inter}
        totalPDF = self.__get_total_Gr(data, rho0=self.engine.numberDensity)
        stdError = self.compute_standard_error(modelData = totalPDF)
        # update
        if update:
            self.set_data(data)
            # invalidate any pending per-move deltas
            self.set_active_atoms_data_before_move(None)
            self.set_active_atoms_data_after_move(None)
            # set standardError
            self.set_standard_error(stdError)
            # set original data (only the first time data is ever computed)
            if self.originalData is None:
                self._set_original_data(self.data)
        # return
        return data, stdError
    def compute_before_move(self, realIndexes, relativeIndexes):
        """
        Compute constraint before move is executed.

        :Parameters:
            #. realIndexes (numpy.ndarray): Not used here.
            #. relativeIndexes (numpy.ndarray): Group atoms relative index
               the move will be applied to.
        """
        # histograms of all distances involving the moved atoms
        intraM,interM = multiple_pairs_histograms_coords( indexes          = relativeIndexes,
                                                          boxCoords        = self.engine.boxCoordinates,
                                                          basis            = self.engine.basisVectors,
                                                          isPBC            = self.engine.isPBC,
                                                          moleculeIndex    = self.engine.moleculesIndex,
                                                          elementIndex     = self.engine.elementsIndex,
                                                          numberOfElements = self.engine.numberOfElements,
                                                          minDistance      = self.__minimumDistance,
                                                          maxDistance      = self.__maximumDistance,
                                                          histSize         = self.__histogramSize,
                                                          bin              = self.__bin,
                                                          allAtoms         = True,
                                                          ncores           = self.engine._runtime_ncores)
        # histograms of distances strictly among the moved atoms themselves
        intraF,interF = full_pairs_histograms_coords( boxCoords        = self.engine.boxCoordinates[relativeIndexes],
                                                      basis            = self.engine.basisVectors,
                                                      isPBC            = self.engine.isPBC,
                                                      moleculeIndex    = self.engine.moleculesIndex[relativeIndexes],
                                                      elementIndex     = self.engine.elementsIndex[relativeIndexes],
                                                      numberOfElements = self.engine.numberOfElements,
                                                      minDistance      = self.__minimumDistance,
                                                      maxDistance      = self.__maximumDistance,
                                                      histSize         = self.__histogramSize,
                                                      bin              = self.__bin,
                                                      ncores           = self.engine._runtime_ncores )
        # set active atoms data: subtract internal pairs so they are not
        # counted twice (once per moved atom) in the moved-atoms histograms
        self.set_active_atoms_data_before_move( {"intra":intraM-intraF, "inter":interM-interF} )
        self.set_active_atoms_data_after_move(None)
def compute_after_move(self, realIndexes, relativeIndexes, movedBoxCoordinates):
    """
    Compute constraint after move is executed.

    Temporarily installs the moved coordinates, recomputes the moved atoms'
    histogram contribution, restores the coordinates, then evaluates the
    would-be standard error of the move without committing any data.

    :Parameters:
        #. realIndexes (numpy.ndarray): Not used here.
        #. relativeIndexes (numpy.ndarray): Group atoms relative index
           the move will be applied to.
        #. movedBoxCoordinates (numpy.ndarray): The moved atoms new coordinates.
    """
    # change coordinates temporarily; keep a copy to restore afterwards
    boxData = np.array(self.engine.boxCoordinates[relativeIndexes], dtype=FLOAT_TYPE)
    self.engine.boxCoordinates[relativeIndexes] = movedBoxCoordinates
    # calculate pair distribution function: moved atoms vs all atoms
    intraM,interM = multiple_pairs_histograms_coords( indexes          = relativeIndexes,
                                                      boxCoords        = self.engine.boxCoordinates,
                                                      basis            = self.engine.basisVectors,
                                                      isPBC            = self.engine.isPBC,
                                                      moleculeIndex    = self.engine.moleculesIndex,
                                                      elementIndex     = self.engine.elementsIndex,
                                                      numberOfElements = self.engine.numberOfElements,
                                                      minDistance      = self.__minimumDistance,
                                                      maxDistance      = self.__maximumDistance,
                                                      histSize         = self.__histogramSize,
                                                      bin              = self.__bin,
                                                      allAtoms         = True,
                                                      ncores           = self.engine._runtime_ncores )
    # histograms among the moved atoms only (same correction as in
    # compute_before_move)
    intraF,interF = full_pairs_histograms_coords( boxCoords        = self.engine.boxCoordinates[relativeIndexes],
                                                  basis            = self.engine.basisVectors,
                                                  isPBC            = self.engine.isPBC,
                                                  moleculeIndex    = self.engine.moleculesIndex[relativeIndexes],
                                                  elementIndex     = self.engine.elementsIndex[relativeIndexes],
                                                  numberOfElements = self.engine.numberOfElements,
                                                  minDistance      = self.__minimumDistance,
                                                  maxDistance      = self.__maximumDistance,
                                                  histSize         = self.__histogramSize,
                                                  bin              = self.__bin,
                                                  ncores           = self.engine._runtime_ncores )
    # set active atoms data
    self.set_active_atoms_data_after_move( {"intra":intraM-intraF, "inter":interM-interF} )
    # reset coordinates
    self.engine.boxCoordinates[relativeIndexes] = boxData
    # compute and set standardError after move: old data minus pre-move
    # contribution plus post-move contribution
    dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
    dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
    totalPDF = self.__get_total_Gr({"intra":dataIntra, "inter":dataInter}, rho0=self.engine.numberDensity)
    self.set_after_move_standard_error( self.compute_standard_error(modelData = totalPDF) )
    # increment tried
    self.increment_tried()
def accept_move(self, realIndexes, relativeIndexes):
    """
    Accept move.

    :Parameters:
        #. realIndexes (numpy.ndarray): Not used here.
        #. relativeIndexes (numpy.ndarray): Not used here.
    """
    # fold the moved atoms' contribution into the stored histograms:
    # remove the pre-move contribution and add the post-move one
    updated = {key: self.data[key]
                    - self.activeAtomsDataBeforeMove[key]
                    + self.activeAtomsDataAfterMove[key]
               for key in ("intra", "inter")}
    # change permanently _data
    self.set_data(updated)
    # reset activeAtoms data
    self.set_active_atoms_data_before_move(None)
    self.set_active_atoms_data_after_move(None)
    # promote the after-move standard error to the current one
    self.set_standard_error(self.afterMoveStandardError)
    self.set_after_move_standard_error(None)
    # set new scale factor
    self._set_fitted_scale_factor_value(self._fittedScaleFactor)
    # increment accepted
    self.increment_accepted()
def reject_move(self, realIndexes, relativeIndexes):
    """
    Reject move.

    :Parameters:
        #. realIndexes (numpy.ndarray): Not used here.
        #. relativeIndexes (numpy.ndarray): Not used here.
    """
    # discard the cached after-move standard error; the stored data was
    # never modified, so nothing else needs restoring
    self.set_after_move_standard_error( None )
    # drop the cached contributions of the atoms that were moved
    self.set_active_atoms_data_after_move(None)
    self.set_active_atoms_data_before_move(None)
def compute_as_if_amputated(self, realIndex, relativeIndex):
    """
    Compute and return constraint's data and standard error as if
    given atom is amputated.

    The atom's histogram contribution is subtracted from the stored data,
    the weighting scheme and number density are temporarily recomputed for
    one atom less, and the resulting data/standard error are cached via
    set_amputation_data / set_amputation_standard_error. All temporarily
    modified state is restored before returning.

    :Parameters:
        #. realIndex (numpy.ndarray): Atom's index as a numpy array
           of a single element.
        #. relativeIndex (numpy.ndarray): Atom's relative index as a
           numpy array of a single element.
    """
    # compute the amputated atom's contribution and subtract it
    self.compute_before_move(realIndexes=realIndex, relativeIndexes=relativeIndex)
    dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]
    dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]
    data = {"intra":dataIntra, "inter":dataInter}
    # temporarily adjust self.__weightingScheme for one atom less of the
    # selected element
    weightingScheme = self.__weightingScheme
    relativeIndex = relativeIndex[0]
    selectedElement = self.engine.allElements[relativeIndex]
    self.engine.numberOfAtomsPerElement[selectedElement] -= 1
    self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight )
    for k in self.__weightingScheme:
        self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
    # disable scale factor fitting while computing the standard error if the
    # current move generator does not allow it
    if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
        SF = self.adjustScaleFactorFrequency
        self._set_adjust_scale_factor_frequency(0)
    # number density of the system with one atom removed
    rho0 = ((self.engine.numberOfAtoms-1)/self.engine.volume).astype(FLOAT_TYPE)
    totalPDF = self.__get_total_Gr(data, rho0=rho0)
    standardError = self.compute_standard_error(modelData = totalPDF)
    if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
        self._set_adjust_scale_factor_frequency(SF)
    # reset activeAtoms data
    self.set_active_atoms_data_before_move(None)
    # cache amputation data
    self.set_amputation_data( {'data':data, 'weightingScheme':self.__weightingScheme} )
    # cache amputation standard error
    self.set_amputation_standard_error( standardError )
    # restore weightingScheme and number of atoms per element
    self.__weightingScheme = weightingScheme
    self.engine.numberOfAtomsPerElement[selectedElement] += 1
    # BUG FIX: removed a leftover debug print that dumped engine internals
    # (numberOfAtoms, rho0, numberOfAtomsPerElement, numberDensity) to stdout
    # on every amputation computation.
def accept_amputation(self, realIndex, relativeIndex):
    """
    Accept amputated atom and sets constraints data and standard error accordingly.

    :Parameters:
        #. realIndex (numpy.ndarray): Not used here.
        #. relativeIndex (numpy.ndarray): Not used here.
    """
    # install the data and weighting scheme that were precomputed and cached
    # by compute_as_if_amputated
    self.set_data( self.amputationData['data'] )
    self.__weightingScheme = self.amputationData['weightingScheme']
    self.set_standard_error( self.amputationStandardError )
    # clear the amputation cache
    self.set_amputation_data( None )
    self.set_amputation_standard_error( None )
    # set new scale factor
    self._set_fitted_scale_factor_value(self._fittedScaleFactor)
def reject_amputation(self, realIndex, relativeIndex):
    """
    Reject amputated atom and set constraint's data and standard
    error accordingly.

    :Parameters:
        #. realIndex (numpy.ndarray): Not used here.
        #. relativeIndex (numpy.ndarray): Not used here.
    """
    # simply drop the cached amputation results; the stored data and
    # standard error were never modified, so nothing needs restoring
    self.set_amputation_standard_error( None )
    self.set_amputation_data( None )
def _on_collector_collect_atom(self, realIndex):
    # Intentional no-op: this constraint does not react to atom collection
    # here; data adjustment appears to be handled through the amputation
    # API instead — confirm against the collector's contract.
    pass
def _on_collector_release_atom(self, realIndex):
    # Intentional no-op: mirror of _on_collector_collect_atom.
    pass
def get_multiframe_weights(self, frame):
    """Return an ordered mapping {subframe name: stored multiframe weight}
    for every subframe of the given multiframe, pulled from the repository.
    """
    from collections import OrderedDict
    isNormalFrame, isMultiframe, isSubframe = self.engine.get_frame_category(frame=frame)
    assert isMultiframe, LOGGER.error("Given frame '%s' is not a multiframe"%frame)
    repository = self.engine._get_repository()
    # repository path of a subframe's stored weight
    def weight_path(sub):
        return os.path.join(frame, sub, 'constraints', self.constraintName,
                            '_ExperimentalConstraint__multiframeWeight')
    return OrderedDict(
        (sub, repository.pull(relativePath=weight_path(sub)))
        for sub in self.engine.frames[frame]['frames_name'])
def _constraint_copy_needs_lut(self):
return {'_PairDistributionConstraint__elementsPairs' :'_PairDistributionConstraint__elementsPairs',
'_PairDistributionConstraint__histogramSize' :'_PairDistributionConstraint__histogramSize',
'_PairDistributionConstraint__weightingScheme' :'_PairDistributionConstraint__weightingScheme',
'_PairDistributionConstraint__shellVolumes' :'_PairDistributionConstraint__shellVolumes',
'_PairDistributionConstraint__shellCenters' :'_PairDistributionConstraint__shellCenters',
'_PairDistributionConstraint__windowFunction' :'_PairDistributionConstraint__windowFunction',
'_PairDistributionConstraint__experimentalDistances':'_PairDistributionConstraint__experimentalDistances',
'_PairDistributionConstraint__experimentalPDF' :'_PairDistributionConstraint__experimentalPDF',
'_PairDistributionConstraint__minimumDistance' :'_PairDistributionConstraint__minimumDistance',
'_PairDistributionConstraint__maximumDistance' :'_PairDistributionConstraint__maximumDistance',
'_PairDistributionConstraint__bin' :'_PairDistributionConstraint__bin',
'_shapeArray' :'_shapeArray',
'_ExperimentalConstraint__scaleFactor' :'_ExperimentalConstraint__scaleFactor',
'_ExperimentalConstraint__dataWeights' :'_ExperimentalConstraint__dataWeights',
'_ExperimentalConstraint__multiframePrior' :'_ExperimentalConstraint__multiframePrior',
'_ExperimentalConstraint__multiframeWeight' :'_ExperimentalConstraint__multiframeWeight',
'_ExperimentalConstraint__limits' :'_ExperimentalConstraint__limits',
'_ExperimentalConstraint__limitsIndexStart' :'_ExperimentalConstraint__limitsIndexStart',
'_ExperimentalConstraint__limitsIndexEnd' :'_ExperimentalConstraint__limitsIndexEnd',
'_usedDataWeights' :'_usedDataWeights',
'_Constraint__used' :'_Constraint__used',
'_Constraint__data' :'_Constraint__data',
'_Constraint__state' :'_Constraint__state',
'_Engine__state' :'_Engine__state',
'_Engine__boxCoordinates' :'_Engine__boxCoordinates',
'_Engine__basisVectors' :'_Engine__basisVectors',
'_Engine__isPBC' :'_Engine__isPBC',
'_Engine__moleculesIndex' :'_Engine__moleculesIndex',
'_Engine__elementsIndex' :'_Engine__elementsIndex',
'_Engine__numberOfAtomsPerElement' :'_Engine__numberOfAtomsPerElement',
'_Engine__elements' :'_Engine__elements',
'_Engine__numberDensity' :'_Engine__numberDensity',
'_Engine__volume' :'_Engine__volume',
'_atomsCollector' :'_atomsCollector',
('engine','_atomsCollector') :'_atomsCollector',
}
def plot(self, xlabelParams={'xlabel':'$r(\\AA)$', 'size':10},
               ylabelParams={'ylabel':'$G(r)(\\AA^{-2})$', 'size':10},
         **kwargs):
    """
    Alias to ExperimentalConstraint.plot with additional parameters

    :Additional/Adjusted Parameters:
        #. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
           parameters.
        #. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
           parameters.
    """
    # NOTE(review): the dict defaults are mutable, but they are only
    # forwarded to the parent plot and never mutated here, so sharing them
    # across calls is harmless. None is itself a documented legal value,
    # so replacing the defaults with a None sentinel would change the API.
    return super(PairDistributionConstraint, self).plot(xlabelParams= xlabelParams,
                                                        ylabelParams= ylabelParams,
                                                        **kwargs)
| agpl-3.0 |
BiaDarkia/scikit-learn | sklearn/linear_model/tests/test_base.py | 33 | 17862 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.base import sparse_center_data, center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
rng = np.random.RandomState(0)
def test_linear_regression():
    """LinearRegression recovers slope and intercept on trivial datasets."""
    # (X, Y, expected coef_, expected intercept_, expected predictions)
    cases = [
        ([[1], [2]], [1, 2], [1], [0], [1, 2]),   # simple 2-point line
        ([[1]], [0], [0], [0], [0]),              # degenerate single sample
    ]
    for X, Y, coef, intercept, pred in cases:
        model = LinearRegression().fit(X, Y)
        assert_array_almost_equal(model.coef_, coef)
        assert_array_almost_equal(model.intercept_, intercept)
        assert_array_almost_equal(model.predict(X), pred)
def test_linear_regression_sample_weights():
    """Weighted LinearRegression must match the closed-form weighted LSQ."""
    # TODO: loop over sparse data as well
    rng = np.random.RandomState(0)
    # It would not work with under-determined systems
    for n_samples, n_features in ((6, 5), ):
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)
        for intercept in (True, False):
            # LinearRegression with explicit sample_weight
            reg = LinearRegression(fit_intercept=intercept)
            reg.fit(X, y, sample_weight=sample_weight)
            coefs1 = reg.coef_
            inter1 = reg.intercept_
            assert_equal(reg.coef_.shape, (X.shape[1], ))  # sanity checks
            assert_greater(reg.score(X, y), 0.5)
            # Closed form of the weighted least square
            # theta = (X^T W X)^(-1) * X^T W y
            W = np.diag(sample_weight)
            if intercept is False:
                X_aug = X
            else:
                # prepend a column of ones so the intercept shows up as the
                # first entry of the closed-form solution
                dummy_column = np.ones(shape=(n_samples, 1))
                X_aug = np.concatenate((dummy_column, X), axis=1)
            coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
                                  X_aug.T.dot(W).dot(y))
            if intercept is False:
                assert_array_almost_equal(coefs1, coefs2)
            else:
                # coefs2[0] is the intercept, the rest are the coefficients
                assert_array_almost_equal(coefs1, coefs2[1:])
                assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
    """Sample weights must be either scalar or 1D; 2D must raise ValueError.

    BUG FIX: the original body only exercised the valid-weight cases and
    never performed the ValueError check its name promises.
    """
    n_sampless = [2, 3]
    n_featuress = [3, 2]
    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights_OK = rng.randn(n_samples) ** 2 + 1
        sample_weights_OK_1 = 1.
        sample_weights_OK_2 = 2.
        # 2D weights are invalid and must be rejected
        sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
        reg = LinearRegression()
        # make sure the "OK" sample weights actually work
        reg.fit(X, y, sample_weights_OK)
        reg.fit(X, y, sample_weights_OK_1)
        reg.fit(X, y, sample_weights_OK_2)
        # the check the test is named for: >1D weights raise ValueError
        try:
            reg.fit(X, y, sample_weights_not_OK)
        except ValueError:
            pass
        else:
            raise AssertionError('fit() accepted 2D sample weights')
def test_fit_intercept():
    """Coefficient shapes must not depend on fit_intercept."""
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])

    def fit(X, intercept):
        # helper: fit a LinearRegression with/without intercept
        return LinearRegression(fit_intercept=intercept).fit(X, y)

    lr2_without_intercept, lr2_with_intercept = fit(X2, False), fit(X2, True)
    lr3_without_intercept, lr3_with_intercept = fit(X3, False), fit(X3, True)
    assert_equal(lr2_with_intercept.coef_.shape,
                 lr2_without_intercept.coef_.shape)
    assert_equal(lr3_with_intercept.coef_.shape,
                 lr3_without_intercept.coef_.shape)
    assert_equal(lr2_without_intercept.coef_.ndim,
                 lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    """LinearRegression must handle sparse (identity) design matrices."""
    rng_local = check_random_state(random_state)
    for _ in range(10):
        n = 100
        design = sparse.eye(n, n)
        beta = rng_local.rand(n)
        target = design * beta[:, np.newaxis]
        model = LinearRegression()
        model.fit(design, target.ravel())
        # with an identity design, coef_ + intercept_ reproduces beta
        assert_array_almost_equal(beta, model.coef_ + model.intercept_)
        assert_array_almost_equal(model.predict(design) - target.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
    """Fitting two stacked identical targets must match the single-target fit."""
    X, y = make_regression(random_state=random_state)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]
    model = LinearRegression(fit_intercept=True)
    model.fit(X, Y)
    # one row of coefficients per output
    assert_equal(model.coef_.shape, (2, n_features))
    Y_pred = model.predict(X)
    # refit on the single target and compare predictions column-wise
    model.fit(X, y)
    y_pred = model.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    """Multiple-outcome regression must also work on sparse data."""
    rng_local = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=rng_local)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]
    model = LinearRegression()
    model.fit(X, Y)
    # one row of coefficients per output
    assert_equal(model.coef_.shape, (2, n_features))
    Y_pred = model.predict(X)
    # single-target refit must predict the same values column-wise
    model.fit(X, y.ravel())
    y_pred = model.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_preprocess_data():
    """_preprocess_data centering/scaling for the three dense configurations."""
    n_samples = 200
    n_features = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    # the norm used by normalize=True: per-feature std scaled by sqrt(n)
    expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)
    # fit_intercept=False, normalize=False: data passes through untouched
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=False, normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)
    # fit_intercept=True, normalize=False: centered only
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    # fit_intercept=True, normalize=True: centered and scaled
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_norm, expected_X_norm)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_preprocess_data_multioutput():
    """y centering must work for 2D targets, with dense and sparse X."""
    n_samples = 200
    n_features = 3
    n_outputs = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)
    # run the same checks for dense and csc-sparse X
    args = [X, sparse.csc_matrix(X)]
    for X in args:
        # no intercept: y is untouched
        _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
                                               normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)
        # with intercept: y is centered per output
        _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
                                               normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
        # normalize only affects X, so y behaves the same
        _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
                                               normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
def test_preprocess_data_weighted():
    """Sample-weighted centering must use the weighted means."""
    n_samples = 200
    n_features = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)
    # XXX: if normalize=True, should we expect a weighted standard deviation?
    # Currently not weighted, but calculated with respect to weighted mean
    expected_X_norm = (np.sqrt(X.shape[0]) *
                       np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
    # weighted centering without normalization
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=False,
                         sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    # weighted centering plus (unweighted) normalization
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=True,
                         sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_norm, expected_X_norm)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_preprocess_data_with_return_mean():
    """With return_mean=True, sparse X is never densified/centered in place:
    only the means are reported while Xt keeps the original sparse values."""
    n_samples = 200
    n_features = 2
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0])
    # no intercept: everything passes through
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=False, normalize=False,
                         return_mean=True)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)
    # intercept: means are returned but sparse X itself stays uncentered
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=False,
                         return_mean=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
    # intercept + normalize: X is only scaled, never centered
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=True,
                         return_mean=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_norm, expected_X_norm)
    assert_array_almost_equal(Xt.A, XA / expected_X_norm)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_preprocess_data():
    """_preprocess_data must keep CSR input in CSR format."""
    X, y = make_regression()
    # sparsify the dense design matrix
    X[X < 2.5] = 0.0
    sparse_X = sparse.csr_matrix(X)
    processed_X, y, _, _, _ = _preprocess_data(sparse_X, y, True)
    assert_equal(processed_X.getformat(), 'csr')
def test_dtype_preprocess_data():
    """_preprocess_data output dtype must follow the dtype of X, and the
    inputs must never be modified in place."""
    n_samples = 200
    n_features = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    X_32 = np.asarray(X, dtype=np.float32)
    y_32 = np.asarray(y, dtype=np.float32)
    X_64 = np.asarray(X, dtype=np.float64)
    y_64 = np.asarray(y, dtype=np.float64)
    for fit_intercept in [True, False]:
        for normalize in [True, False]:
            # pure float32, pure float64, and both mixed combinations
            Xt_32, yt_32, X_mean_32, y_mean_32, X_norm_32 = _preprocess_data(
                X_32, y_32, fit_intercept=fit_intercept, normalize=normalize,
                return_mean=True)
            Xt_64, yt_64, X_mean_64, y_mean_64, X_norm_64 = _preprocess_data(
                X_64, y_64, fit_intercept=fit_intercept, normalize=normalize,
                return_mean=True)
            Xt_3264, yt_3264, X_mean_3264, y_mean_3264, X_norm_3264 = (
                _preprocess_data(X_32, y_64, fit_intercept=fit_intercept,
                                 normalize=normalize, return_mean=True))
            Xt_6432, yt_6432, X_mean_6432, y_mean_6432, X_norm_6432 = (
                _preprocess_data(X_64, y_32, fit_intercept=fit_intercept,
                                 normalize=normalize, return_mean=True))
            # all outputs follow the dtype of X (float32)
            assert_equal(Xt_32.dtype, np.float32)
            assert_equal(yt_32.dtype, np.float32)
            assert_equal(X_mean_32.dtype, np.float32)
            assert_equal(y_mean_32.dtype, np.float32)
            assert_equal(X_norm_32.dtype, np.float32)
            # all outputs follow the dtype of X (float64)
            assert_equal(Xt_64.dtype, np.float64)
            assert_equal(yt_64.dtype, np.float64)
            assert_equal(X_mean_64.dtype, np.float64)
            assert_equal(y_mean_64.dtype, np.float64)
            assert_equal(X_norm_64.dtype, np.float64)
            # mixed inputs: X's dtype wins over y's
            assert_equal(Xt_3264.dtype, np.float32)
            assert_equal(yt_3264.dtype, np.float32)
            assert_equal(X_mean_3264.dtype, np.float32)
            assert_equal(y_mean_3264.dtype, np.float32)
            assert_equal(X_norm_3264.dtype, np.float32)
            assert_equal(Xt_6432.dtype, np.float64)
            assert_equal(yt_6432.dtype, np.float64)
            assert_equal(X_mean_6432.dtype, np.float64)
            assert_equal(y_mean_6432.dtype, np.float64)
            assert_equal(X_norm_6432.dtype, np.float64)
            # the inputs must not have been cast in place
            assert_equal(X_32.dtype, np.float32)
            assert_equal(y_32.dtype, np.float32)
            assert_equal(X_64.dtype, np.float64)
            assert_equal(y_64.dtype, np.float64)
            # numerically, both precisions must agree
            assert_array_almost_equal(Xt_32, Xt_64)
            assert_array_almost_equal(yt_32, yt_64)
            assert_array_almost_equal(X_mean_32, X_mean_64)
            assert_array_almost_equal(y_mean_32, y_mean_64)
            assert_array_almost_equal(X_norm_32, X_norm_64)
def test_rescale_data():
    """_rescale_data multiplies X rows and y entries by sqrt(sample_weight)."""
    n_samples, n_features = 200, 2
    sample_weight = 1.0 + rng.rand(n_samples)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
    root_weight = np.sqrt(sample_weight)
    assert_array_almost_equal(rescaled_X, X * root_weight[:, np.newaxis])
    assert_array_almost_equal(rescaled_y, y * root_weight)
@ignore_warnings  # all deprecation warnings
def test_deprecation_center_data():
    """Deprecated center_data/sparse_center_data must match _preprocess_data."""
    n_samples = 200
    n_features = 2
    w = 1.0 + rng.rand(n_samples)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    param_grid = product([True, False], [True, False], [True, False],
                         [None, w])
    # dense case: center_data vs _preprocess_data over the full grid
    for (fit_intercept, normalize, copy, sample_weight) in param_grid:
        XX = X.copy()  # such that we can try copy=False as well
        X1, y1, X1_mean, X1_var, y1_mean = \
            center_data(XX, y, fit_intercept=fit_intercept,
                        normalize=normalize, copy=copy,
                        sample_weight=sample_weight)
        XX = X.copy()
        X2, y2, X2_mean, X2_var, y2_mean = \
            _preprocess_data(XX, y, fit_intercept=fit_intercept,
                             normalize=normalize, copy=copy,
                             sample_weight=sample_weight)
        assert_array_almost_equal(X1, X2)
        assert_array_almost_equal(y1, y2)
        assert_array_almost_equal(X1_mean, X2_mean)
        assert_array_almost_equal(X1_var, X2_var)
        assert_array_almost_equal(y1_mean, y2_mean)
    # Sparse cases
    X = sparse.csr_matrix(X)
    # sparse center_data matches _preprocess_data with return_mean=False
    for (fit_intercept, normalize, copy, sample_weight) in param_grid:
        X1, y1, X1_mean, X1_var, y1_mean = \
            center_data(X, y, fit_intercept=fit_intercept, normalize=normalize,
                        copy=copy, sample_weight=sample_weight)
        X2, y2, X2_mean, X2_var, y2_mean = \
            _preprocess_data(X, y, fit_intercept=fit_intercept,
                             normalize=normalize, copy=copy,
                             sample_weight=sample_weight, return_mean=False)
        assert_array_almost_equal(X1.toarray(), X2.toarray())
        assert_array_almost_equal(y1, y2)
        assert_array_almost_equal(X1_mean, X2_mean)
        assert_array_almost_equal(X1_var, X2_var)
        assert_array_almost_equal(y1_mean, y2_mean)
    # sparse_center_data matches _preprocess_data with return_mean=True
    for (fit_intercept, normalize) in product([True, False], [True, False]):
        X1, y1, X1_mean, X1_var, y1_mean = \
            sparse_center_data(X, y, fit_intercept=fit_intercept,
                               normalize=normalize)
        X2, y2, X2_mean, X2_var, y2_mean = \
            _preprocess_data(X, y, fit_intercept=fit_intercept,
                             normalize=normalize, return_mean=True)
        assert_array_almost_equal(X1.toarray(), X2.toarray())
        assert_array_almost_equal(y1, y2)
        assert_array_almost_equal(X1_mean, X2_mean)
        assert_array_almost_equal(X1_var, X2_var)
        assert_array_almost_equal(y1_mean, y2_mean)
| bsd-3-clause |
srslynow/legal-text-mining | to_vector_data.py | 1 | 2895 | import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import glob

# path variables
xmls_path = r'E:\Datasets\rechtspraak\txt'
files = glob.glob(os.path.join(xmls_path, '*.txt'))

# vars to be filled
rechtspraak_text = []
rechtspraak_labels = []

# use labels
use_labels = ['Strafrecht','Vreemdelingenrecht','Socialezekerheidsrecht','Belastingrecht','Civiel recht','Bestuursrecht','Personen- en familierecht']

print("Reading clean text files")
for i, file in enumerate(files):
    if i % 1000 == 0:
        print(str(i) + '\t\t' + file)
    # first line: comma separated subjects, second line: the cleaned text
    with open(file) as txt_file:
        subjects = txt_file.readline().strip('\n').split(',')
        clean_text = txt_file.readline().strip('\n')
        # save to list
        rechtspraak_labels.append(subjects)
        rechtspraak_text.append(clean_text)

# filter labels: keep only those of interest
rechtspraak_labels = [[label for label in lab_row if label in use_labels] for lab_row in rechtspraak_labels]

# delete rows with no labels.
# BUG FIX: the original deleted by ascending index while the lists shrank,
# shifting the remaining indices and removing the wrong rows; deleting from
# the highest index down keeps earlier indices valid.
delete_indices = [i for i, lab_row in enumerate(rechtspraak_labels) if len(lab_row) == 0]
for i in reversed(delete_indices):
    del rechtspraak_labels[i]
    del rechtspraak_text[i]

print("Encoding & binarizing labels")
mlb = MultiLabelBinarizer()
#mlb.fit(set(use_labels))
rechtspraak_labels = mlb.fit_transform(rechtspraak_labels)

# split into training & test set (~75/25, per-row random assignment)
train_test_split = np.random.choice([0, 1], size=len(rechtspraak_labels), p=[.75, .25])
rechtspraak_train_labels = [rechtspraak_labels[elem] for elem, i in enumerate(train_test_split) if i == 0]
rechtspraak_test_labels = [rechtspraak_labels[elem] for elem, i in enumerate(train_test_split) if i == 1]
rechtspraak_train_text = [rechtspraak_text[elem] for elem, i in enumerate(train_test_split) if i == 0]
rechtspraak_test_text = [rechtspraak_text[elem] for elem, i in enumerate(train_test_split) if i == 1]

print("Building vocabulary")
# vocabulary is fitted on the training split only, to avoid test leakage
vectorizer = CountVectorizer(analyzer = "word", tokenizer = None, preprocessor = None, stop_words = None, max_features = 5000)
vectorizer.fit(rechtspraak_train_text)

print("Transforming str to ints")
vocabulary = vectorizer.get_feature_names()
vocabulary = {el: i for i, el in enumerate(vocabulary)}
# map each document to the sequence of its in-vocabulary word ids
rechtspraak_train_text = [[vocabulary[word] for word in text_row.split() if word in vocabulary] for text_row in rechtspraak_train_text]
rechtspraak_test_text = [[vocabulary[word] for word in text_row.split() if word in vocabulary] for text_row in rechtspraak_test_text]

print("Writing to files")
# NOTE(review): rows have varying lengths, so these arrays are saved with
# dtype=object — confirm downstream loaders use allow_pickle.
np.save("train_data_vec", rechtspraak_train_text)
np.save("train_label_vec", rechtspraak_train_labels)
np.save("test_data_vec", rechtspraak_test_text)
np.save("test_label_vec", rechtspraak_test_labels)
vshtanko/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 85 | 6377 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, accuracy of LSHForest queries are measured for various
hyper-parameters and index sizes.
Second, speed up of LSHForest queries compared to brute force
method in exact nearest neighbors is measures for the
aforementioned settings. In general, speed up is increasing as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
    """Create index and query data."""
    print('Generating random blob-ish data')
    total = n_samples + n_queries
    X, _ = make_blobs(n_samples=total, n_features=n_features,
                      centers=100, shuffle=True,
                      random_state=random_state)
    # Keep the last samples as held out query vectors: note since we used
    # shuffle=True we have ensured that index and query vectors are
    # samples from the same distribution (a mixture of 100 gaussians in this
    # case)
    index_vectors, query_vectors = X[:n_samples], X[n_samples:]
    return index_vectors, query_vectors
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
    """Measures average times for exact neighbor queries.

    Returns
    -------
    neighbors : indices of the exact nearest neighbors of each query.
    average_time : mean wall-clock seconds per query.
    """
    print('Building NearestNeighbors for %d samples in %d dimensions' %
          (X.shape[0], X.shape[1]))
    # brute force + cosine distance gives the exact reference answer
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
    # (removed a dead `average_time = 0` store that was always overwritten)
    t0 = time()
    neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
                                return_distance=False)
    average_time = (time() - t0) / n_queries
    return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
                  average_time_exact, **lshf_params):
    """Calculates accuracy and the speed up of LSHForest."""
    print('Building LSHForest for %d samples in %d dimensions' %
          (X.shape[0], X.shape[1]))
    lshf = LSHForest(**lshf_params)
    t0 = time()
    lshf.fit(X)
    lshf_build_time = time() - t0
    print('Done in %0.3fs' % lshf_build_time)
    # time the approximate queries
    t0 = time()
    approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
                                       return_distance=False)
    average_time_approx = (time() - t0) / n_queries
    # fraction of exact neighbors recovered, averaged over all queries
    per_query = [np.in1d(approx, exact).mean()
                 for approx, exact in zip(approx_neighbors, exact_neighbors)]
    accuracy = sum(per_query) / n_queries
    speed_up = average_time_exact / average_time_approx
    print('Average time for lshf neighbor queries: %0.3fs' %
          average_time_approx)
    print ('Average time for exact neighbor queries: %0.3fs' %
           average_time_exact)
    print ('Average Accuracy : %0.2f' % accuracy)
    print ('Speed up: %0.1fx' % speed_up)
    return speed_up, accuracy
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Index sizes to benchmark (1e3 .. 1e6 samples) and fixed query setup.
    n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
    n_features = int(1e2)
    n_queries = 100
    n_neighbors = 10

    # Generate one dataset at the largest size; smaller runs slice it.
    X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
                                 random_state=0)

    # LSHForest configurations to compare (more estimators/candidates
    # trades speed for accuracy).
    params_list = [{'n_estimators': 3, 'n_candidates': 50},
                   {'n_estimators': 5, 'n_candidates': 70},
                   {'n_estimators': 10, 'n_candidates': 100}]

    # accuracies[i, j] / speed_ups[i, j]: sample size i, parameter set j.
    accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
    speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)

    for i, sample_size in enumerate(n_samples):
        print ('==========================================================')
        print ('Sample size: %i' % sample_size)
        print ('------------------------')
        # Exact brute-force neighbors serve as ground truth and timing
        # baseline for this sample size.
        exact_neighbors, average_time_exact = calc_exact_neighbors(
            X_index[:sample_size], X_query, n_queries, n_neighbors)
        for j, params in enumerate(params_list):
            print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
                   (params['n_estimators'], params['n_candidates']))
            speed_ups[i, j], accuracies[i, j] = calc_accuracy(
                X_index[:sample_size], X_query, n_queries, n_neighbors,
                exact_neighbors, average_time_exact, random_state=0, **params)
        print ('')
        print ('==========================================================')

    # Build legend proxies (colored rectangles) for the three LSHForest
    # parameter sets.
    colors = ['c', 'm', 'y']
    p1 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[0])
    p2 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[1])
    p3 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[2])
    labels = ['n_estimators=' + str(params_list[0]['n_estimators']) +
              ', n_candidates=' + str(params_list[0]['n_candidates']),
              'n_estimators=' + str(params_list[1]['n_estimators']) +
              ', n_candidates=' + str(params_list[1]['n_candidates']),
              'n_estimators=' + str(params_list[2]['n_estimators']) +
              ', n_candidates=' + str(params_list[2]['n_candidates'])]

    # Plot precision (fraction of true neighbors recovered) vs index size.
    plt.figure()
    plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
               loc='upper left')
    for i in range(len(params_list)):
        plt.scatter(n_samples, accuracies[:, i], c=colors[i])
        plt.plot(n_samples, accuracies[:, i], c=colors[i])
    plt.ylim([0, 1.3])
    plt.xlim(np.min(n_samples), np.max(n_samples))
    plt.semilogx()
    plt.ylabel("Precision@10")
    plt.xlabel("Index size")
    plt.grid(which='both')
    plt.title("Precision of first 10 neighbors with index size")

    # Plot speed up over brute force vs index size.
    plt.figure()
    plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
               loc='upper left')
    for i in range(len(params_list)):
        plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
        plt.plot(n_samples, speed_ups[:, i], c=colors[i])
    plt.ylim(0, np.max(speed_ups))
    plt.xlim(np.min(n_samples), np.max(n_samples))
    plt.semilogx()
    plt.ylabel("Speed up")
    plt.xlabel("Index size")
    plt.grid(which='both')
    plt.title("Relationship between Speed up and index size")
    plt.show()
| bsd-3-clause |
fashandge/partools | setup.py | 1 | 2311 | #!/usr/bin/env python
#from distutils.core import setup
from codecs import open
import re
import os
from os import path
from setuptools import setup, find_packages
def read(*names, **kwargs):
    """Return the text of a file addressed relative to this module.

    ``*names`` are path components joined onto this file's directory;
    an ``encoding`` keyword may be supplied (default ``"utf8"``).
    """
    encoding = kwargs.get("encoding", "utf8")
    target = os.path.join(os.path.dirname(__file__), *names)
    with open(target, encoding=encoding) as fp:
        return fp.read()
def find_version(*file_paths):
    """Extract ``__version__`` from the given file.

    Raises RuntimeError when no ``__version__ = "..."`` line is found.
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Use the README as the long description shown on PyPI.
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(name='partools',
      # Single-source the version from the package's __init__.py.
      version=find_version('partools/__init__.py'),
      description=('Utility functions for embarassingly parallel processing with multicores on a single machine, including a parallel version of map, and parallel processing of pandas dataframe.'),
      long_description=long_description,
      author='Jianfu Chen',
      license='APACHE-2.0',
      url='https://github.com/fashandge/partools',
      #py_modules=['parmap'],
      # NOTE(review): 'Operating System :: Linux' does not look like a
      # standard trove classifier ('Operating System :: POSIX :: Linux'
      # is the usual form) -- verify against the PyPI classifier list.
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: Apache Software License',
          'Operating System :: Linux',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.2',
          'Programming Language :: Python :: 3.3',
      ],
      # You can just specify the packages manually here if your project is
      # simple. Or you can use find_packages().
      packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
      # If there are data files included in your packages that need to be
      # installed, specify them here. If using Python 2.6 or less, then these
      # include README.md to . dir
      data_files=[('', ['README.md'])],
      include_package_data=True,
      test_suite='nose.collector',
      tests_require=['nose', 'nose-cover3'],
      zip_safe=False,
      )
| apache-2.0 |
moutai/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 33 | 4515 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how the scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
# Bail out early when the dataset location is not configured.
if 'MLCOMP_DATASETS_HOME' not in os.environ:
    print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
    sys.exit(0)

# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))

print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
                                    for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target

# Load the test set. Fix: start the clock *before* loading -- the original
# set t0 after load_mlcomp returned, so the reported duration was always ~0s.
print("Loading 20 newsgroups test set... ")
t0 = time()
news_test = load_mlcomp('20news-18828', 'test')
print("done in %fs" % (time() - t0))
# (Removed a misleading "Predicting the labels..." message here: no
# prediction happens until benchmark() runs later in the file.)
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))

# Reuse the fitted vectorizer so train/test share the same vocabulary.
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
    """Fit ``clf_class(**params)`` on the module-level training data and
    report timings, a classification report and a confusion-matrix plot.
    """
    print("parameters:", params)
    start = time()
    clf = clf_class(**params).fit(X_train, y_train)
    print("done in %fs" % (time() - start))

    # Linear models expose coef_; report their sparsity.
    if hasattr(clf, 'coef_'):
        print("Percentage of non zeros coef: %f"
              % (np.mean(clf.coef_ != 0) * 100))

    print("Predicting the outcomes of the testing set")
    start = time()
    pred = clf.predict(X_test)
    print("done in %fs" % (time() - start))

    print("Classification report on test set for classifier:")
    print(clf)
    print()
    print(classification_report(y_test, pred,
                                target_names=news_test.target_names))

    cm = confusion_matrix(y_test, pred)
    print("Confusion matrix:")
    print(cm)

    # Show confusion matrix
    plt.matshow(cm)
    plt.title('Confusion matrix of the %s classifier' % name)
    plt.colorbar()
# Benchmark a linear SVM trained with SGD (hinge loss + L2 penalty).
print("Testbenching a linear classifier...")
parameters = {
    'loss': 'hinge',
    'penalty': 'l2',
    'n_iter': 50,
    'alpha': 0.00001,
    'fit_intercept': True,
}

benchmark(SGDClassifier, parameters, 'SGD')

# Benchmark a multinomial Naive Bayes classifier with light smoothing.
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}

benchmark(MultinomialNB, parameters, 'MultinomialNB')

plt.show()
| bsd-3-clause |
petebachant/scipy | doc/source/tutorial/examples/normdiscr_plot1.py | 84 | 1547 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20  # number of integer support points of the distribution minus 1
# Fix: use floor division so the support grid stays integral under
# Python 3 ("/" would yield 10.0 and a float-valued grid).
npointsh = npoints // 2
npointsf = float(npoints)
nbound = 4  # bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound  # actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh + 2, 1)  # integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound  # bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
# Probability mass in each unit-width bin of the truncated normal.
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid

normdiscrete = stats.rv_discrete(
    values=(gridint, np.round(probs, decimals=7)),
    name='normdiscrete')

n_sample = 500
np.random.seed(87655678)  # fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
# (Removed the unused alias ``rvsnd = rvs``.)
f, _ = np.histogram(rvs, bins=gridlimits)  # bin edges are not needed
sfreq = np.vstack([gridint, f, probs * n_sample]).T
fs = sfreq[:, 1] / float(n_sample)   # observed relative frequencies
ft = sfreq[:, 2] / float(n_sample)   # theoretical probabilities
nd_std = np.sqrt(normdiscrete.stats(moments='v'))

ind = gridint  # the x locations for the groups
width = 0.35   # the width of the bars

plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind + width, fs, width, color='r')
normline = plt.plot(ind + width / 2.0, stats.norm.pdf(ind, scale=nd_std),
                    color='b')
plt.ylabel('Frequency')
plt.title('Frequency and Probability of normdiscrete')
plt.xticks(ind + width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
kiyoto/statsmodels | statsmodels/datasets/tests/test_utils.py | 26 | 1697 | import os
import sys
from statsmodels.datasets import get_rdataset, webuse, check_internet
from numpy.testing import assert_, assert_array_equal, dec
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_get_rdataset():
    """Smoke test: a cached R dataset loads from disk on Python 2."""
    if sys.version_info[0] < 3:
        duncan = get_rdataset("Duncan", "car", cache=cur_dir)
        assert_(duncan.from_cache)
    # On Python 3 there is nothing to exercise: the cached files were
    # written with Python 2 (str, not bytes) and the index file path is
    # hard-coded, so Python-2 and Python-3 caches cannot coexist.
    # (A "Duncan-py3" fixture would be needed to cover this branch.)
#internet_available = check_internet()
#@dec.skipif(not internet_available)
def t_est_webuse():
    """Disabled network test (note the ``t_est`` spelling hides it from
    the collector): webuse() should reproduce the bundled macrodata."""
    # Expected values copied/adjusted from iolib/tests/test_foreign.
    from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2
    base_gh = "http://statsmodels.sourceforge.net/devel/_static/"
    res1 = webuse('macrodata', baseurl=base_gh, as_df=False)
    assert_array_equal(res1 == res2, True)
#@dec.skipif(not internet_available)
def t_est_webuse_pandas():
    """Disabled network test (``t_est`` spelling hides it from the
    collector): webuse() as a DataFrame should match load_pandas()."""
    # Expected frame comes from the locally bundled dataset.
    from pandas.util.testing import assert_frame_equal
    from statsmodels.datasets import macrodata
    expected = macrodata.load_pandas().data
    base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
    fetched = webuse('macrodata', baseurl=base_gh)
    fetched = fetched.astype(float)
    assert_frame_equal(fetched, expected)
| bsd-3-clause |
JonWel/CoolProp | wrappers/Python/CoolProp/Plots/SimpleCyclesCompression.py | 3 | 8420 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
import CoolProp
from .Common import process_fluid_state
from .SimpleCycles import BaseCycle, StateContainer
class BaseCompressionCycle(BaseCycle):
    r"""A thermodynamic cycle for vapour compression processes.

    Defines the basic properties and methods to unify access to
    compression cycle-related quantities.
    """

    def __init__(self, fluid_ref='HEOS::Water', graph_type='PH', **kwargs):
        """see :class:`CoolProp.Plots.SimpleCycles.BaseCycle` for details."""
        BaseCycle.__init__(self, fluid_ref, graph_type, **kwargs)

    # NOTE: the docstrings below are raw strings. In the original they were
    # plain strings, so "\f" in "\frac" was parsed as a formfeed character
    # and "\e" in "\eta" was an invalid escape sequence.
    def eta_carnot_heating(self):
        r"""Carnot efficiency

        Calculates the Carnot efficiency for a heating process,
        :math:`\eta_c = \frac{T_h}{T_h-T_c}`.

        Returns
        -------
        float
        """
        Tvector = self._cycle_states.T
        return np.max(Tvector) / (np.max(Tvector) - np.min(Tvector))

    def eta_carnot_cooling(self):
        r"""Carnot efficiency

        Calculates the Carnot efficiency for a cooling process,
        :math:`\eta_c = \frac{T_c}{T_h-T_c}`.

        Returns
        -------
        float
        """
        Tvector = self._cycle_states.T
        return np.min(Tvector) / (np.max(Tvector) - np.min(Tvector))
class SimpleCompressionCycle(BaseCompressionCycle):
    """A simple vapour compression cycle"""
    STATECOUNT = 4
    # Interpolation recipes for the process lines between the four states:
    # compression (0->1), heat removal (1->2), expansion (2->3) and heat
    # addition (3->0).
    STATECHANGE = [
        lambda inp: BaseCycle.state_change(inp, 'S', 'P', 0, ty1='log', ty2='log'),  # Compression process
        lambda inp: BaseCycle.state_change(inp, 'H', 'P', 1, ty1='lin', ty2='lin'),  # Heat removal
        lambda inp: BaseCycle.state_change(inp, 'H', 'P', 2, ty1='log', ty2='log'),  # Expansion
        lambda inp: BaseCycle.state_change(inp, 'H', 'P', 3, ty1='lin', ty2='lin')   # Heat addition
    ]

    def __init__(self, fluid_ref='HEOS::Water', graph_type='PH', **kwargs):
        """see :class:`CoolProp.Plots.SimpleCyclesCompression.BaseCompressionCycle` for details."""
        BaseCompressionCycle.__init__(self, fluid_ref, graph_type, **kwargs)

    def simple_solve(self, T0, p0, T2, p2, eta_com, fluid=None, SI=True):
        """A simple vapour compression cycle calculation

        Parameters
        ----------
        T0 : float
            The evaporated fluid, before the compressor
        p0 : float
            The evaporated fluid, before the compressor
        T2 : float
            The condensed fluid, before the expansion valve
        p2 : float
            The condensed fluid, before the expansion valve
        eta_com : float
            Isentropic compressor efficiency

        Examples
        --------
        >>> import CoolProp
        >>> from CoolProp.Plots import PropertyPlot
        >>> from CoolProp.Plots import SimpleCompressionCycle
        >>> pp = PropertyPlot('HEOS::R134a', 'PH', unit_system='EUR')
        >>> pp.calc_isolines(CoolProp.iQ, num=11)
        >>> cycle = SimpleCompressionCycle('HEOS::R134a', 'PH', unit_system='EUR')
        >>> T0 = 280
        >>> pp.state.update(CoolProp.QT_INPUTS,0.0,T0-15)
        >>> p0 = pp.state.keyed_output(CoolProp.iP)
        >>> T2 = 310
        >>> pp.state.update(CoolProp.QT_INPUTS,1.0,T2+10)
        >>> p2 = pp.state.keyed_output(CoolProp.iP)
        >>> cycle.simple_solve(T0, p0, T2, p2, 0.7, SI=True)
        >>> cycle.steps = 50
        >>> sc = cycle.get_state_changes()
        >>> import matplotlib.pyplot as plt
        >>> plt.close(cycle.figure)
        >>> pp.draw_process(sc)
        """
        if fluid is not None:
            self.state = process_fluid_state(fluid)
        if self._state is None:
            raise ValueError("You have to specify a fluid before you can calculate.")

        cycle_states = StateContainer(unit_system=self._system)

        # Convert the inputs to SI units when they were given in the
        # configured display unit system.
        if not SI:
            conv_t = self._system[CoolProp.iT].to_SI
            conv_p = self._system[CoolProp.iP].to_SI
            T0 = conv_t(T0)
            p0 = conv_p(p0)
            T2 = conv_t(T2)
            p2 = conv_p(p2)

        # State 0: gas from evaporator
        self.state.update(CoolProp.PT_INPUTS, p0, T0)
        h0 = self.state.hmass()
        s0 = self.state.smass()
        # Just a showcase for the different accessor methods
        cycle_states[0, 'H'] = h0
        cycle_states[0]['S'] = s0
        cycle_states[0][CoolProp.iP] = p0
        cycle_states[0, CoolProp.iT] = T0

        # State 1: pressurised vapour; isentropic compression corrected
        # with the isentropic efficiency eta_com.
        p1 = p2
        self.state.update(CoolProp.PSmass_INPUTS, p1, s0)
        h1 = h0 + (self.state.hmass() - h0) / eta_com
        self.state.update(CoolProp.HmassP_INPUTS, h1, p1)
        s1 = self.state.smass()
        T1 = self.state.T()
        cycle_states[1, 'H'] = h1
        cycle_states[1, 'S'] = s1
        cycle_states[1, 'P'] = p1
        cycle_states[1, 'T'] = T1

        # State 2: condensed vapour
        self.state.update(CoolProp.PT_INPUTS, p2, T2)
        h2 = self.state.hmass()
        s2 = self.state.smass()
        cycle_states[2, 'H'] = h2
        cycle_states[2, 'S'] = s2
        cycle_states[2, 'P'] = p2
        cycle_states[2, 'T'] = T2

        # State 3: expanded fluid, 2-phase (isenthalpic expansion)
        p3 = p0
        h3 = h2
        self.state.update(CoolProp.HmassP_INPUTS, h3, p3)
        s3 = self.state.smass()
        T3 = self.state.T()
        cycle_states[3, 'H'] = h3
        cycle_states[3, 'S'] = s3
        cycle_states[3, 'P'] = p3
        cycle_states[3, 'T'] = T3

        # (Removed the dead locals w_net and q_evap; the COP methods below
        # compute these quantities from the stored states when needed.)
        self.cycle_states = cycle_states
        self.fill_states()

    def simple_solve_dt(self, Te, Tc, dT_sh, dT_sc, eta_com, fluid=None, SI=True):
        """A simple vapour compression cycle calculation based on
        superheat, subcooling and temperatures.

        Parameters
        ----------
        Te : float
            The evaporation temperature
        Tc : float
            The condensation temperature
        dT_sh : float
            The superheat after the evaporator
        dT_sc : float
            The subcooling after the condenser
        eta_com : float
            Isentropic compressor efficiency

        Examples
        --------
        >>> import CoolProp
        >>> from CoolProp.Plots import PropertyPlot
        >>> from CoolProp.Plots import SimpleCompressionCycle
        >>> pp = PropertyPlot('HEOS::R134a', 'PH', unit_system='EUR')
        >>> pp.calc_isolines(CoolProp.iQ, num=11)
        >>> cycle = SimpleCompressionCycle('HEOS::R134a', 'PH', unit_system='EUR')
        >>> Te = 265
        >>> Tc = 300
        >>> cycle.simple_solve_dt(Te, Tc, 10, 15, 0.7, SI=True)
        >>> cycle.steps = 50
        >>> sc = cycle.get_state_changes()
        >>> import matplotlib.pyplot as plt
        >>> plt.close(cycle.figure)
        >>> pp.draw_process(sc)
        """
        if fluid is not None:
            self.state = process_fluid_state(fluid)
        if self._state is None:
            raise ValueError("You have to specify a fluid before you can calculate.")

        if not SI:
            conv_t = self._system[CoolProp.iT].to_SI
            # Fix: the original called the undefined name conv_p here,
            # raising NameError for SI=False; temperatures must use conv_t.
            Te = conv_t(Te)
            Tc = conv_t(Tc)

        # Saturation pressures at the evaporation / condensation temperatures.
        self.state.update(CoolProp.QT_INPUTS, 1.0, Te)
        p0 = self.state.p()
        self.state.update(CoolProp.QT_INPUTS, 0.0, Tc)
        p2 = self.state.p()

        # Apply superheat and subcooling, then delegate to simple_solve.
        T0 = Te + dT_sh
        T2 = Tc - dT_sc
        self.simple_solve(T0, p0, T2, p2, eta_com, fluid=None, SI=True)

    # Raw docstrings below: in the original, "\f" in "\frac" was parsed as
    # a formfeed character, corrupting the rendered formula.
    def COP_heating(self):
        r"""COP for a heating process

        Calculates the coefficient of performance for a heating process,
        :math:`COP_h = \frac{q_{con}}{w_{comp}}`.

        Returns
        -------
        float
        """
        return (self.cycle_states[1, 'H'] - self.cycle_states[2, 'H']) / (self.cycle_states[1, 'H'] - self.cycle_states[0, 'H'])

    def COP_cooling(self):
        r"""COP for a cooling process

        Calculates the coefficient of performance for a cooling process,
        :math:`COP_c = \frac{q_{eva}}{w_{comp}}`.

        Returns
        -------
        float
        """
        return (self.cycle_states[0, 'H'] - self.cycle_states[3, 'H']) / (self.cycle_states[1, 'H'] - self.cycle_states[0, 'H'])
| mit |
victorbergelin/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
    """Independently rescale the rows and columns of ``X``.

    Returns the normalized matrix together with the row and column
    scaling vectors.
    """
    X = make_nonnegative(X)
    # 1/sqrt of the marginal sums; all-zero rows/columns produce NaN,
    # which is mapped back to 0 so they contribute nothing.
    r_scale = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
    c_scale = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
    r_scale = np.where(np.isnan(r_scale), 0, r_scale)
    c_scale = np.where(np.isnan(c_scale), 0, c_scale)
    if issparse(X):
        n_rows, n_cols = X.shape
        row_mat = dia_matrix((r_scale, [0]), shape=(n_rows, n_rows))
        col_mat = dia_matrix((c_scale, [0]), shape=(n_cols, n_cols))
        normalized = row_mat * X * col_mat
    else:
        normalized = r_scale[:, np.newaxis] * X * c_scale
    return normalized, r_scale, c_scale
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
    """Normalize rows and columns of ``X`` simultaneously so that all
    rows sum to one constant and all columns sum to a different
    constant.

    Repeats `_scale_normalize` until two successive iterates differ by
    less than ``tol`` (vector norm of the difference) or ``max_iter``
    iterations have been performed.
    """
    # According to paper, this can also be done more efficiently with
    # deviation reduction and balancing algorithms.
    X = make_nonnegative(X)
    X_scaled = X
    for _ in range(max_iter):
        X_new, _, _ = _scale_normalize(X_scaled)
        if issparse(X):
            # Fix: compare against the NEW iterate, not the original X.
            # The original computed norm(X_scaled.data - X.data), which
            # measures distance from the input instead of convergence of
            # the iteration.
            dist = norm(X_scaled.data - X_new.data)
        else:
            dist = norm(X_scaled - X_new)
        X_scaled = X_new
        if dist < tol:
            break
    return X_scaled
def _log_normalize(X):
    """Normalize ``X`` according to Kluger's log-interactions scheme."""
    X = make_nonnegative(X, min_value=1)
    if issparse(X):
        raise ValueError("Cannot compute log of a sparse matrix,"
                         " because log(x) diverges to -infinity as x"
                         " goes to 0.")
    log_X = np.log(X)
    # Subtract row, column and overall means of the log-matrix so that
    # only the interaction terms remain.
    row_means = log_X.mean(axis=1)[:, np.newaxis]
    col_means = log_X.mean(axis=0)
    overall_mean = log_X.mean()
    return log_X - row_means - col_means + overall_mean
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
                                      BiclusterMixin)):
    """Base class for spectral biclustering.

    Subclasses implement ``_fit`` and inherit parameter storage,
    validation, the SVD helper and the k-means helper defined here.
    """

    @abstractmethod
    def __init__(self, n_clusters=3, svd_method="randomized",
                 n_svd_vecs=None, mini_batch=False, init="k-means++",
                 n_init=10, n_jobs=1, random_state=None):
        # scikit-learn convention: __init__ only stores parameters;
        # validation is deferred to fit() via _check_parameters().
        self.n_clusters = n_clusters
        self.svd_method = svd_method
        self.n_svd_vecs = n_svd_vecs
        self.mini_batch = mini_batch
        self.init = init
        self.n_init = n_init
        self.n_jobs = n_jobs
        self.random_state = random_state

    def _check_parameters(self):
        """Validate constructor parameters; raise ValueError when invalid."""
        legal_svd_methods = ('randomized', 'arpack')
        if self.svd_method not in legal_svd_methods:
            raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
                             " one of {1}.".format(self.svd_method,
                                                   legal_svd_methods))

    def fit(self, X):
        """Creates a biclustering for X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        self._check_parameters()
        self._fit(X)

    def _svd(self, array, n_components, n_discard):
        """Returns first `n_components` left and right singular
        vectors u and v, discarding the first `n_discard`.
        """
        if self.svd_method == 'randomized':
            kwargs = {}
            if self.n_svd_vecs is not None:
                kwargs['n_oversamples'] = self.n_svd_vecs
            u, _, vt = randomized_svd(array, n_components,
                                      random_state=self.random_state,
                                      **kwargs)
        elif self.svd_method == 'arpack':
            u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
            if np.any(np.isnan(vt)):
                # some eigenvalues of A * A.T are negative, causing
                # sqrt() to be np.nan. This causes some vectors in vt
                # to be np.nan.
                _, v = eigsh(safe_sparse_dot(array.T, array),
                             ncv=self.n_svd_vecs)
                vt = v.T
            if np.any(np.isnan(u)):
                # Same eigendecomposition fallback for the left singular
                # vectors.
                _, u = eigsh(safe_sparse_dot(array, array.T),
                             ncv=self.n_svd_vecs)
        # Fail loudly if the fallback still produced non-finite vectors.
        assert_all_finite(u)
        assert_all_finite(vt)
        u = u[:, n_discard:]
        vt = vt[n_discard:]
        return u, vt.T

    def _k_means(self, data, n_clusters):
        """Cluster ``data`` with (mini-batch) k-means.

        Returns the ``(cluster_centers_, labels_)`` of the fitted model.
        """
        if self.mini_batch:
            model = MiniBatchKMeans(n_clusters,
                                    init=self.init,
                                    n_init=self.n_init,
                                    random_state=self.random_state)
        else:
            model = KMeans(n_clusters, init=self.init,
                           n_init=self.n_init, n_jobs=self.n_jobs,
                           random_state=self.random_state)
        model.fit(data)
        centroid = model.cluster_centers_
        labels = model.labels_
        return centroid, labels
class SpectralCoclustering(BaseSpectral):
    """Spectral Co-Clustering algorithm (Dhillon, 2001).

    Clusters rows and columns of an array `X` to solve the relaxed
    normalized cut of the bipartite graph created from `X` as follows:
    the edge between row vertex `i` and column vertex `j` has weight
    `X[i, j]`.

    The resulting bicluster structure is block-diagonal, since each
    row and each column belongs to exactly one bicluster.

    Supports sparse matrices, as long as they are nonnegative.

    Read more in the :ref:`User Guide <spectral_coclustering>`.

    Parameters
    ----------
    n_clusters : integer, optional, default: 3
        The number of biclusters to find.

    svd_method : string, optional, default: 'randomized'
        Selects the algorithm for finding singular vectors. May be
        'randomized' or 'arpack'. If 'randomized', use
        :func:`sklearn.utils.extmath.randomized_svd`, which may be faster
        for large matrices. If 'arpack', use
        :func:`sklearn.utils.arpack.svds`, which is more accurate, but
        possibly slower in some cases.

    n_svd_vecs : int, optional, default: None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized`.

    mini_batch : bool, optional, default: False
        Whether to use mini-batch k-means, which is faster but may get
        different results.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization of k-means algorithm; defaults to
        'k-means++'.

    n_init : int, optional, default: 10
        Number of random initializations that are tried with the
        k-means algorithm.

        If mini-batch k-means is used, the best initialization is
        chosen and the algorithm runs once. Otherwise, the algorithm
        is run for each initialization and the best solution chosen.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used by the K-Means
        initialization.

    Attributes
    ----------
    rows_ : array-like, shape (n_row_clusters, n_rows)
        Results of the clustering. `rows[i, r]` is True if
        cluster `i` contains row `r`. Available only after calling ``fit``.

    columns_ : array-like, shape (n_column_clusters, n_columns)
        Results of the clustering, like `rows`.

    row_labels_ : array-like, shape (n_rows,)
        The bicluster label of each row.

    column_labels_ : array-like, shape (n_cols,)
        The bicluster label of each column.

    References
    ----------
    * Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
      bipartite spectral graph partitioning
      <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
    """
    def __init__(self, n_clusters=3, svd_method='randomized',
                 n_svd_vecs=None, mini_batch=False, init='k-means++',
                 n_init=10, n_jobs=1, random_state=None):
        super(SpectralCoclustering, self).__init__(n_clusters,
                                                   svd_method,
                                                   n_svd_vecs,
                                                   mini_batch,
                                                   init,
                                                   n_init,
                                                   n_jobs,
                                                   random_state)

    def _fit(self, X):
        # Scale-normalize, embed rows and columns with the leading
        # singular vectors, then k-means the joint embedding.
        normalized_data, row_diag, col_diag = _scale_normalize(X)
        n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
        u, v = self._svd(normalized_data, n_sv, n_discard=1)
        z = np.vstack((row_diag[:, np.newaxis] * u,
                       col_diag[:, np.newaxis] * v))

        _, labels = self._k_means(z, self.n_clusters)

        n_rows = X.shape[0]
        self.row_labels_ = labels[:n_rows]
        self.column_labels_ = labels[n_rows:]

        # Fix: np.vstack requires a sequence of arrays; passing a bare
        # generator is deprecated and rejected by recent NumPy, so build
        # explicit lists.
        self.rows_ = np.vstack([self.row_labels_ == c
                                for c in range(self.n_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == c
                                   for c in range(self.n_clusters)])
class SpectralBiclustering(BaseSpectral):
    """Spectral biclustering (Kluger, 2003).

    Partitions rows and columns under the assumption that the data has
    an underlying checkerboard structure. For instance, if there are
    two row partitions and three column partitions, each row will
    belong to three biclusters, and each column will belong to two
    biclusters. The outer product of the corresponding row and column
    label vectors gives this checkerboard structure.

    Read more in the :ref:`User Guide <spectral_biclustering>`.

    Parameters
    ----------
    n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
        The number of row and column clusters in the checkerboard
        structure.

    method : string, optional, default: 'bistochastic'
        Method of normalizing and converting singular vectors into
        biclusters. May be one of 'scale', 'bistochastic', or 'log'.
        The authors recommend using 'log'. If the data is sparse,
        however, log normalization will not work, which is why the
        default is 'bistochastic'. CAUTION: if `method='log'`, the
        data must not be sparse.

    n_components : integer, optional, default: 6
        Number of singular vectors to check.

    n_best : integer, optional, default: 3
        Number of best singular vectors to which to project the data
        for clustering.

    svd_method : string, optional, default: 'randomized'
        Selects the algorithm for finding singular vectors. May be
        'randomized' or 'arpack'. If 'randomized', uses
        `sklearn.utils.extmath.randomized_svd`, which may be faster
        for large matrices. If 'arpack', uses
        `sklearn.utils.arpack.svds`, which is more accurate, but
        possibly slower in some cases.

    n_svd_vecs : int, optional, default: None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized`.

    mini_batch : bool, optional, default: False
        Whether to use mini-batch k-means, which is faster but may get
        different results.

    init : {'k-means++', 'random' or an ndarray}
        Method for initialization of k-means algorithm; defaults to
        'k-means++'.

    n_init : int, optional, default: 10
        Number of random initializations that are tried with the
        k-means algorithm.

        If mini-batch k-means is used, the best initialization is
        chosen and the algorithm runs once. Otherwise, the algorithm
        is run for each initialization and the best solution chosen.

    n_jobs : int, optional, default: 1
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.

        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used by the K-Means
        initialization.

    Attributes
    ----------
    rows_ : array-like, shape (n_row_clusters, n_rows)
        Results of the clustering. `rows[i, r]` is True if
        cluster `i` contains row `r`. Available only after calling ``fit``.

    columns_ : array-like, shape (n_column_clusters, n_columns)
        Results of the clustering, like `rows`.

    row_labels_ : array-like, shape (n_rows,)
        Row partition labels.

    column_labels_ : array-like, shape (n_cols,)
        Column partition labels.

    References
    ----------
    * Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
      data: coclustering genes and conditions
      <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
    """
    def __init__(self, n_clusters=3, method='bistochastic',
                 n_components=6, n_best=3, svd_method='randomized',
                 n_svd_vecs=None, mini_batch=False, init='k-means++',
                 n_init=10, n_jobs=1, random_state=None):
        super(SpectralBiclustering, self).__init__(n_clusters,
                                                   svd_method,
                                                   n_svd_vecs,
                                                   mini_batch,
                                                   init,
                                                   n_init,
                                                   n_jobs,
                                                   random_state)
        self.method = method
        self.n_components = n_components
        self.n_best = n_best

    def _check_parameters(self):
        super(SpectralBiclustering, self)._check_parameters()
        legal_methods = ('bistochastic', 'scale', 'log')
        if self.method not in legal_methods:
            raise ValueError("Unknown method: '{0}'. method must be"
                             " one of {1}.".format(self.method, legal_methods))
        try:
            int(self.n_clusters)
        except TypeError:
            try:
                r, c = self.n_clusters
                int(r)
                int(c)
            except (ValueError, TypeError):
                # Fix: the message contained a "{}" placeholder but
                # .format() was never applied, so the literal braces were
                # shown instead of the offending value.
                raise ValueError("Incorrect parameter n_clusters has value:"
                                 " {}. It should either be a single integer"
                                 " or an iterable with two integers:"
                                 " (n_row_clusters, n_column_clusters)"
                                 .format(self.n_clusters))
        if self.n_components < 1:
            raise ValueError("Parameter n_components must be greater than 0,"
                             " but its value is {}".format(self.n_components))
        if self.n_best < 1:
            raise ValueError("Parameter n_best must be greater than 0,"
                             " but its value is {}".format(self.n_best))
        if self.n_best > self.n_components:
            raise ValueError("n_best cannot be larger than"
                             " n_components, but {} > {}"
                             "".format(self.n_best, self.n_components))

    def _fit(self, X):
        n_sv = self.n_components
        if self.method == 'bistochastic':
            normalized_data = _bistochastic_normalize(X)
            n_sv += 1
        elif self.method == 'scale':
            normalized_data, _, _ = _scale_normalize(X)
            n_sv += 1
        elif self.method == 'log':
            normalized_data = _log_normalize(X)
        # 'log' keeps the first singular vector; the other methods
        # discard it (it carries the normalization, not structure).
        n_discard = 0 if self.method == 'log' else 1
        u, v = self._svd(normalized_data, n_sv, n_discard)
        ut = u.T
        vt = v.T

        try:
            n_row_clusters, n_col_clusters = self.n_clusters
        except TypeError:
            n_row_clusters = n_col_clusters = self.n_clusters

        best_ut = self._fit_best_piecewise(ut, self.n_best,
                                           n_row_clusters)

        best_vt = self._fit_best_piecewise(vt, self.n_best,
                                           n_col_clusters)

        self.row_labels_ = self._project_and_cluster(X, best_vt.T,
                                                     n_row_clusters)

        self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
                                                        n_col_clusters)

        # Fix: np.vstack requires a sequence of arrays; passing a bare
        # generator is deprecated and rejected by recent NumPy, so build
        # explicit lists. Each row label repeats once per column cluster
        # (and vice versa) to form the checkerboard bicluster indicators.
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])

    def _fit_best_piecewise(self, vectors, n_best, n_clusters):
        """Find the ``n_best`` vectors that are best approximated by piecewise
        constant vectors.

        The piecewise vectors are found by k-means; the best is chosen
        according to Euclidean distance.
        """
        def make_piecewise(v):
            centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
            return centroid[labels].ravel()
        piecewise_vectors = np.apply_along_axis(make_piecewise,
                                                axis=1, arr=vectors)
        dists = np.apply_along_axis(norm, axis=1,
                                    arr=(vectors - piecewise_vectors))
        result = vectors[np.argsort(dists)[:n_best]]
        return result

    def _project_and_cluster(self, data, vectors, n_clusters):
        """Project ``data`` to ``vectors`` and cluster the result."""
        projected = safe_sparse_dot(data, vectors)
        _, labels = self._k_means(projected, n_clusters)
        return labels
| bsd-3-clause |
DTasev/rnd | py/lasso_selection.py | 1 | 2807 | from __future__ import print_function
from six.moves import input
import numpy as np
from matplotlib.widgets import LassoSelector
from matplotlib.path import Path
class SelectFromCollection(object):
    """Select indices from a matplotlib collection using `LassoSelector`.

    Selected indices are saved in the `ind` attribute. This tool highlights
    selected points by fading them out (i.e., reducing their alpha values).
    If your collection has alpha < 1, this tool will permanently alter them.

    Note that this tool selects collection objects based on their *origins*
    (i.e., `offsets`).

    Parameters
    ----------
    ax : :class:`~matplotlib.axes.Axes`
        Axes to interact with.
    collection : :class:`matplotlib.collections.Collection` subclass
        Collection you want to select from.
    alpha_other : 0 <= float <= 1
        To highlight a selection, this tool sets all selected points to an
        alpha value of 1 and non-selected points to `alpha_other`.
    """

    def __init__(self, ax, collection, alpha_other=0.3):
        self.canvas = ax.figure.canvas
        self.collection = collection
        self.alpha_other = alpha_other

        self.xys = collection.get_offsets()
        self.Npts = len(self.xys)

        # Ensure that we have separate colors for each object, so the alpha
        # channel can be edited per point below.
        self.fc = collection.get_facecolors()
        if len(self.fc) == 0:
            raise ValueError('Collection must have a facecolor')
        elif len(self.fc) == 1:
            self.fc = np.tile(self.fc, self.Npts).reshape(self.Npts, -1)

        self.lasso = LassoSelector(ax, onselect=self.onselect)
        self.ind = []

    def onselect(self, verts):
        """Lasso callback: record indices inside the polygon, fade the rest."""
        path = Path(verts)
        # IMPROVED: Path.contains_points tests all offsets in one vectorized
        # call instead of one Python-level contains_point call per point.
        self.ind = np.nonzero(path.contains_points(self.xys))[0]
        self.fc[:, -1] = self.alpha_other
        self.fc[self.ind, -1] = 1
        self.collection.set_facecolors(self.fc)
        self.canvas.draw_idle()

    def disconnect(self):
        """Detach the lasso and restore full alpha on all points."""
        self.lasso.disconnect_events()
        self.fc[:, -1] = 1
        self.collection.set_facecolors(self.fc)
        self.canvas.draw_idle()
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Interactive demo: lasso-select from a random scatter plot.
    plt.ion()
    xy = np.random.rand(100, 2)

    fig, ax = plt.subplots(
        subplot_kw=dict(xlim=(0, 1), ylim=(0, 1), autoscale_on=False))
    scatter = ax.scatter(xy[:, 0], xy[:, 1], s=80)

    selector = SelectFromCollection(ax, scatter)

    plt.draw()
    input('Press Enter to accept selected points')
    print("Selected points:")
    print(selector.xys[selector.ind])
    selector.disconnect()

    # Block end of script so you can check that the lasso is disconnected.
    input('Press Enter to quit')
maximus009/kaggle-galaxies | try_convnet_cc_multirotflip_3x69r45_pysex.py | 7 | 17508 | import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
# --- Hyperparameters and I/O configuration (Python 2 script) -----------------
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3  # colour channels

# Learning rate schedule, keyed by training-chunk index: 10x drops.
LEARNING_RATE_SCHEDULE = {
    0: 0.04,
    1800: 0.004,
    2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.

GEN_BUFFER_SIZE = 1

# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"

TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_pysex.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_pysex.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"

print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime

# Two input branches: a 69x69 downsampled crop and the same crop rotated 45 deg.
input_sizes = [(69, 69), (69, 69)]

ds_transforms = [
    ra.build_ds_transform(3.0, target_size=input_sizes[0]),
    ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]

num_input_representations = len(ds_transforms)

# Random augmentation ranges applied to every training chunk.
augmentation_params = {
    'zoom_range': (1.0 / 1.3, 1.3),
    'rotation_range': (0, 360),
    'shear_range': (0, 0),
    'translation_range': (-4, 4),
    'do_flip': True,
}

# Realtime-augmented stream of training chunks (pysex-based centering),
# followed by a random brightness perturbation, buffered in a subprocess.
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
                                                    augmentation_params=augmentation_params, ds_transforms=ds_transforms,
                                                    target_sizes=input_sizes, processor_class=ra.LoadAndProcessPysexCenteringRescaling)

post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)

train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)

y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids

# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)

num_valid = num_train // 10 # integer division
num_train -= num_valid

y_valid = y_train[num_train:]
y_train = y_train[:num_train]

valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]

# Index arrays used by the fixed-order generators below.
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
    """Yield the training set in fixed order (for postprocessing only).

    Do not use this generator for actual training: it applies no random
    augmentation.
    """
    gen = ra.realtime_fixed_augmented_data_gen(
        train_indices, 'train',
        ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE,
        target_sizes=input_sizes,
        processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
    return load_data.buffered_gen_mp(gen, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
    """Yield the validation split in fixed order, buffered in a subprocess."""
    gen = ra.realtime_fixed_augmented_data_gen(
        valid_indices, 'train',
        ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE,
        target_sizes=input_sizes,
        processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
    return load_data.buffered_gen_mp(gen, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
    """Yield the test set in fixed order, buffered in a subprocess."""
    gen = ra.realtime_fixed_augmented_data_gen(
        test_indices, 'test',
        ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE,
        target_sizes=input_sizes,
        processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
    return load_data.buffered_gen_mp(gen, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()

# Run the fixed (unaugmented) validation generator once and stack the chunks
# into one array per input representation.
xs_valid = [[] for _ in xrange(num_input_representations)]

for data, length in create_valid_gen():
    for x_valid_list, x_chunk in zip(xs_valid, data):
        x_valid_list.append(x_chunk[:length])  # drop the zero padding of the last chunk

xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up

print "  took %.2f seconds" % (time.time() - start_time)

print "Build model"
# Two input branches (plain + 45-degree rotated crop), merged by
# MultiRotSliceLayer into rotated/flipped views that share conv weights.
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])

l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)

l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)

# Convolutional stack (cuda-convnet layers expect the c01b layout).
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)

l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)

l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)

l3s = cc_layers.ShuffleC01BToBC01Layer(l3)

j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts

l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5)
# l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
# l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')

# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)

# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)

train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)

# Shared variables hold one chunk on the GPU; minibatches are sliced by idx.
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))

idx = T.lscalar('idx')

givens = {
    l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
    l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
    l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}

# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)

train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')

print "Train model"
start_time = time.time()
prev_time = start_time

# NOTE(review): x_valid here is the loop variable leaked from the list
# comprehensions above (Python 2 scoping) — it refers to the last input
# representation. All representations have the same number of rows, so
# this works, but it would be a NameError under Python 3.
num_batches_valid = x_valid.shape[0] // BATCH_SIZE

losses_train = []
losses_valid = []
param_stds = []
# Main training loop: one iteration per augmented data chunk.
for e in xrange(NUM_CHUNKS):
    print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
    chunk_data, chunk_length = train_gen.next()
    y_chunk = chunk_data.pop() # last element is labels.
    xs_chunk = chunk_data

    # need to transpose the chunks to move the 'channels' dimension up
    xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]

    # Apply the scheduled learning-rate drop when this chunk index has one.
    if e in LEARNING_RATE_SCHEDULE:
        current_lr = LEARNING_RATE_SCHEDULE[e]
        learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
        print "  setting learning rate to %.6f" % current_lr

    # train without normalisation for the first # chunks.
    if e >= NUM_CHUNKS_NONORM:
        train = train_norm
    else:
        train = train_nonorm

    print "  load training data onto GPU"
    for x_shared, x_chunk in zip(xs_shared, xs_chunk):
        x_shared.set_value(x_chunk)
    y_shared.set_value(y_chunk)
    num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE

    # import pdb; pdb.set_trace()

    print "  batch SGD"
    losses = []
    for b in xrange(num_batches_chunk):
        # if b % 1000 == 0:
        #     print "  batch %d/%d" % (b + 1, num_batches_chunk)
        loss = train(b)
        losses.append(loss)
        # print "  loss: %.6f" % loss

    # RMSE over the chunk (the theano function returns squared errors).
    mean_train_loss = np.sqrt(np.mean(losses))
    print "  mean training loss (RMSE):\t\t%.6f" % mean_train_loss
    losses_train.append(mean_train_loss)

    # store param stds during training
    param_stds.append([p.std() for p in layers.get_param_values(l6)])

    if ((e + 1) % VALIDATE_EVERY) == 0:
        print
        print "VALIDATING"
        print "  load validation data onto GPU"
        for x_shared, x_valid in zip(xs_shared, xs_valid):
            x_shared.set_value(x_valid)
        y_shared.set_value(y_valid)

        print "  compute losses"
        losses = []
        for b in xrange(num_batches_valid):
            # if b % 1000 == 0:
            #     print "  batch %d/%d" % (b + 1, num_batches_valid)
            loss = compute_loss(b)
            losses.append(loss)

        mean_valid_loss = np.sqrt(np.mean(losses))
        print "  mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
        losses_valid.append(mean_valid_loss)

    # Progress / ETA reporting.
    now = time.time()
    time_since_start = now - start_time
    time_since_prev = now - prev_time
    prev_time = now
    est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
    eta = datetime.now() + timedelta(seconds=est_time_left)
    eta_str = eta.strftime("%c")
    print "  %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
    print "  estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
    print

del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
    # if b % 1000 == 0:
    #     print "  batch %d/%d" % (b + 1, num_batches_valid)
    predictions = compute_output(b)
    predictions_list.append(predictions)

all_predictions = np.vstack(predictions_list)

# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0

print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
    # NOTE(review): text mode ('w') works for pickling under Python 2 only;
    # binary mode would be required under Python 3.
    pickle.dump({
        'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
        'predictions': all_predictions,
        'targets': y_valid,
        'mean_train_loss': mean_train_loss,
        'mean_valid_loss': mean_valid_loss,
        'time_since_start': time_since_start,
        'losses_train': losses_train,
        'losses_valid': losses_valid,
        'param_values': layers.get_param_values(l6),
        'param_stds': param_stds,
    }, f, pickle.HIGHEST_PROTOCOL)

del predictions_list, all_predictions # memory cleanup

# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)

print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
    print "Chunk %d" % (e + 1)
    xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.

    for x_shared, x_chunk in zip(xs_shared, xs_chunk):
        x_shared.set_value(x_chunk)

    num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data

    # make predictions for testset, don't forget to cut off the zeros at the end
    for b in xrange(num_batches_chunk):
        # if b % 1000 == 0:
        #     print "  batch %d/%d" % (b + 1, num_batches_chunk)
        predictions = compute_output(b)
        predictions_list.append(predictions)

all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length

# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0

print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")

with open(TARGET_PATH, 'wb') as csvfile:
    writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
    # write header
    writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
    # write data
    for k in xrange(test_ids.shape[0]):
        row = [test_ids[k]] + all_predictions[k].tolist()
        writer.writerow(row)

print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))

del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# The feature-extraction pass below is disabled; it dumped the l4 activations
# for train and test sets to FEATURES_PATTERN files.
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)

# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
#     print "Extracting feature representations for all galaxies: %s" % name
#     features_list = []
#     for e, (xs_chunk, chunk_length) in enumerate(gen):
#         print "Chunk %d" % (e + 1)
#         x_chunk, x2_chunk = xs_chunk
#         x_shared.set_value(x_chunk)
#         x2_shared.set_value(x2_chunk)

#         num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data

#         # compute features for set, don't forget to cute off the zeros at the end
#         for b in xrange(num_batches_chunk):
#             if b % 1000 == 0:
#                 print "  batch %d/%d" % (b + 1, num_batches_chunk)

#             features = compute_features(b)
#             features_list.append(features)

#     all_features = np.vstack(features_list)
#     all_features = all_features[:num] # truncate back to the correct length

#     features_path = FEATURES_PATTERN % name
#     print "  write features to %s" % features_path
#     np.save(features_path, all_features)

print "Done!"
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
    # Miscellaneous linkage sanity checks.
    rnd = np.random.RandomState(42)
    data = rnd.normal(size=(5, 5))
    assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit,
                  data)
    assert_raises(ValueError, linkage_tree, data, linkage='foo')
    assert_raises(ValueError, linkage_tree, data,
                  connectivity=np.ones((4, 4)))

    # Smoke test FeatureAgglomeration
    FeatureAgglomeration().fit(data)

    # Clustering a precomputed distance matrix must agree with clustering
    # the raw data under the corresponding affinity.
    dist = cosine_distances(data)
    precomputed = linkage_tree(dist, affinity="precomputed")
    assert_array_equal(precomputed[0], linkage_tree(data, affinity="cosine")[0])

    callable_affinity = linkage_tree(data, affinity=manhattan_distances)
    assert_array_equal(callable_affinity[0],
                       linkage_tree(data, affinity="manhattan")[0])
def test_structured_linkage_tree():
    # Structured linkage trees must form a full binary tree over features
    # and reject malformed inputs.
    rnd = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    mask[4:7, 4:7] = 0  # avoid a mask with only 'True' entries
    features = rnd.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    expected_n_nodes = 2 * features.shape[1] - 1
    for builder in _TREE_BUILDERS.values():
        children, n_components, n_leaves, parent = builder(features.T,
                                                           connectivity)
        assert_true(len(children) + n_leaves == expected_n_nodes)
        # A connectivity matrix of the wrong shape raises a ValueError...
        assert_raises(ValueError, builder, features.T, np.ones((4, 4)))
        # ...and so does fitting with no samples.
        assert_raises(ValueError, builder, features.T[:0], connectivity)
def test_unstructured_linkage_tree():
    # Check that we obtain the correct solution for unstructured linkage trees.
    rng = np.random.RandomState(0)
    X = rng.randn(50, 100)
    for this_X in (X, X[0]):
        # With specified a number of clusters just for the sake of
        # raising a warning and testing the warning code
        with ignore_warnings():
            children, n_nodes, n_leaves, parent = assert_warns(
                UserWarning, ward_tree, this_X.T, n_clusters=10)
        # n_nodes unpacked above is discarded: a full binary tree over the
        # features has 2 * n_features - 1 nodes.
        n_nodes = 2 * X.shape[1] - 1
        assert_equal(len(children) + n_leaves, n_nodes)

    # Same check for every tree builder (ward, complete, average).
    for tree_builder in _TREE_BUILDERS.values():
        for this_X in (X, X[0]):
            with ignore_warnings():
                children, n_nodes, n_leaves, parent = assert_warns(
                    UserWarning, tree_builder, this_X.T, n_clusters=10)
            n_nodes = 2 * X.shape[1] - 1
            assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
    # Every tree builder must produce a full binary tree over the features.
    rnd = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    data = rnd.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    expected_n_nodes = 2 * data.shape[1] - 1
    for builder in _TREE_BUILDERS.values():
        children, n_nodes, n_leaves, parent = builder(data.T, connectivity)
        assert_true(len(children) + n_leaves == expected_n_nodes)
def test_agglomerative_clustering():
    # Check that we obtain the correct number of clusters with
    # agglomerative clustering.
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    for linkage in ("ward", "complete", "average"):
        clustering = AgglomerativeClustering(n_clusters=10,
                                             connectivity=connectivity,
                                             linkage=linkage)
        clustering.fit(X)
        # test caching
        try:
            tempdir = mkdtemp()
            clustering = AgglomerativeClustering(
                n_clusters=10, connectivity=connectivity,
                memory=tempdir,
                linkage=linkage)
            clustering.fit(X)
            # NOTE: `labels` is reused further down as the reference solution
            # for the early-stopping comparison.
            labels = clustering.labels_
            assert_true(np.size(np.unique(labels)) == 10)
        finally:
            shutil.rmtree(tempdir)
        # Turn caching off now
        clustering = AgglomerativeClustering(
            n_clusters=10, connectivity=connectivity, linkage=linkage)
        # Check that we obtain the same solution with early-stopping of the
        # tree building
        clustering.compute_full_tree = False
        clustering.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
                                                         labels), 1)
        clustering.connectivity = None
        clustering.fit(X)
        assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that we raise a ValueError on a connectivity matrix of the
        # wrong shape (sliced to 10x10, but X has 100 samples)
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=sparse.lil_matrix(
                connectivity.toarray()[:10, :10]),
            linkage=linkage)
        assert_raises(ValueError, clustering.fit, X)

    # Test that using ward with another metric than euclidean raises an
    # exception
    clustering = AgglomerativeClustering(
        n_clusters=10,
        connectivity=connectivity.toarray(),
        affinity="manhattan",
        linkage="ward")
    assert_raises(ValueError, clustering.fit, X)

    # Test using another metric than euclidean works with linkage complete
    for affinity in PAIRED_DISTANCES.keys():
        # Compare our (structured) implementation to scipy
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=np.ones((n_samples, n_samples)),
            affinity=affinity,
            linkage="complete")
        clustering.fit(X)
        clustering2 = AgglomerativeClustering(
            n_clusters=10,
            connectivity=None,
            affinity=affinity,
            linkage="complete")
        clustering2.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
                                                         clustering.labels_),
                            1)

    # Test that using a distance matrix (affinity = 'precomputed') has same
    # results (with connectivity constraints)
    clustering = AgglomerativeClustering(n_clusters=10,
                                         connectivity=connectivity,
                                         linkage="complete")
    clustering.fit(X)
    X_dist = pairwise_distances(X)
    clustering2 = AgglomerativeClustering(n_clusters=10,
                                          connectivity=connectivity,
                                          affinity='precomputed',
                                          linkage="complete")
    clustering2.fit(X_dist)
    assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
    # Feature agglomeration on a simplistic problem behaves as expected.
    rnd = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=np.bool)
    data = rnd.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
    agglo.fit(data)
    # All five feature clusters should be populated.
    assert_true(np.size(np.unique(agglo.labels_)) == 5)

    reduced = agglo.transform(data)
    assert_true(reduced.shape[1] == 5)

    # Round-tripping through inverse_transform keeps one value per cluster
    # and stays consistent with transform.
    restored = agglo.inverse_transform(reduced)
    assert_true(np.unique(restored[0]).size == 5)
    assert_array_almost_equal(agglo.transform(restored), reduced)

    # Fitting with no samples must raise a ValueError.
    assert_raises(ValueError, agglo.fit, data[:0])
def assess_same_labelling(cut1, cut2):
    """Assert that two flat clusterings define the same partition.

    Builds the co-clustering matrix (entry [i, j] is 1 iff samples i and j
    share a cluster) for each labelling and asserts they are equal, so the
    comparison is invariant to label permutation. Used for comparison with
    scipy. Raises AssertionError on mismatch; returns None otherwise.
    """
    co_clust = []
    for cut in [cut1, cut2]:
        n = len(cut)
        k = cut.max() + 1
        # One-hot encode the labelling, then ecut @ ecut.T gives the
        # co-clustering indicator matrix.
        ecut = np.zeros((n, k))
        ecut[np.arange(n), cut] = 1
        co_clust.append(np.dot(ecut, ecut.T))
    # IMPROVED: bare assert instead of the deprecated nose-style assert_true.
    assert (co_clust[0] == co_clust[1]).all()
def test_scikit_vs_scipy():
    # Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
    n, p, k = 10, 5, 3
    rng = np.random.RandomState(0)

    # Not using a lil_matrix here, just to check that non sparse
    # matrices are well handled
    connectivity = np.ones((n, n))
    for linkage in _TREE_BUILDERS.keys():
        for i in range(5):
            X = .1 * rng.normal(size=(n, p))
            X -= 4. * np.arange(n)[:, np.newaxis]
            X -= X.mean(axis=1)[:, np.newaxis]

            out = hierarchy.linkage(X, method=linkage)

            # scipy's linkage matrix stores the merged node ids in the
            # first two columns.
            children_ = out[:, :2].astype(np.int)
            children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)

            # Cut both trees into k clusters and check the partitions match
            # (up to label permutation).
            cut = _hc_cut(k, children, n_leaves)
            cut_ = _hc_cut(k, children_, n_leaves)
            assess_same_labelling(cut, cut_)

    # Test error management in _hc_cut
    assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
    # Connectivity must be propagated correctly through merges in ward.
    # These nearly duplicated points used to trigger an IndexError when
    # connectivity changes were not propagated while merging.
    points = np.array([(.014, .120), (.014, .099), (.014, .097),
                       (.017, .153), (.017, .153), (.018, .153),
                       (.018, .153), (.018, .153), (.018, .153),
                       (.018, .153), (.018, .153), (.018, .153),
                       (.018, .152), (.018, .149), (.018, .144)])
    connectivity = kneighbors_graph(points, 10, include_self=False)
    ward = AgglomerativeClustering(
        n_clusters=4, connectivity=connectivity, linkage='ward')
    # If changes are not propagated correctly, fit crashes with an IndexError.
    ward.fit(points)
def test_ward_tree_children_order():
    # Structured and unstructured ward_tree must order children identically.
    # Checked on five random datasets.
    n, p = 10, 5
    rnd = np.random.RandomState(0)
    full_connectivity = np.ones((n, n))
    for _ in range(5):
        data = .1 * rnd.normal(size=(n, p))
        data -= 4. * np.arange(n)[:, np.newaxis]
        data -= data.mean(axis=1)[:, np.newaxis]
        unstructured = ward_tree(data)
        structured = ward_tree(data, connectivity=full_connectivity)
        assert_array_equal(unstructured[0], structured[0])
def test_ward_linkage_tree_return_distance():
    # Test return_distance option on linkage and ward trees.
    #
    # Part 1: return_distance=True must give the same output on both
    # structured (full connectivity) and unstructured clustering, for
    # five random datasets.
    n, p = 10, 5
    rng = np.random.RandomState(0)

    connectivity = np.ones((n, n))
    for i in range(5):
        X = .1 * rng.normal(size=(n, p))
        X -= 4. * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]

        out_unstructured = ward_tree(X, return_distance=True)
        out_structured = ward_tree(X, connectivity=connectivity,
                                   return_distance=True)

        # get children (first element of the ward_tree output tuple)
        children_unstructured = out_unstructured[0]
        children_structured = out_structured[0]

        # check if we got the same clusters
        assert_array_equal(children_unstructured, children_structured)

        # check if the distances (last element of the output) are the same
        dist_unstructured = out_unstructured[-1]
        dist_structured = out_structured[-1]

        assert_array_almost_equal(dist_unstructured, dist_structured)

        # Same structured/unstructured agreement for the other linkages.
        # NOTE(review): the trailing [-1] below means *_items is already the
        # distances entry of the linkage_tree output, so *_items[0] /
        # *_items[-1] index into that array rather than into the full output
        # tuple — confirm this is the intended comparison.
        for linkage in ['average', 'complete']:
            structured_items = linkage_tree(
                X, connectivity=connectivity, linkage=linkage,
                return_distance=True)[-1]
            unstructured_items = linkage_tree(
                X, linkage=linkage, return_distance=True)[-1]
            structured_dist = structured_items[-1]
            unstructured_dist = unstructured_items[-1]
            structured_children = structured_items[0]
            unstructured_children = unstructured_items[0]
            assert_array_almost_equal(structured_dist, unstructured_dist)
            assert_array_almost_equal(
                structured_children, unstructured_children)

    # Part 2: test on the following dataset where we know the truth
    # taken from scipy/cluster/tests/hierarchy_test_data.py
    X = np.array([[1.43054825, -7.5693489],
                  [6.95887839, 6.82293382],
                  [2.87137846, -9.68248579],
                  [7.87974764, -6.05485803],
                  [8.24018364, -6.09495602],
                  [7.39020262, 8.54004355]])
    # truth: columns are [child_a, child_b, distance, cluster size]
    linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
                               [1., 5., 1.77045373, 2.],
                               [0., 2., 2.55760419, 2.],
                               [6., 8., 9.10208346, 4.],
                               [7., 9., 24.7784379, 6.]])

    linkage_X_complete = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.96742194, 4.],
         [7., 9., 18.77445997, 6.]])

    linkage_X_average = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.55832839, 4.],
         [7., 9., 15.44089605, 6.]])

    n_samples, n_features = np.shape(X)
    connectivity_X = np.ones((n_samples, n_samples))

    out_X_unstructured = ward_tree(X, return_distance=True)
    out_X_structured = ward_tree(X, connectivity=connectivity_X,
                                 return_distance=True)

    # check that the labels are the same
    assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
    assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])

    # check that the distances are correct (index 4 is the distances entry)
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])

    linkage_options = ['complete', 'average']
    X_linkage_truth = [linkage_X_complete, linkage_X_average]
    for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
        out_X_unstructured = linkage_tree(
            X, return_distance=True, linkage=linkage)
        out_X_structured = linkage_tree(
            X, connectivity=connectivity_X, linkage=linkage,
            return_distance=True)

        # check that the labels are the same
        assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
        assert_array_equal(X_truth[:, :2], out_X_structured[0])

        # check that the distances are correct
        assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
        assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Non-regression: a connectivity matrix that does not support item
    # assignment, combined with more than one connected component, used
    # to crash connectivity fixing.
    points = np.array([[0, 0], [1, 1]])
    # A mask with several components forces the connectivity-fixing path.
    mask = np.array([[True, False], [False, True]])
    graph = grid_to_graph(n_x=2, n_y=2, mask=mask)
    model = AgglomerativeClustering(connectivity=graph, linkage='ward')
    assert_warns(UserWarning, model.fit, points)
def test_int_float_dict():
    # IntFloatDict must give back exactly the values it was built from,
    # and the merge helpers must at least run without error.
    rng = np.random.RandomState(0)
    keys = np.unique(rng.randint(100, size=10).astype(np.intp))
    values = rng.rand(len(keys))

    mapping = IntFloatDict(keys, values)
    for k, v in zip(keys, values):
        assert mapping[k] == v

    other = IntFloatDict(np.arange(50).astype(np.intp)[::2],
                         (0.5 * np.ones(50))[::2])
    # Complete smoke test of the merge routines.
    mask = np.ones(100, dtype=np.intp)
    max_merge(mapping, other, mask=mask, n_a=1, n_b=1)
    average_merge(mapping, other, mask=mask, n_a=1, n_b=1)
def test_connectivity_callable():
    # A callable connectivity must yield the same clustering as the
    # precomputed matrix it would produce.
    rng = np.random.RandomState(0)
    data = rng.rand(20, 5)
    precomputed = kneighbors_graph(data, 3, include_self=False)
    from_matrix = AgglomerativeClustering(connectivity=precomputed)
    from_callable = AgglomerativeClustering(
        connectivity=partial(kneighbors_graph, n_neighbors=3,
                             include_self=False))
    from_matrix.fit(data)
    from_callable.fit(data)
    assert_array_equal(from_matrix.labels_, from_callable.labels_)
def test_connectivity_ignores_diagonal():
    # Self-connections on the connectivity diagonal must not change the
    # resulting clustering.
    rng = np.random.RandomState(0)
    data = rng.rand(20, 5)
    without_self = kneighbors_graph(data, 3, include_self=False)
    with_self = kneighbors_graph(data, 3, include_self=True)
    model_without = AgglomerativeClustering(connectivity=without_self)
    model_with = AgglomerativeClustering(connectivity=with_self)
    model_without.fit(data)
    model_with.fit(data)
    assert_array_equal(model_without.labels_, model_with.labels_)
def test_compute_full_tree():
    # The full tree must be computed whenever n_clusters is small.
    rng = np.random.RandomState(0)
    data = rng.randn(10, 2)
    graph = kneighbors_graph(data, 5, include_self=False)

    # Small n_clusters: the full tree is built, i.e. the number of merges
    # equals n_samples - 1.
    model = AgglomerativeClustering(n_clusters=2, connectivity=graph)
    model.fit(data)
    assert_equal(model.children_.shape[0], data.shape[0] - 1)

    # Large n_clusters (greater than max of 100 and 0.02 * n_samples):
    # building stops once n_clusters remain, i.e. after
    # n_samples - n_clusters merges.
    n_clusters = 101
    data = rng.randn(200, 2)
    graph = kneighbors_graph(data, 10, include_self=False)
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    connectivity=graph)
    model.fit(data)
    assert_equal(model.children_.shape[0], data.shape[0] - n_clusters)
def test_n_components():
    # Every tree builder must report the number of connected components
    # of the connectivity matrix (second element of its output).
    rng = np.random.RandomState(0)
    data = rng.rand(5, 5)
    # An identity connectivity matrix has five isolated components.
    connectivity = np.eye(5)
    for builder in _TREE_BUILDERS.values():
        assert_equal(ignore_warnings(builder)(data, connectivity)[1], 5)
def test_agg_n_clusters():
    # AgglomerativeClustering must reject n_clusters <= 0 with a clear
    # error message.
    rng = np.random.RandomState(0)
    data = rng.rand(20, 10)
    for bad_value in [-1, 0]:
        model = AgglomerativeClustering(n_clusters=bad_value)
        expected = ("n_clusters should be an integer greater than 0."
                    " %s was provided." % str(model.n_clusters))
        assert_raise_message(ValueError, expected, model.fit, data)
| bsd-3-clause |
ehogan/iris | docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py | 17 | 1646 |
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip)  # noqa

import iris
import iris.analysis
import iris.plot as iplt
import matplotlib.pyplot as plt
import matplotlib.colors
import numpy as np

# Demonstrate area-weighted regridding of a regional cube (total volcanic
# ash concentration) onto a global grid, for several values of the
# missing-data tolerance (mdtol).

global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
regional_ash = iris.load_cube(iris.sample_data_path('NAME_output.txt'))
# Collapse the vertical dimension to a single total-concentration field.
regional_ash = regional_ash.collapsed('flight_level', iris.analysis.SUM)

# Mask values so low that they are anomalous.
regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6)

# Logarithmic colour scale spanning the unmasked data range.
norm = matplotlib.colors.LogNorm(5e-6, 0.0175)

# Area-weighted regridding needs bounded horizontal coordinates on the
# target grid; the sample cube only provides point coordinates.
global_air_temp.coord('longitude').guess_bounds()
global_air_temp.coord('latitude').guess_bounds()

fig = plt.figure(figsize=(8, 4.5))

# Panel 1: the original (non-regridded) regional field, for reference.
plt.subplot(2, 2, 1)
iplt.pcolormesh(regional_ash, norm=norm)
plt.title('Volcanic ash total\nconcentration not regridded',
          size='medium')

# Panels 2-4: regrid with increasing tolerance to missing source data.
for subplot_num, mdtol in zip([2, 3, 4], [0, 0.5, 1]):
    plt.subplot(2, 2, subplot_num)
    scheme = iris.analysis.AreaWeighted(mdtol=mdtol)
    global_ash = regional_ash.regrid(global_air_temp, scheme)
    iplt.pcolormesh(global_ash, norm=norm)
    plt.title('Volcanic ash total concentration\n'
              'regridded with AreaWeighted(mdtol={})'.format(mdtol),
              size='medium')

plt.subplots_adjust(hspace=0, wspace=0.05,
                    left=0.001, right=0.999, bottom=0, top=0.955)

# Iterate over each of the figure's axes, adding coastlines, gridlines
# and setting the extent.
for ax in fig.axes:
    ax.coastlines('50m')
    ax.gridlines()
    ax.set_extent([-80, 40, 31, 75])

plt.show()
| lgpl-3.0 |
NunoEdgarGub1/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
    """Return the duration ``delta`` in fractional seconds.

    Parameters
    ----------
    t_start : datetime.datetime
        Unused; kept for backward compatibility with existing callers.
    delta : datetime.timedelta
        The measured duration.

    Returns
    -------
    float
        Elapsed time in seconds, with microsecond resolution.
    """
    # total_seconds() also accounts for the days component, which the
    # previous `delta.seconds + delta.microseconds / 1e6` formula
    # silently dropped for durations of a day or more.
    return delta.total_seconds()
def bench_sample(sampling, n_population, n_samples):
    """Time one call of ``sampling`` and return the elapsed seconds.

    Parameters
    ----------
    sampling : callable
        Invoked as ``sampling(n_population, n_samples)``.
    n_population, n_samples : int
        Forwarded unchanged to ``sampling``.

    Returns
    -------
    float
        Wall-clock duration of the call, as computed by ``compute_time``.
    """
    # Collect garbage up front so a pending collection cannot pollute
    # the measurement.
    gc.collect()
    t_start = datetime.now()
    sampling(n_population, n_samples)
    delta = datetime.now() - t_start
    return compute_time(t_start, delta)
if __name__ == "__main__":
    # Command-line driver: benchmark the selected sampling-without-
    # replacement implementations over a range of sample sizes and plot
    # the averaged timings.

    ###########################################################################
    # Option parser
    ###########################################################################
    op = optparse.OptionParser()
    op.add_option("--n-times",
                  dest="n_times", default=5, type=int,
                  help="Benchmark results are average over n_times experiments")

    op.add_option("--n-population",
                  dest="n_population", default=100000, type=int,
                  help="Size of the population to sample from.")

    op.add_option("--n-step",
                  dest="n_steps", default=5, type=int,
                  help="Number of step interval between 0 and n_population.")

    default_algorithms = "custom-tracking-selection,custom-auto," \
                         "custom-reservoir-sampling,custom-pool," \
                         "python-core-sample,numpy-permutation"

    op.add_option("--algorithm",
                  dest="selected_algorithm",
                  default=default_algorithms,
                  type=str,
                  help="Comma-separated list of transformer to benchmark. "
                       "Default: %default. \nAvailable: %default")

    # op.add_option("--random-seed",
    #               dest="random_seed", default=13, type=int,
    #               help="Seed used by the random number generators.")

    (opts, args) = op.parse_args()
    if len(args) > 0:
        op.error("this script takes no arguments.")
        sys.exit(1)  # defensive; op.error() already raises SystemExit

    # Validate the requested algorithm names before doing any work.
    selected_algorithm = opts.selected_algorithm.split(',')
    for key in selected_algorithm:
        if key not in default_algorithms.split(','):
            raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
                             % (key, default_algorithms))

    ###########################################################################
    # List sampling algorithm
    ###########################################################################
    # We assume that sampling algorithm has the following signature:
    #   sample(n_population, n_sample)
    #
    sampling_algorithm = {}

    ###########################################################################
    # Set Python core input
    sampling_algorithm["python-core-sample"] = \
        lambda n_population, n_sample: \
        random.sample(xrange(n_population), n_sample)

    ###########################################################################
    # Set custom automatic method selection
    sampling_algorithm["custom-auto"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="auto",
                                   random_state=random_state)

    ###########################################################################
    # Set custom tracking based method
    sampling_algorithm["custom-tracking-selection"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="tracking_selection",
                                   random_state=random_state)

    ###########################################################################
    # Set custom reservoir based method
    sampling_algorithm["custom-reservoir-sampling"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="reservoir_sampling",
                                   random_state=random_state)

    ###########################################################################
    # Set custom pool based method
    sampling_algorithm["custom-pool"] = \
        lambda n_population, n_samples, random_state=None: \
        sample_without_replacement(n_population,
                                   n_samples,
                                   method="pool",
                                   random_state=random_state)

    ###########################################################################
    # Numpy permutation based
    sampling_algorithm["numpy-permutation"] = \
        lambda n_population, n_sample: \
        np.random.permutation(n_population)[:n_sample]

    ###########################################################################
    # Remove unspecified algorithm
    sampling_algorithm = dict((key, value)
                              for key, value in sampling_algorithm.items()
                              if key in selected_algorithm)

    ###########################################################################
    # Perform benchmark
    ###########################################################################
    time = {}
    # np.int was removed in NumPy 1.24; the builtin int is the supported
    # spelling for this cast.
    n_samples = np.linspace(start=0, stop=opts.n_population,
                            num=opts.n_steps).astype(int)

    ratio = n_samples / opts.n_population

    print('Benchmarks')
    print("===========================")

    for name in sorted(sampling_algorithm):
        print("Perform benchmarks for %s..." % name, end="")
        time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))

        for step in xrange(opts.n_steps):
            for it in xrange(opts.n_times):
                time[name][step, it] = bench_sample(sampling_algorithm[name],
                                                    opts.n_population,
                                                    n_samples[step])

        print("done")

    print("Averaging results...", end="")
    for name in sampling_algorithm:
        time[name] = np.mean(time[name], axis=1)
    print("done\n")

    # Print results
    ###########################################################################
    print("Script arguments")
    print("===========================")
    arguments = vars(opts)
    print("%s \t | %s " % ("Arguments".ljust(16),
                           "Value".center(12),))
    print(25 * "-" + ("|" + "-" * 14) * 1)

    for key, value in arguments.items():
        print("%s \t | %s " % (str(key).ljust(16),
                               str(value).strip().center(12)))

    print("")
    print("Sampling algorithm performance:")
    print("===============================")
    print("Results are averaged over %s repetition(s)." % opts.n_times)
    print("")

    fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
    plt.title("n_population = %s, n_times = %s" %
              (opts.n_population, opts.n_times))
    ax = fig.add_subplot(111)
    for name in sampling_algorithm:
        ax.plot(ratio, time[name], label=name)

    ax.set_xlabel('ratio of n_sample / n_population')
    ax.set_ylabel('Time (s)')
    ax.legend()

    # Sort legend labels alphabetically for a stable display order.
    handles, labels = ax.get_legend_handles_labels()
    hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
    handles2, labels2 = zip(*hl)
    ax.legend(handles2, labels2, loc=0)

    plt.show()
| bsd-3-clause |
iyounus/incubator-systemml | src/main/python/tests/test_mllearn_numpy.py | 4 | 8902 | #!/usr/bin/python
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# To run:
# - Python 2: `PYSPARK_PYTHON=python2 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_numpy.py`
# - Python 3: `PYSPARK_PYTHON=python3 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_numpy.py`
# Make the `systemml` package importable
import os
import sys
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
import unittest
import numpy as np
from pyspark.context import SparkContext
from pyspark.ml import Pipeline
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
from sklearn import datasets, metrics, neighbors
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score, r2_score
from systemml.mllearn import LinearRegression, LogisticRegression, NaiveBayes, SVM
from sklearn import linear_model
sc = SparkContext()
sparkSession = SparkSession.builder.getOrCreate()
import os
def writeColVector(X, fileName):
    """Write the 1-D array ``X`` to ``fileName`` (one value per line) in the
    current working directory, alongside a SystemML ``.mtd`` metadata file
    describing it as an Nx1 CSV matrix.
    """
    target = os.path.join(os.getcwd(), fileName)
    X.tofile(target, sep='\n')
    # SystemML expects a JSON-like .mtd companion describing the matrix.
    meta = ('{ "data_type": "matrix", "value_type": "double", "rows":'
            + str(len(X)) +
            ', "cols": 1, "nnz": -1, "format": "csv", "author": "systemml-tests", "created": "0000-00-00 00:00:00 PST" }')
    with open(target + '.mtd', 'w') as handle:
        handle.write(meta)
def deleteIfExists(fileName):
    """Best-effort removal of ``fileName``; OS errors (e.g. a missing
    file) are silently ignored."""
    try:
        os.remove(fileName)
    except OSError:
        pass
# Currently not integrated with JUnit test
# ~/spark-1.6.1-scala-2.11/bin/spark-submit --master local[*] --driver-class-path SystemML.jar test.py
class TestMLLearn(unittest.TestCase):
    """Compare systemml.mllearn estimators against their scikit-learn
    counterparts on small reference datasets.

    Note: ``self.failUnless`` (deprecated since Python 2.7 and removed in
    Python 3.12) has been replaced by ``self.assertTrue`` throughout.
    """

    def test_logistic(self):
        digits = datasets.load_digits()
        X_digits = digits.data
        y_digits = digits.target
        n_samples = len(X_digits)
        # 90/10 train/test split.
        X_train = X_digits[:int(.9 * n_samples)]
        y_train = y_digits[:int(.9 * n_samples)]
        X_test = X_digits[int(.9 * n_samples):]
        y_test = y_digits[int(.9 * n_samples):]
        logistic = LogisticRegression(sparkSession)
        logistic.fit(X_train, y_train)
        mllearn_predicted = logistic.predict(X_test)
        sklearn_logistic = linear_model.LogisticRegression()
        sklearn_logistic.fit(X_train, y_train)
        self.assertTrue(accuracy_score(sklearn_logistic.predict(X_test), mllearn_predicted) > 0.95)  # We are comparable to a similar algorithm in scikit learn

    def test_logistic_mlpipeline(self):
        # End-to-end Spark ML pipeline: tokenize -> hash features -> fit.
        training = sparkSession.createDataFrame([
            ("a b c d e spark", 1.0),
            ("b d", 2.0),
            ("spark f g h", 1.0),
            ("hadoop mapreduce", 2.0),
            ("b spark who", 1.0),
            ("g d a y", 2.0),
            ("spark fly", 1.0),
            ("was mapreduce", 2.0),
            ("e spark program", 1.0),
            ("a e c l", 2.0),
            ("spark compile", 1.0),
            ("hadoop software", 2.0)
        ], ["text", "label"])
        tokenizer = Tokenizer(inputCol="text", outputCol="words")
        hashingTF = HashingTF(inputCol="words", outputCol="features", numFeatures=20)
        lr = LogisticRegression(sparkSession)
        pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
        model = pipeline.fit(training)
        test = sparkSession.createDataFrame([
            ("spark i j k", 1.0),
            ("l m n", 2.0),
            ("mapreduce spark", 1.0),
            ("apache hadoop", 2.0)], ["text", "label"])
        result = model.transform(test)
        predictionAndLabels = result.select("prediction", "label")
        from pyspark.ml.evaluation import MulticlassClassificationEvaluator
        evaluator = MulticlassClassificationEvaluator()
        score = evaluator.evaluate(predictionAndLabels)
        self.assertTrue(score == 1.0)

    def test_linear_regression(self):
        diabetes = datasets.load_diabetes()
        # Use a single feature to keep the regression one-dimensional.
        diabetes_X = diabetes.data[:, np.newaxis, 2]
        diabetes_X_train = diabetes_X[:-20]
        diabetes_X_test = diabetes_X[-20:]
        diabetes_y_train = diabetes.target[:-20]
        diabetes_y_test = diabetes.target[-20:]
        regr = LinearRegression(sparkSession, solver='direct-solve')
        regr.fit(diabetes_X_train, diabetes_y_train)
        mllearn_predicted = regr.predict(diabetes_X_test)
        sklearn_regr = linear_model.LinearRegression()
        sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
        self.assertTrue(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95)  # We are comparable to a similar algorithm in scikit learn

    def test_linear_regression_cg(self):
        # Same as test_linear_regression but with the conjugate-gradient
        # solver.
        diabetes = datasets.load_diabetes()
        diabetes_X = diabetes.data[:, np.newaxis, 2]
        diabetes_X_train = diabetes_X[:-20]
        diabetes_X_test = diabetes_X[-20:]
        diabetes_y_train = diabetes.target[:-20]
        diabetes_y_test = diabetes.target[-20:]
        regr = LinearRegression(sparkSession, solver='newton-cg')
        regr.fit(diabetes_X_train, diabetes_y_train)
        mllearn_predicted = regr.predict(diabetes_X_test)
        sklearn_regr = linear_model.LinearRegression()
        sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
        self.assertTrue(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95)  # We are comparable to a similar algorithm in scikit learn

    def test_svm(self):
        digits = datasets.load_digits()
        X_digits = digits.data
        y_digits = digits.target
        n_samples = len(X_digits)
        X_train = X_digits[:int(.9 * n_samples)]
        y_train = y_digits[:int(.9 * n_samples)]
        X_test = X_digits[int(.9 * n_samples):]
        y_test = y_digits[int(.9 * n_samples):]
        svm = SVM(sparkSession, is_multi_class=True, tol=0.0001)
        mllearn_predicted = svm.fit(X_train, y_train).predict(X_test)
        # Local import: `svm` the estimator above shadows the sklearn module.
        from sklearn import linear_model, svm
        clf = svm.LinearSVC()
        sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
        accuracy = accuracy_score(sklearn_predicted, mllearn_predicted)
        evaluation = 'test_svm accuracy_score(sklearn_predicted, mllearn_predicted) was {}'.format(accuracy)
        self.assertTrue(accuracy > 0.95, evaluation)

    def test_naive_bayes(self):
        digits = datasets.load_digits()
        X_digits = digits.data
        y_digits = digits.target
        n_samples = len(X_digits)
        X_train = X_digits[:int(.9 * n_samples)]
        y_train = y_digits[:int(.9 * n_samples)]
        X_test = X_digits[int(.9 * n_samples):]
        y_test = y_digits[int(.9 * n_samples):]
        nb = NaiveBayes(sparkSession)
        mllearn_predicted = nb.fit(X_train, y_train).predict(X_test)
        from sklearn.naive_bayes import MultinomialNB
        clf = MultinomialNB()
        sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
        self.assertTrue(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95)

    def test_naive_bayes1(self):
        # Same comparison on sparse TF-IDF text features.
        categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']
        newsgroups_train = fetch_20newsgroups(subset='train', categories=categories)
        newsgroups_test = fetch_20newsgroups(subset='test', categories=categories)
        vectorizer = TfidfVectorizer()
        # Both vectors and vectors_test are SciPy CSR matrix
        vectors = vectorizer.fit_transform(newsgroups_train.data)
        vectors_test = vectorizer.transform(newsgroups_test.data)
        nb = NaiveBayes(sparkSession)
        mllearn_predicted = nb.fit(vectors, newsgroups_train.target).predict(vectors_test)
        from sklearn.naive_bayes import MultinomialNB
        clf = MultinomialNB()
        sklearn_predicted = clf.fit(vectors, newsgroups_train.target).predict(vectors_test)
        self.assertTrue(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95)
if __name__ == '__main__':
    # Entry point when launched via spark-submit (see the header comment
    # at the top of this file for the exact invocation).
    unittest.main()
| apache-2.0 |
ozak/geopandas | geopandas/plotting.py | 1 | 19833 | from __future__ import print_function
from distutils.version import LooseVersion
import warnings
import numpy as np
import pandas as pd
def _flatten_multi_geoms(geoms, colors=None):
    """
    Returns Series like geoms and colors, except that any Multi geometries
    are split into their components and colors are repeated for all component
    in the same Multi geometry. Maintains 1:1 matching of geometry to color.

    Passing `color` is optional, and when no `color` is passed a list of None
    values is returned as `component_colors`.

    "Colors" are treated opaquely and so can actually contain any values.

    Returns
    -------
    components : list of geometry
    component_colors : list of whatever type `colors` contains
    """
    if colors is None:
        colors = [None] * len(geoms)

    # Fast path: nothing to flatten, hand back the inputs untouched.
    if not geoms.geom_type.str.startswith('Multi').any():
        return geoms, colors

    # precondition, so zip can't short-circuit
    assert len(geoms) == len(colors)

    components = []
    component_colors = []
    for geom, color in zip(geoms, colors):
        if geom.type.startswith('Multi'):
            # One entry (with the shared color) per component geometry.
            for part in geom:
                components.append(part)
                component_colors.append(color)
        else:
            components.append(geom)
            component_colors.append(color)
    return components, component_colors
def plot_polygon_collection(ax, geoms, values=None, color=None,
                            cmap=None, vmin=None, vmax=None, **kwargs):
    """
    Plots a collection of Polygon and MultiPolygon geometries to `ax`

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        where shapes will be plotted
    geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)
    values : a sequence of `N` values, optional
        Values will be mapped to colors using vmin/vmax/cmap. They should
        have 1:1 correspondence with the geometries (not their components).
        Otherwise follows `color` / `facecolor` kwargs.
    color : single color or sequence of `N` colors
        Sets both `edgecolor` and `facecolor`
    cmap : matplotlib colormap, optional
        Used together with vmin/vmax to map `values` to colors.
    vmin, vmax : float, optional
        Color scale limits for `values`.
    **kwargs
        Additional keyword arguments passed to the collection
        (e.g. `edgecolor`, `facecolor`).

    Returns
    -------
    collection : matplotlib.collections.Collection that was plotted
    """
    # Imported lazily so geopandas can be used without plotting extras.
    try:
        from descartes.patch import PolygonPatch
    except ImportError:
        raise ImportError("The descartes package is required"
                          " for plotting polygons in geopandas.")
    from matplotlib.collections import PatchCollection

    geoms, values = _flatten_multi_geoms(geoms, values)
    # When no values were supplied, _flatten_multi_geoms returns a list of
    # Nones; collapse it back to None so the color kwargs take effect.
    if None in values:
        values = None

    # PatchCollection does not accept some kwargs.
    if 'markersize' in kwargs:
        del kwargs['markersize']

    # color=None overwrites specified facecolor/edgecolor with default color
    if color is not None:
        kwargs['color'] = color

    collection = PatchCollection([PolygonPatch(poly) for poly in geoms],
                                 **kwargs)

    if values is not None:
        collection.set_array(np.asarray(values))
        collection.set_cmap(cmap)
        collection.set_clim(vmin, vmax)

    ax.add_collection(collection, autolim=True)
    ax.autoscale_view()
    return collection
def plot_linestring_collection(ax, geoms, values=None, color=None,
                               cmap=None, vmin=None, vmax=None, **kwargs):
    """
    Plots a collection of LineString and MultiLineString geometries to `ax`

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        where shapes will be plotted
    geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be
        mixed)
    values : a sequence of `N` values, optional
        Values will be mapped to colors using vmin/vmax/cmap. They should
        have 1:1 correspondence with the geometries (not their components).
    color : single color or sequence of `N` colors
        Cannot be used together with `values`.
    cmap : matplotlib colormap, optional
        Used together with vmin/vmax to map `values` to colors.
    vmin, vmax : float, optional
        Color scale limits for `values`.

    Returns
    -------
    collection : matplotlib.collections.Collection that was plotted
    """
    from matplotlib.collections import LineCollection

    geoms, values = _flatten_multi_geoms(geoms, values)
    # When no values were supplied, _flatten_multi_geoms returns a list of
    # Nones; collapse it back to None so the color kwargs take effect.
    if None in values:
        values = None

    # LineCollection does not accept some kwargs.
    if 'markersize' in kwargs:
        del kwargs['markersize']

    # color=None gives black instead of default color cycle
    if color is not None:
        kwargs['color'] = color

    # Extract the x/y vertex arrays that LineCollection expects.
    segments = [np.array(linestring)[:, :2] for linestring in geoms]
    collection = LineCollection(segments, **kwargs)

    if values is not None:
        collection.set_array(np.asarray(values))
        collection.set_cmap(cmap)
        collection.set_clim(vmin, vmax)

    ax.add_collection(collection, autolim=True)
    ax.autoscale_view()
    return collection
def plot_point_collection(ax, geoms, values=None, color=None,
                          cmap=None, vmin=None, vmax=None,
                          marker='o', markersize=None, **kwargs):
    """
    Plots a collection of Point and MultiPoint geometries to `ax`

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        where shapes will be plotted
    geoms : sequence of `N` Points or MultiPoints
    values : a sequence of `N` values, optional
        Values mapped to colors using vmin, vmax, and cmap.
        Cannot be specified together with `color`.
    color : single color or sequence of `N` colors, optional
        Cannot be specified together with `values`.
    marker : matplotlib marker style, default 'o'
    markersize : scalar or array-like, optional
        Size of the markers. Note that under the hood ``scatter`` is
        used, so the specified value will be proportional to the
        area of the marker (size in points^2).

    Returns
    -------
    collection : matplotlib.collections.Collection that was plotted

    Raises
    ------
    ValueError
        If both `values` and `color` are given.
    """
    if values is not None and color is not None:
        raise ValueError("Can only specify one of 'values' and 'color' kwargs")

    geoms, values = _flatten_multi_geoms(geoms, values)
    # When no values were supplied, _flatten_multi_geoms returns a list of
    # Nones; collapse it back to None so the color kwarg takes effect.
    if None in values:
        values = None

    x = [p.x for p in geoms]
    y = [p.y for p in geoms]

    # matplotlib 1.4 does not support c=None, and < 2.0 does not support s=None
    if values is not None:
        kwargs['c'] = values
    if markersize is not None:
        kwargs['s'] = markersize

    collection = ax.scatter(x, y, color=color, vmin=vmin, vmax=vmax, cmap=cmap,
                            marker=marker, **kwargs)
    return collection
def plot_series(s, cmap=None, color=None, ax=None, figsize=None, **style_kwds):
    """
    Plot a GeoSeries.

    Generate a plot of a GeoSeries geometry with matplotlib.

    Parameters
    ----------
    s : Series
        The GeoSeries to be plotted. Currently Polygon,
        MultiPolygon, LineString, MultiLineString and Point
        geometries can be plotted.
    cmap : str (default None)
        The name of a colormap recognized by matplotlib. Any
        colormap will work, but categorical colormaps are
        generally recommended. Examples of useful discrete
        colormaps include:
            tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2
    color : str (default None)
        If specified, all objects will be colored uniformly.
    ax : matplotlib.pyplot.Artist (default None)
        axes on which to draw the plot
    figsize : pair of floats (default None)
        Size of the resulting matplotlib.figure.Figure. If the argument
        ax is given explicitly, figsize is ignored.
    **style_kwds : dict
        Color options to be passed on to the actual plot function, such
        as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
        ``alpha``.

    Returns
    -------
    ax : matplotlib axes instance
    """
    # Backwards-compatible handling of the deprecated kwarg spellings.
    if 'colormap' in style_kwds:
        warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
                      "(for consistency with matplotlib)", FutureWarning)
        cmap = style_kwds.pop('colormap')
    if 'axes' in style_kwds:
        warnings.warn("'axes' is deprecated, please use 'ax' instead "
                      "(for consistency with pandas)", FutureWarning)
        ax = style_kwds.pop('axes')

    import matplotlib.pyplot as plt
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    ax.set_aspect('equal')

    if s.empty:
        warnings.warn("The GeoSeries you are attempting to plot is "
                      "empty. Nothing has been displayed.", UserWarning)
        return ax

    # if cmap is specified, create range of colors based on cmap
    values = None
    if cmap is not None:
        values = np.arange(len(s))
        if hasattr(cmap, 'N'):
            # Wrap the indices for discrete (listed) colormaps.
            values = values % cmap.N
        style_kwds['vmin'] = style_kwds.get('vmin', values.min())
        style_kwds['vmax'] = style_kwds.get('vmax', values.max())

    # Boolean masks selecting each geometry family; Multi* variants are
    # routed to the same collection as their singular counterparts.
    geom_types = s.geometry.type
    poly_idx = np.asarray((geom_types == 'Polygon')
                          | (geom_types == 'MultiPolygon'))
    line_idx = np.asarray((geom_types == 'LineString')
                          | (geom_types == 'MultiLineString'))
    point_idx = np.asarray((geom_types == 'Point')
                           | (geom_types == 'MultiPoint'))

    # plot all Polygons and all MultiPolygon components in the same collection
    polys = s.geometry[poly_idx]
    if not polys.empty:
        # color overrides both face and edgecolor. As we want people to be
        # able to use edgecolor as well, pass color to facecolor
        facecolor = style_kwds.pop('facecolor', None)
        if color is not None:
            facecolor = color
        values_ = values[poly_idx] if cmap else None
        plot_polygon_collection(ax, polys, values_, facecolor=facecolor,
                                cmap=cmap, **style_kwds)

    # plot all LineStrings and MultiLineString components in same collection
    lines = s.geometry[line_idx]
    if not lines.empty:
        values_ = values[line_idx] if cmap else None
        plot_linestring_collection(ax, lines, values_, color=color, cmap=cmap,
                                   **style_kwds)

    # plot all Points in the same collection
    points = s.geometry[point_idx]
    if not points.empty:
        values_ = values[point_idx] if cmap else None
        plot_point_collection(ax, points, values_, color=color, cmap=cmap,
                              **style_kwds)

    plt.draw()
    return ax
def plot_dataframe(df, column=None, cmap=None, color=None, ax=None,
                   categorical=False, legend=False, scheme=None, k=5,
                   vmin=None, vmax=None, markersize=None, figsize=None,
                   legend_kwds=None, **style_kwds):
    """
    Plot a GeoDataFrame.

    Generate a plot of a GeoDataFrame with matplotlib. If a
    column is specified, the plot coloring will be based on values
    in that column.

    Parameters
    ----------
    df : GeoDataFrame
        The GeoDataFrame to be plotted. Currently Polygon,
        MultiPolygon, LineString, MultiLineString and Point
        geometries can be plotted.
    column : str, np.array, pd.Series (default None)
        The name of the dataframe column, np.array, or pd.Series to be plotted.
        If np.array or pd.Series are used then it must have same length as
        dataframe. Values are used to color the plot. Ignored if `color` is
        also set.
    cmap : str (default None)
        The name of a colormap recognized by matplotlib.
    color : str (default None)
        If specified, all objects will be colored uniformly.
    ax : matplotlib.pyplot.Artist (default None)
        axes on which to draw the plot
    categorical : bool (default False)
        If False, cmap will reflect numerical values of the
        column being plotted.  For non-numerical columns, this
        will be set to True.
    legend : bool (default False)
        Plot a legend. Ignored if no `column` is given, or if `color` is given.
    scheme : str (default None)
        Name of a choropleth classification scheme (requires PySAL).
        A pysal.esda.mapclassify.Map_Classifier object will be used
        under the hood. Supported schemes: 'Equal_interval', 'Quantiles',
        'Fisher_Jenks'
    k : int (default 5)
        Number of classes (ignored if scheme is None)
    vmin : None or float (default None)
        Minimum value of cmap. If None, the minimum data value
        in the column to be plotted is used.
    vmax : None or float (default None)
        Maximum value of cmap. If None, the maximum data value
        in the column to be plotted is used.
    markersize : str or float or sequence (default None)
        Only applies to point geometries within a frame.
        If a str, will use the values in the column of the frame specified
        by markersize to set the size of markers. Otherwise can be a value
        to apply to all points, or a sequence of the same length as the
        number of points.
    figsize : tuple of integers (default None)
        Size of the resulting matplotlib.figure.Figure. If the argument
        axes is given explicitly, figsize is ignored.
    legend_kwds : dict (default None)
        Keyword arguments to pass to ax.legend()
    **style_kwds : dict
        Color options to be passed on to the actual plot function, such
        as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
        ``alpha``.

    Returns
    -------
    ax : matplotlib axes instance
    """
    # Translate deprecated keyword spellings before anything else so the
    # rest of the function only deals with the canonical names.
    if 'colormap' in style_kwds:
        warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
                      "(for consistency with matplotlib)", FutureWarning)
        cmap = style_kwds.pop('colormap')
    if 'axes' in style_kwds:
        warnings.warn("'axes' is deprecated, please use 'ax' instead "
                      "(for consistency with pandas)", FutureWarning)
        ax = style_kwds.pop('axes')
    if column is not None and color is not None:
        warnings.warn("Only specify one of 'column' or 'color'. Using "
                      "'color'.", UserWarning)
        column = None

    import matplotlib
    import matplotlib.pyplot as plt

    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    ax.set_aspect('equal')

    if df.empty:
        warnings.warn("The GeoDataFrame you are attempting to plot is "
                      "empty. Nothing has been displayed.", UserWarning)
        return ax

    if isinstance(markersize, str):
        markersize = df[markersize].values

    # No value column: delegate to the single-color series plotter.
    if column is None:
        return plot_series(df.geometry, cmap=cmap, color=color, ax=ax,
                           figsize=figsize, markersize=markersize,
                           **style_kwds)

    # To accept pd.Series and np.arrays as column
    if isinstance(column, (np.ndarray, pd.Series)):
        if column.shape[0] != df.shape[0]:
            raise ValueError("The dataframe and given column have different "
                             "number of rows.")
        else:
            values = np.asarray(column)
    else:
        values = np.asarray(df[column])

    # fix: compare dtypes by equality rather than identity -- ``is`` only
    # happened to work because NumPy caches the object dtype instance.
    if values.dtype == np.dtype('O'):
        categorical = True

    # Define `values` as a Series
    if categorical:
        if cmap is None:
            if LooseVersion(matplotlib.__version__) >= '2.0.1':
                cmap = 'tab10'
            elif LooseVersion(matplotlib.__version__) >= '2.0.0':
                # Erroneous name.
                cmap = 'Vega10'
            else:
                cmap = 'Set1'
        categories = list(set(values))
        categories.sort()
        # fix: use distinct comprehension variables so the classification
        # parameter ``k`` cannot be shadowed (the old names leaked into the
        # enclosing scope on Python 2 and corrupted the later k=k call).
        valuemap = dict((cat, code) for (code, cat) in enumerate(categories))
        values = np.array([valuemap[cat] for cat in values])

    if scheme is not None:
        binning = __pysal_choro(values, scheme, k=k)
        # set categorical to True for creating the legend
        categorical = True
        binedges = [values.min()] + binning.bins.tolist()
        categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1])
                      for i in range(len(binedges)-1)]
        values = np.array(binning.yb)

    mn = values.min() if vmin is None else vmin
    mx = values.max() if vmax is None else vmax

    # Split the frame by geometry family; each family is drawn in its own
    # matplotlib collection.
    geom_types = df.geometry.type
    poly_idx = np.asarray((geom_types == 'Polygon')
                          | (geom_types == 'MultiPolygon'))
    line_idx = np.asarray((geom_types == 'LineString')
                          | (geom_types == 'MultiLineString'))
    point_idx = np.asarray((geom_types == 'Point')
                           | (geom_types == 'MultiPoint'))

    # plot all Polygons and all MultiPolygon components in the same collection
    polys = df.geometry[poly_idx]
    if not polys.empty:
        plot_polygon_collection(ax, polys, values[poly_idx],
                                vmin=mn, vmax=mx, cmap=cmap, **style_kwds)

    # plot all LineStrings and MultiLineString components in same collection
    lines = df.geometry[line_idx]
    if not lines.empty:
        plot_linestring_collection(ax, lines, values[line_idx],
                                   vmin=mn, vmax=mx, cmap=cmap, **style_kwds)

    # plot all Points in the same collection
    points = df.geometry[point_idx]
    if not points.empty:
        if isinstance(markersize, np.ndarray):
            markersize = markersize[point_idx]
        plot_point_collection(ax, points, values[point_idx], vmin=mn, vmax=mx,
                              markersize=markersize, cmap=cmap,
                              **style_kwds)

    if legend and not color:
        from matplotlib.lines import Line2D
        from matplotlib.colors import Normalize
        from matplotlib import cm

        norm = Normalize(vmin=mn, vmax=mx)
        n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
        if categorical:
            # One proxy artist per category, colored through the shared norm.
            patches = []
            for value, cat in enumerate(categories):
                patches.append(
                    Line2D([0], [0], linestyle="none", marker="o",
                           alpha=style_kwds.get('alpha', 1), markersize=10,
                           markerfacecolor=n_cmap.to_rgba(value)))
            if legend_kwds is None:
                legend_kwds = {}
            legend_kwds.setdefault('numpoints', 1)
            legend_kwds.setdefault('loc', 'best')
            ax.legend(patches, categories, **legend_kwds)
        else:
            n_cmap.set_array([])
            ax.get_figure().colorbar(n_cmap, ax=ax)

    plt.draw()
    return ax
def __pysal_choro(values, scheme, k=5):
    """
    Wrapper for choropleth schemes from PySAL for use with plot_dataframe

    Parameters
    ----------
    values
        Series to be plotted
    scheme : str
        One of pysal.esda.mapclassify classification schemes
        Options are 'Equal_interval', 'Quantiles', 'Fisher_Jenks'
    k : int
        number of classes (2 <= k <=9)

    Returns
    -------
    binning
        Binning objects that holds the Series with values replaced with
        class identifier and the bins.

    Raises
    ------
    ImportError
        If PySAL is not installed.
    ValueError
        If ``scheme`` is not one of the supported scheme names.
    """
    try:
        # Import inside the function so PySAL stays an optional dependency;
        # keep the try minimal so ImportErrors raised *by* the classifiers
        # are no longer masked as "PySAL is required".
        from pysal.esda.mapclassify import (
            Quantiles, Equal_Interval, Fisher_Jenks)
    except ImportError:
        raise ImportError("PySAL is required to use the 'scheme' keyword")

    # Map the case-insensitive user-facing names to the PySAL classifiers.
    schemes = {
        'equal_interval': Equal_Interval,
        'quantiles': Quantiles,
        'fisher_jenks': Fisher_Jenks,
    }
    scheme = scheme.lower()
    if scheme not in schemes:
        # Sorted so the error message is deterministic across runs.
        raise ValueError("Invalid scheme. Scheme must be in the"
                         " set: %r" % sorted(schemes))
    return schemes[scheme](values, k)
| bsd-3-clause |
parejkoj/AstroHackWeek2015 | day3-machine-learning/solutions/linear_models.py | 14 | 1188 | from pprint import pprint
# NOTE: this example predates sklearn 0.18; ``grid_search`` and
# ``cross_validation`` were later merged into ``model_selection``.
import numpy as np               # fix: np was used below but never imported
import matplotlib.pyplot as plt  # fix: plt was used below but never imported

from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.svm import LinearSVC

# Binary target: even vs. odd digit.
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target % 2)

# Cross-validated search over the regularization strength C.
grid = GridSearchCV(LinearSVC(), param_grid={'C': np.logspace(-6, 2, 9)}, cv=5)
grid.fit(X_train, y_train)
pprint(grid.grid_scores_)
pprint(grid.score(X_test, y_test))

# Compare l1 vs l2 penalties over a range of C values.
Cs = [10, 1, .01, 0.001, 0.0001]
for penalty in ['l1', 'l2']:
    svm_models = {}
    training_scores = []
    test_scores = []
    for C in Cs:
        svm = LinearSVC(C=C, penalty=penalty, dual=False).fit(X_train, y_train)
        training_scores.append(svm.score(X_train, y_train))
        test_scores.append(svm.score(X_test, y_test))
        svm_models[C] = svm
    plt.figure()
    plt.plot(training_scores, label="training scores")
    plt.plot(test_scores, label="test scores")
    # fix: len(Cs) == 5 points are plotted, so 5 tick positions are needed;
    # range(4) raised a locations/labels length mismatch in matplotlib.
    plt.xticks(range(len(Cs)), Cs)
    plt.legend(loc="best")

# Inspect the learned coefficients for each C (last penalty's models).
plt.figure(figsize=(10, 5))
for i, C in enumerate(Cs):
    plt.plot(svm_models[C].coef_.ravel(), "o", label="C = %.2f" % C)
plt.legend(loc="best")
| gpl-2.0 |
nwillemse/nctrader | examples/pandas_examples/test_pandas_examples.py | 1 | 2703 | """
Test examples
One example can be tested individually using:
$ nosetests -s -v examples/test_examples.py:TestExamples.test_strategy_backtest
"""
import os
import unittest
from nctrader import settings
import examples.pandas_examples.pandas_bar_display_prices_backtest
import examples.pandas_examples.pandas_tick_strategy_backtest
class TestPandasExamples(unittest.TestCase):
    """
    Check that the pandas examples execute and reproduce known Sharpe ratios.
    """

    def setUp(self):
        """Configure the shared backtest parameters."""
        self.config = settings.TEST
        self.testing = True
        self.max_rows = 20
        self.cache_name = ''
        self.cache_backend = 'sqlite'
        self.expire_after = '24:00:00.0'
        self.n = 100
        self.n_window = 5

    def test_pandas_bar_display_prices_backtest(self):
        out_file = os.path.join(settings.TEST.OUTPUT_DIR,
                                "pandas_bar_display_prices_backtest.pkl")
        results = examples.pandas_examples.pandas_bar_display_prices_backtest.run(
            self.cache_name, self.cache_backend, self.expire_after,
            'yahoo', '2010-01-04', '2016-06-22', self.config, self.testing,
            ["^GSPC"], out_file, self.n, self.n_window)
        self.assertAlmostEqual(float(results['sharpe']), 0.5968)

    def test_pandas_bar_display_prices_backtest_multi(self):
        out_file = os.path.join(settings.TEST.OUTPUT_DIR,
                                "pandas_bar_display_prices_backtest_multi.pkl")
        results = examples.pandas_examples.pandas_bar_display_prices_backtest.run(
            self.cache_name, self.cache_backend, self.expire_after,
            'yahoo', '2010-01-04', '2016-06-22', self.config, self.testing,
            ["MSFT", "GOOG"], out_file, self.n, self.n_window)
        self.assertAlmostEqual(float(results['sharpe']), 0.3544)

    def test_pandas_tick_strategy_backtest(self):
        out_file = os.path.join(settings.TEST.OUTPUT_DIR,
                                "pandas_tick_strategy_backtest.pkl")
        results = examples.pandas_examples.pandas_tick_strategy_backtest.run(
            self.config, self.testing, ["GOOG"], out_file,
            self.n, self.n_window)
        self.assertAlmostEqual(float(results['sharpe']), -7.1351)

    def test_pandas_tick_strategy_backtest_multi(self):
        out_file = os.path.join(settings.TEST.OUTPUT_DIR,
                                "pandas_tick_strategy_backtest_multi.pkl")
        results = examples.pandas_examples.pandas_tick_strategy_backtest.run(
            self.config, self.testing, ["GOOG", "MSFT"], out_file,
            self.n, self.n_window)
        self.assertAlmostEqual(float(results['sharpe']), -5.0262)
| mit |
dingliumath/quant-econ | examples/illustrates_lln.py | 7 | 1802 | """
Filename: illustrates_lln.py
Authors: John Stachurski and Thomas J. Sargent
Visual illustration of the law of large numbers.
"""
import random
import numpy as np
from scipy.stats import t, beta, lognorm, expon, gamma, poisson
import matplotlib.pyplot as plt

n = 100  # number of draws per distribution

# == Arbitrary collection of distributions == #
# NOTE: scipy frozen distributions take (shape, loc, scale); the first
# positional argument of ``expon`` is ``loc``, not the rate.
distributions = {"student's t with 10 degrees of freedom": t(10),
                 "beta(2, 2)": beta(2, 2),
                 "lognormal LN(0, 1/2)": lognorm(0.5),
                 "gamma(5, 1/2)": gamma(5, scale=2),
                 "poisson(4)": poisson(4),
                 # fix: expon(1) shifted the support by loc=1; the default
                 # expon() already has scale=1, i.e. lambda = 1 as labeled.
                 "exponential with lambda = 1": expon()}

# == Create a figure and some axes == #
num_plots = 3
fig, axes = plt.subplots(num_plots, 1, figsize=(10, 10))

# == Set some plotting parameters to improve layout == #
bbox = (0., 1.02, 1., .102)
legend_args = {'ncol': 2,
               'bbox_to_anchor': bbox,
               'loc': 3,
               'mode': 'expand'}
plt.subplots_adjust(hspace=0.5)

for ax in axes:
    # == Choose a randomly selected distribution == #
    # pop() removes it so no distribution is drawn twice across subplots
    name = random.choice(list(distributions.keys()))
    distribution = distributions.pop(name)

    # == Generate n draws from the distribution == #
    data = distribution.rvs(n)

    # == Compute sample mean at each n == #
    # running mean via cumulative sums: O(n) instead of the O(n^2) loop
    sample_mean = np.cumsum(data) / np.arange(1, n + 1)

    # == Plot == #
    ax.plot(list(range(n)), data, 'o', color='grey', alpha=0.5)
    axlabel = r'$\bar X_n$' + ' for ' + r'$X_i \sim$' + ' ' + name
    ax.plot(list(range(n)), sample_mean, 'g-', lw=3, alpha=0.6, label=axlabel)
    m = distribution.mean()
    ax.plot(list(range(n)), [m] * n, 'k--', lw=1.5, label=r'$\mu$')
    ax.vlines(list(range(n)), m, data, lw=0.2)
    ax.legend(**legend_args)

plt.show()
| bsd-3-clause |
theandygross/CancerData | src/Processing/InitializeMut.py | 1 | 3444 | """
Created on Jul 2, 2013
@author: agross
Mutation calls are extracted from the annotated MAF files obtained
from Firehose and filtered to include only non-silent mutations. Each
case is associated with a binary vector in which each position
represents a gene; the position is set to 1 if the gene is observed
to harbor one or more mutations in the case and set to 0 otherwise.
Mutation meta-markers are constructed by collapsing the genes within a
pathway gene set via a Boolean or operator, such that the pathway is
considered altered in a case if any of its genes has a mutation. Pathway
markers that are characterized by a single highly mutated gene or are
highly correlated with mutation rate (Mann-Whitney U test P < 0.01) are
filtered out.
"""
import pandas as pd
import pickle as pickle
from Data.Containers import Dataset
from Data.Firehose import get_mutation_matrix
def is_one_gene(genes, df):
    """Test to see if most mutations are due to a single gene.

    Args:
    -----
    genes (iterable): gene names (rows of ``df``) making up the gene set
    df (pandas.DataFrame): genes x patients mutation hit matrix

    Return:
    -------
    True if removing the most-mutated gene drops the number of hit
    patients by more than ~50% relative to the remainder.
    """
    # reindex tolerates genes missing from the matrix (NaN rows), matching
    # the behaviour of the removed DataFrame.ix indexer.
    sub = df.reindex(list(genes))
    top_hit = sub.sum(axis=1).idxmax()
    # number of patients with >= 1 mutation in the full set ...
    with_top = sub.sum().clip(upper=1).sum()
    # ... and with the top gene removed (clip(upper=...) replaces the
    # removed Series.clip_upper)
    without = df.reindex(list(set(genes) - {top_hit})).sum().clip(upper=1).sum()
    return ((with_top - without) / (without + .1)) > .5
def size_filter(s, min_pat=10):
    """Test if a feature has sufficient diversity across patients.

    Args:
    -----
    s (pandas.Series): per-patient mutation counts for one feature
    min_pat (int): minimum number of patients required in each group

    Return:
    -------
    True iff the binarized feature takes both values (0 and 1) and the
    rarer value occurs in at least ``min_pat`` patients.
    """
    # clip(upper=...) replaces the removed Series.clip_upper; this
    # binarizes the counts before tallying the two groups.
    vc = s.clip(upper=1.).value_counts()
    return (len(vc) == 2) and (vc.min() >= min_pat)
class MutDataset(Dataset):
    """
    Inherits from Dataset class. Adds some added processing for mutation
    data.
    """

    def __init__(self, run, cancer, patients=None,
                 create_features=True):
        """Load the mutation matrix for a cancer and optionally build features.

        Args:
        -----
        run: run object exposing data_path, parameters and gene_sets
        cancer: cancer object exposing path and name
        patients (list, optional): restrict the matrix to these patients
        create_features (bool): build the meta/hit feature matrix
        """
        Dataset.__init__(self, cancer.path, 'Mutation', compressed=False)
        self.df = get_mutation_matrix(run.data_path, cancer.name)
        if patients is not None:
            # reindex tolerates patients missing from the matrix (NaN
            # columns, the old .ix behaviour); all-NaN columns are then
            # dropped.
            self.df = self.df.reindex(columns=list(patients)).dropna(
                axis=1, how='all')
        if create_features is True:
            min_pat = run.parameters['min_patients']
            self._create_feature_matrix(run.gene_sets, min_pat)

    def _create_feature_matrix(self, gene_sets, min_size=10):
        """
        Create hit_matrix and meta_matrix, filter out genes for features.
        """
        # binary genes x patients matrix (clip replaces removed clip_upper)
        hit_matrix = self.df.fillna(0).clip(upper=1.)
        # pathway meta-markers: a pathway is hit if any member gene is hit
        # (.items replaces the Python-2-only dict.iteritems; reindex
        # tolerates genes absent from the matrix, as .ix did)
        meta_matrix = pd.DataFrame({p: self.df.reindex(list(g)).sum()
                                    for p, g in gene_sets.items()}).T
        meta_matrix = meta_matrix.fillna(0).clip(upper=1.)
        meta_matrix = meta_matrix.dropna()
        # drop pathways without enough patients in both groups
        s = meta_matrix.apply(size_filter, args=(min_size,), axis=1)
        meta_matrix = meta_matrix.loc[s]
        # drop pathways whose signal is dominated by a single gene
        s = [p for p in meta_matrix.index
             if not is_one_gene(gene_sets[p], hit_matrix)]
        meta_matrix = meta_matrix.loc[s]
        # apply the same diversity filter to the individual genes
        s = hit_matrix.apply(size_filter, args=(min_size,), axis=1)
        hit_matrix = hit_matrix.loc[s]
        # pd.concat replaces the removed DataFrame.append
        self.features = pd.concat([meta_matrix, hit_matrix])
def initialize_mut(cancer_type, report_path, patients=None,
                   create_meta_features=True, save=True):
    """
    Initialize mutation data for down-stream analysis.

    Args:
    -----
    cancer_type (str): name of the cancer to load
    report_path (str): directory containing the pickled RunObject
    patients (list, optional): restrict the data to these patients
    create_meta_features (bool): build pathway meta-markers
    save (bool): persist (and uncompress) the dataset after building

    Return:
    -------
    The initialized MutDataset.
    """
    # Context manager closes the handle deterministically; the old
    # pickle.load(open(...)) leaked the file object.
    # NOTE: unpickling executes arbitrary code -- RunObject.p must be a
    # trusted file produced by this pipeline.
    with open(report_path + '/RunObject.p', 'rb') as fh:
        run = pickle.load(fh)
    cancer = run.load_cancer(cancer_type)
    data = MutDataset(run, cancer, patients, create_meta_features)
    if save is True:
        data.save()
        data.uncompress()
    return data
| mit |
pkruskal/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
    """Rescale *face* so its values span the 0-1 range.

    Undoes any normalization applied earlier so the array can be plotted.
    """
    # shift the minimum to zero, then normalize by the new maximum
    rescaled = face - face.min()
    rescaled /= rescaled.max()
    return rescaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data.

    Returns the LFW cache directory and the folder containing the decoded
    image tree, downloading the metadata files and the image archive from
    the official website when allowed.
    """
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")

    if funneled:
        archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
    else:
        archive_path = join(lfw_home, ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw")
        archive_url = BASE_URL + ARCHIVE_NAME

    if not exists(lfw_home):
        makedirs(lfw_home)

    for target_filename in TARGET_FILENAMES:
        target_filepath = join(lfw_home, target_filename)
        if not exists(target_filepath):
            if download_if_missing:
                url = BASE_URL + target_filename
                logger.warning("Downloading LFW metadata: %s", url)
                urllib.urlretrieve(url, target_filepath)
            else:
                raise IOError("%s is missing" % target_filepath)

    if not exists(data_folder_path):
        if not exists(archive_path):
            if download_if_missing:
                logger.warning("Downloading LFW data (~200MB): %s",
                               archive_url)
                urllib.urlretrieve(archive_url, archive_path)
            else:
                # fix: previously reported ``target_filepath`` (a leftover
                # loop variable) instead of the archive that is missing
                raise IOError("%s is missing" % archive_path)

        import tarfile
        logger.info("Decompressing the data archive to %s", data_folder_path)
        # context manager closes the archive instead of leaking the handle
        with tarfile.open(archive_path, "r:gz") as archive:
            # NOTE(review): extractall trusts the member paths in the
            # archive; acceptable for this known upstream tarball, unsafe
            # for untrusted input.
            archive.extractall(path=lfw_home)
        remove(archive_path)

    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images.

    Decodes each jpeg in ``file_paths``, crops it with ``slice_``,
    optionally resizes by ``resize`` and converts to gray levels unless
    ``color`` is True.  Returns a float32 array of shape
    (n_faces, h, w[, 3]) with values in [0, 1].
    """
    # Try to import imread and imresize from PIL. We do this here to prevent
    # the whole sklearn.datasets module from depending on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
        from scipy.misc import imresize
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL)"
                          " is required to load data from jpeg files")

    # compute the portion of the images to load to respect the slice_
    # parameter given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))

    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)

    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)

    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)

    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.info("Loading face #%05d / %05d", i + 1, n_faces)

        # Checks if jpeg reading worked. Refer to issue #3594 for more
        # details.
        img = imread(file_path)
        # fix: compare with ``==`` -- ``is`` only worked because CPython
        # interns small integers, which is not a correctness guarantee.
        if img.ndim == 0:
            raise RuntimeError("Failed to read the image file %s, "
                               "Please make sure that libjpeg is installed"
                               % file_path)

        face = np.asarray(img[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)

        faces[i, ...] = face

    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
                      min_faces_per_person=0):
    """Perform the actual data loading for the lfw people dataset.

    This operation is meant to be cached by a joblib wrapper.
    """
    # Scan the on-disk folder tree and retain only the identities that
    # have at least `min_faces_per_person` pictures.
    person_names = []
    file_paths = []
    for folder_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, folder_name)
        if not isdir(folder_path):
            continue
        picture_paths = [join(folder_path, fname)
                         for fname in listdir(folder_path)]
        if len(picture_paths) >= min_faces_per_person:
            label = folder_name.replace('_', ' ')
            person_names.extend([label] * len(picture_paths))
            file_paths.extend(picture_paths)

    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError("min_faces_per_person=%d is too restrictive" %
                         min_faces_per_person)

    # Integer labels indexing into the sorted unique name array.
    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)

    faces = _load_imgs(file_paths, slice_, color, resize)

    # Deterministic shuffle so all pictures of one person are not
    # contiguous, which would break cross validation and IID-assuming
    # learners such as SGD and online k-means.
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=0, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) people dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Recognition (or Identification): given the
    picture of a face, find the name of the person given a training set
    (gallery).

    The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    min_faces_per_person : int, optional, default None
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    dataset : dict-like object with the following attributes:

    dataset.data : numpy array of shape (13233, 2914)
        Each row corresponds to a ravelled face image of original size 62 x 47
        pixels. Changing the ``slice_`` or resize parameters will change the shape
        of the output.

    dataset.images : numpy array of shape (13233, 62, 47)
        Each row is a face image corresponding to one of the 5749 people in
        the dataset. Changing the ``slice_`` or resize parameters will change the shape
        of the output.

    dataset.target : numpy array of shape (13233,)
        Labels associated to each face image. Those labels range from 0-5748
        and correspond to the person IDs.

    dataset.DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    # Download (if permitted) and locate the local copy of the dataset.
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading LFW people faces from %s', lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_people)

    # load and memoize the faces as np arrays
    faces, target, target_names = load_func(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)

    # pack the results as a Bunch instance
    return Bunch(data=faces.reshape(len(faces), -1), images=faces,
                 target=target, target_names=target_names,
                 DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset.

    Parses the pair index file, loads every referenced image and returns
    (pairs, target, target_names) where target is 1 for same-person pairs.
    This operation is meant to be cached by a joblib wrapper.
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.strip().split(b('\t')) for ln in index_file]
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)

    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
    # fix: the ``np.int`` alias was removed in NumPy 1.24; the builtin
    # ``int`` is exactly what it aliased.
    target = np.zeros(n_pairs, dtype=int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # same person: (name, picture index, picture index)
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # different persons: (name, index, name, index)
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # name is bytes on Python 3: decode before joining
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)

    pairs = _load_imgs(file_paths, slice_, color, resize)
    # reshape the flat face list into (n_pairs, 2, ...) pair tensors
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape

    return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) pairs dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Verification: given a pair of two pictures,
    a binary classifier must predict whether the two images are from
    the same person.

    In the official `README.txt`_ this task is described as the
    "Restricted" task.  As I am not sure as to implement the
    "Unrestricted" variant correctly, I left it as unsupported for now.

      .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.

    Parameters
    ----------
    subset : optional, default: 'train'
        Select the dataset to load: 'train' for the development training
        set, 'test' for the development test set, and '10_folds' for the
        official evaluation set that is meant to be used with a 10-folds
        cross validation.

    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By
        default all scikit learn data is stored in '~/scikit_learn_data'
        subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize the each face picture.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    The data is returned as a Bunch object with the following attributes:

    data : numpy array of shape (2200, 5828)
        Each row corresponds to 2 ravel'd face images of original size 62 x 47
        pixels. Changing the ``slice_`` or resize parameters will change the shape
        of the output.

    pairs : numpy array of shape (2200, 2, 62, 47)
        Each row has 2 face images corresponding to same or different person
        from the dataset containing 5749 people. Changing the ``slice_`` or resize
        parameters will change the shape of the output.

    target : numpy array of shape (13233,)
        Labels associated to each pair of images. The two label values being
        different persons or the same person.

    DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    # Download (if permitted) and locate the local copy of the dataset.
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_pairs)

    # select the right metadata file according to the requested subset
    label_filenames = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in label_filenames:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(label_filenames.keys()))))
    index_file_path = join(lfw_home, label_filenames[subset])

    # load and memoize the pairs as np arrays
    pairs, target, target_names = load_func(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)

    # pack the results as a Bunch instance
    return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
            "removed in 0.19. "
            "Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_pairs(download_if_missing=False)

    Deprecated; kept only for backward compatibility.
    Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
    """
    # BUG FIX: the deprecation message previously read "...removed in 0.19.Use
    # fetch_lfw_pairs..." because the implicitly-concatenated string literals
    # had no separating space.
    return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
talbrecht/pism_pik | site-packages/PISM/util.py | 1 | 3498 | # Copyright (C) 2011, 2012, 2013, 2015, 2016 David Maxwell and Constantine Khroulev
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Utility functions/objects that don't have a good home elsewhere."""
import PISM
import time
import sys
def prepare_output(filename):
    """Create an output file, define its time dimension, and record the
    current model time so the file is ready for writing."""
    context = PISM.Context()
    config = context.config
    out_file = PISM.PIO(context.com, config.get_string("output.format"),
                        filename, PISM.PISM_READWRITE_MOVE)
    # The time dimension must be defined before the current time is appended.
    time_name = config.get_string("time.dimension_name")
    PISM.define_time(out_file,
                     time_name,
                     config.get_string("time.calendar"),
                     context.time.units_string(),
                     context.unit_system)
    PISM.append_time(out_file, time_name, context.time.current())
    return out_file
def writeProvenance(outfile, message=None):
    """Saves the time and command line arguments (or the provided `message`) to
    the ``history`` attribute of the :file:`.nc` file `outfile`"""
    context = PISM.Context()
    # Only rank 0 touches the file; everybody synchronizes at the end.
    if context.rank == 0:
        if message is None:
            message = time.asctime() + ': ' + ' '.join(sys.argv)
        dataset = PISM.netCDF.Dataset(outfile, 'a')  # open for appending
        if 'history' in dataset.ncattrs():
            # Prepend the new entry to the existing history.
            dataset.history = message + '\n' + dataset.history
        else:
            dataset.history = message
        dataset.source = "PISM " + PISM.PISM_Revision
        dataset.close()
    context.com.barrier()
def fileHasVariable(filename, varname):
    """Returns ``True`` if the :file:`.nc` file `filename` contains a variable
    named `varname`."""
    # BUG FIX: the Dataset() call used to sit inside the try block, so if
    # opening the file failed, `ds` was never bound and the finally clause
    # raised a NameError that masked the real I/O error.  Open first, then
    # guarantee the file is closed.
    ds = PISM.netCDF.Dataset(filename)
    try:
        return varname in ds.variables
    finally:
        ds.close()
# The following was copied from matplotlib, which copied a python recipe.
class Bunch(object):
    """
    Often we want to just collect a bunch of stuff together, naming each
    item of the bunch; a dictionary's OK for that, but a small do- nothing
    class is even handier, and prettier to use. Whenever you want to
    group a few variables:

      >>> point = Bunch(datum=2, squared=4, coord=12)
      >>> point.datum
      2

    By: Alex Martelli
    From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308"""

    def __init__(self, **kwds):
        # Keyword arguments become instance attributes.
        self.__dict__.update(kwds)

    def has_key(self, k):
        "Return True if this Bunch has a given key."
        # BUG FIX: dict.has_key() was removed in Python 3; the `in` operator
        # is equivalent and works on both Python 2 and 3.
        return k in self.__dict__

    def __getitem__(self, k):
        # Dict-style access; returns None for missing keys (like dict.get).
        return self.__dict__.get(k)

    def update(self, **kwds):
        "Update contents of a Bunch using key-value pairs."
        self.__dict__.update(**kwds)

    def __repr__(self):
        keys = self.__dict__.keys()
        return 'Bunch(%s)' % ', '.join(['%s=%s' % (k, self.__dict__[k]) for k in keys])
| gpl-3.0 |
zfrenchee/pandas | pandas/tests/frame/test_block_internals.py | 2 | 19846 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from pandas import (DataFrame, Series, Timestamp, date_range, compat,
option_context)
from pandas.compat import StringIO
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals(TestData):
    """Tests that poke directly at the DataFrame's internal BlockManager:
    consolidation, per-block dtypes, copy semantics and cache invalidation."""

    def test_cast_internals(self):
        # Constructing from an existing BlockManager should honour `dtype`.
        casted = DataFrame(self.frame._data, dtype=int)
        expected = DataFrame(self.frame._series, dtype=int)
        assert_frame_equal(casted, expected)

        casted = DataFrame(self.frame._data, dtype=np.int32)
        expected = DataFrame(self.frame._series, dtype=np.int32)
        assert_frame_equal(casted, expected)

    def test_consolidate(self):
        self.frame['E'] = 7.
        consolidated = self.frame._consolidate()
        assert len(consolidated._data.blocks) == 1

        # Ensure copy, do I want this?
        recons = consolidated._consolidate()
        assert recons is not consolidated
        tm.assert_frame_equal(recons, consolidated)

        self.frame['F'] = 8.
        assert len(self.frame._data.blocks) == 3

        self.frame._consolidate(inplace=True)
        assert len(self.frame._data.blocks) == 1

    def test_consolidate_deprecation(self):
        self.frame['E'] = 7
        with tm.assert_produces_warning(FutureWarning):
            self.frame.consolidate()

    def test_consolidate_inplace(self):
        frame = self.frame.copy()  # noqa

        # triggers in-place consolidation
        for letter in range(ord('A'), ord('Z')):
            self.frame[chr(letter)] = chr(letter)

    def test_values_consolidate(self):
        self.frame['E'] = 7.
        assert not self.frame._data.is_consolidated()
        # accessing .values forces consolidation
        _ = self.frame.values  # noqa
        assert self.frame._data.is_consolidated()

    def test_modify_values(self):
        self.frame.values[5] = 5
        assert (self.frame.values[5] == 5).all()

        # unconsolidated
        self.frame['E'] = 7.
        self.frame.values[6] = 6
        assert (self.frame.values[6] == 6).all()

    def test_boolean_set_uncons(self):
        self.frame['E'] = 7.

        expected = self.frame.values.copy()
        expected[expected > 1] = 2

        self.frame[self.frame > 1] = 2
        assert_almost_equal(expected, self.frame.values)

    def test_values_numeric_cols(self):
        self.frame['foo'] = 'bar'

        values = self.frame[['A', 'B', 'C', 'D']].values
        assert values.dtype == np.float64

    def test_values_lcd(self):
        # .values uses the lowest-common dtype of the selected columns

        # mixed lcd
        values = self.mixed_float[['A', 'B', 'C', 'D']].values
        assert values.dtype == np.float64

        values = self.mixed_float[['A', 'B', 'C']].values
        assert values.dtype == np.float32

        values = self.mixed_float[['C']].values
        assert values.dtype == np.float16

        # GH 10364
        # B uint64 forces float because there are other signed int types
        values = self.mixed_int[['A', 'B', 'C', 'D']].values
        assert values.dtype == np.float64

        values = self.mixed_int[['A', 'D']].values
        assert values.dtype == np.int64

        # B uint64 forces float because there are other signed int types
        values = self.mixed_int[['A', 'B', 'C']].values
        assert values.dtype == np.float64

        # as B and C are both unsigned, no forcing to float is needed
        values = self.mixed_int[['B', 'C']].values
        assert values.dtype == np.uint64

        values = self.mixed_int[['A', 'C']].values
        assert values.dtype == np.int32

        values = self.mixed_int[['C', 'D']].values
        assert values.dtype == np.int64

        values = self.mixed_int[['A']].values
        assert values.dtype == np.int32

        values = self.mixed_int[['C']].values
        assert values.dtype == np.uint8

    def test_constructor_with_convert(self):
        # this is actually mostly a test of lib.maybe_convert_objects
        # #2845
        df = DataFrame({'A': [2 ** 63 - 1]})
        result = df['A']
        expected = Series(np.asarray([2 ** 63 - 1], np.int64), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [2 ** 63]})
        result = df['A']
        expected = Series(np.asarray([2 ** 63], np.uint64), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [datetime(2005, 1, 1), True]})
        result = df['A']
        expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_),
                          name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [None, 1]})
        result = df['A']
        expected = Series(np.asarray([np.nan, 1], np.float_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0, 2]})
        result = df['A']
        expected = Series(np.asarray([1.0, 2], np.float_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0 + 2.0j, 3]})
        result = df['A']
        expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0 + 2.0j, 3.0]})
        result = df['A']
        expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0 + 2.0j, True]})
        result = df['A']
        expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0, None]})
        result = df['A']
        expected = Series(np.asarray([1.0, np.nan], np.float_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0 + 2.0j, None]})
        result = df['A']
        expected = Series(np.asarray(
            [1.0 + 2.0j, np.nan], np.complex_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [2.0, 1, True, None]})
        result = df['A']
        expected = Series(np.asarray(
            [2.0, 1, True, None], np.object_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [2.0, 1, datetime(2006, 1, 1), None]})
        result = df['A']
        expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),
                                      None], np.object_), name='A')
        assert_series_equal(result, expected)

    def test_construction_with_mixed(self):
        # test construction edge cases with mixed types

        # f7u12, this does not work without extensive workaround
        data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
                [datetime(2000, 1, 2), datetime(2000, 1, 3),
                 datetime(2000, 1, 1)]]
        df = DataFrame(data)

        # check dtypes
        result = df.get_dtype_counts().sort_values()
        expected = Series({'datetime64[ns]': 3})
        # BUG FIX: this comparison was computed but never asserted
        assert_series_equal(result, expected)

        # mixed-type frames
        self.mixed_frame['datetime'] = datetime.now()
        self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
        assert self.mixed_frame['datetime'].dtype == 'M8[ns]'
        assert self.mixed_frame['timedelta'].dtype == 'm8[ns]'
        result = self.mixed_frame.get_dtype_counts().sort_values()
        expected = Series({'float64': 4,
                           'object': 1,
                           'datetime64[ns]': 1,
                           'timedelta64[ns]': 1}).sort_values()
        assert_series_equal(result, expected)

    def test_construction_with_conversions(self):

        # convert from a numpy array of non-ns timedelta64
        arr = np.array([1, 2, 3], dtype='timedelta64[s]')
        df = DataFrame(index=range(3))
        df['A'] = arr
        expected = DataFrame({'A': pd.timedelta_range('00:00:01', periods=3,
                                                      freq='s')},
                             index=range(3))
        assert_frame_equal(df, expected)

        expected = DataFrame({
            'dt1': Timestamp('20130101'),
            'dt2': date_range('20130101', periods=3),
            # 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
        }, index=range(3))
        df = DataFrame(index=range(3))
        df['dt1'] = np.datetime64('2013-01-01')
        df['dt2'] = np.array(['2013-01-01', '2013-01-02', '2013-01-03'],
                             dtype='datetime64[D]')

        # df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
        # 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')

        assert_frame_equal(df, expected)

    def test_constructor_compound_dtypes(self):
        # GH 5191
        # compound dtypes should raise not-implementederror

        def f(dtype):
            data = list(itertools.repeat((datetime(2001, 1, 1),
                                          "aa", 20), 9))
            return DataFrame(data=data,
                             columns=["A", "B", "C"],
                             dtype=dtype)

        pytest.raises(NotImplementedError, f,
                      [("A", "datetime64[h]"),
                       ("B", "str"),
                       ("C", "int32")])

        # these work (though results may be unexpected)
        f('int64')
        f('float64')

        # 10822
        # invalid error message on dt inference
        if not compat.is_platform_windows():
            f('M8[ns]')

    def test_equals_different_blocks(self):
        # GH 9330
        df0 = pd.DataFrame({"A": ["x", "y"], "B": [1, 2],
                            "C": ["w", "z"]})
        df1 = df0.reset_index()[["A", "B", "C"]]
        # this assert verifies that the above operations have
        # induced a block rearrangement
        assert (df0._data.blocks[0].dtype != df1._data.blocks[0].dtype)

        # do the real tests
        assert_frame_equal(df0, df1)
        assert df0.equals(df1)
        assert df1.equals(df0)

    def test_copy_blocks(self):
        # API/ENH 9607
        df = DataFrame(self.frame, copy=True)
        column = df.columns[0]

        # use the default copy=True, change a column
        # deprecated 0.21.0
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            blocks = df.as_blocks()
        for dtype, _df in blocks.items():
            if column in _df:
                _df.loc[:, column] = _df[column] + 1

        # make sure we did not change the original DataFrame
        assert not _df[column].equals(df[column])

    def test_no_copy_blocks(self):
        # API/ENH 9607
        df = DataFrame(self.frame, copy=True)
        column = df.columns[0]

        # use the copy=False, change a column
        # deprecated 0.21.0
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            blocks = df.as_blocks(copy=False)
        for dtype, _df in blocks.items():
            if column in _df:
                _df.loc[:, column] = _df[column] + 1

        # make sure we did change the original DataFrame
        assert _df[column].equals(df[column])

    def test_copy(self):
        cop = self.frame.copy()
        cop['E'] = cop['A']
        assert 'E' not in self.frame

        # copy objects
        copy = self.mixed_frame.copy()
        assert copy._data is not self.mixed_frame._data

    def test_pickle(self):
        unpickled = tm.round_trip_pickle(self.mixed_frame)
        assert_frame_equal(self.mixed_frame, unpickled)

        # buglet
        self.mixed_frame._data.ndim

        # empty
        unpickled = tm.round_trip_pickle(self.empty)
        repr(unpickled)

        # tz frame
        unpickled = tm.round_trip_pickle(self.tzframe)
        assert_frame_equal(self.tzframe, unpickled)

    def test_consolidate_datetime64(self):
        # numpy vstack bug

        data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
        df = pd.read_csv(StringIO(data), parse_dates=[0, 1])

        ser_starting = df.starting
        ser_starting.index = ser_starting.values
        ser_starting = ser_starting.tz_localize('US/Eastern')
        ser_starting = ser_starting.tz_convert('UTC')
        ser_starting.index.name = 'starting'

        ser_ending = df.ending
        ser_ending.index = ser_ending.values
        ser_ending = ser_ending.tz_localize('US/Eastern')
        ser_ending = ser_ending.tz_convert('UTC')
        ser_ending.index.name = 'ending'

        df.starting = ser_starting.index
        df.ending = ser_ending.index

        tm.assert_index_equal(pd.DatetimeIndex(
            df.starting), ser_starting.index)
        tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)

    def test_is_mixed_type(self):
        assert not self.frame._is_mixed_type
        assert self.mixed_frame._is_mixed_type

    def test_get_numeric_data(self):
        # TODO(wesm): unused?
        intname = np.dtype(np.int_).name  # noqa
        floatname = np.dtype(np.float_).name  # noqa
        datetime64name = np.dtype('M8[ns]').name
        objectname = np.dtype(np.object_).name

        df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
                        'f': Timestamp('20010102')},
                       index=np.arange(10))
        result = df.get_dtype_counts()
        expected = Series({'int64': 1, 'float64': 1,
                           datetime64name: 1, objectname: 1})
        # BUG FIX: sort_index() returns a new object; the results were
        # previously discarded, leaving both operands unsorted.
        result = result.sort_index()
        expected = expected.sort_index()
        assert_series_equal(result, expected)

        df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
                        'd': np.array([1.] * 10, dtype='float32'),
                        'e': np.array([1] * 10, dtype='int32'),
                        'f': np.array([1] * 10, dtype='int16'),
                        'g': Timestamp('20010102')},
                       index=np.arange(10))

        result = df._get_numeric_data()
        expected = df.loc[:, ['a', 'b', 'd', 'e', 'f']]
        assert_frame_equal(result, expected)

        only_obj = df.loc[:, ['c', 'g']]
        result = only_obj._get_numeric_data()
        expected = df.loc[:, []]
        assert_frame_equal(result, expected)

        df = DataFrame.from_dict(
            {'a': [1, 2], 'b': ['foo', 'bar'], 'c': [np.pi, np.e]})
        result = df._get_numeric_data()
        expected = DataFrame.from_dict({'a': [1, 2], 'c': [np.pi, np.e]})
        assert_frame_equal(result, expected)

        df = result.copy()
        result = df._get_numeric_data()
        expected = df
        assert_frame_equal(result, expected)

    def test_convert_objects(self):

        oops = self.mixed_frame.T.T
        converted = oops._convert(datetime=True)
        assert_frame_equal(converted, self.mixed_frame)
        assert converted['A'].dtype == np.float64

        # force numeric conversion
        self.mixed_frame['H'] = '1.'
        self.mixed_frame['I'] = '1'

        # add in some items that will be nan
        length = len(self.mixed_frame)
        self.mixed_frame['J'] = '1.'
        self.mixed_frame['K'] = '1'
        self.mixed_frame.loc[0:5, ['J', 'K']] = 'garbled'
        converted = self.mixed_frame._convert(datetime=True, numeric=True)
        assert converted['H'].dtype == 'float64'
        assert converted['I'].dtype == 'int64'
        assert converted['J'].dtype == 'float64'
        assert converted['K'].dtype == 'float64'
        assert len(converted['J'].dropna()) == length - 5
        assert len(converted['K'].dropna()) == length - 5

        # via astype
        converted = self.mixed_frame.copy()
        converted['H'] = converted['H'].astype('float64')
        converted['I'] = converted['I'].astype('int64')
        assert converted['H'].dtype == 'float64'
        assert converted['I'].dtype == 'int64'

        # via astype, but errors
        converted = self.mixed_frame.copy()
        with tm.assert_raises_regex(ValueError, 'invalid literal'):
            converted['H'].astype('int32')

        # mixed in a single column
        df = DataFrame(dict(s=Series([1, 'na', 3, 4])))
        result = df._convert(datetime=True, numeric=True)
        expected = DataFrame(dict(s=Series([1, np.nan, 3, 4])))
        assert_frame_equal(result, expected)

    def test_convert_objects_no_conversion(self):
        mixed1 = DataFrame(
            {'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})
        mixed2 = mixed1._convert(datetime=True)
        assert_frame_equal(mixed1, mixed2)

    def test_infer_objects(self):
        # GH 11221
        df = DataFrame({'a': ['a', 1, 2, 3],
                        'b': ['b', 2.0, 3.0, 4.1],
                        'c': ['c', datetime(2016, 1, 1),
                              datetime(2016, 1, 2),
                              datetime(2016, 1, 3)],
                        'd': [1, 2, 3, 'd']},
                       columns=['a', 'b', 'c', 'd'])
        df = df.iloc[1:].infer_objects()

        assert df['a'].dtype == 'int64'
        assert df['b'].dtype == 'float64'
        assert df['c'].dtype == 'M8[ns]'
        assert df['d'].dtype == 'object'

        expected = DataFrame({'a': [1, 2, 3],
                              'b': [2.0, 3.0, 4.1],
                              'c': [datetime(2016, 1, 1),
                                    datetime(2016, 1, 2),
                                    datetime(2016, 1, 3)],
                              'd': [2, 3, 'd']},
                             columns=['a', 'b', 'c', 'd'])
        # reconstruct frame to verify inference is same
        tm.assert_frame_equal(df.reset_index(drop=True), expected)

    def test_stale_cached_series_bug_473(self):

        # this is chained, but ok
        with option_context('chained_assignment', None):
            Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
                          columns=('e', 'f', 'g', 'h'))
            repr(Y)
            Y['e'] = Y['e'].astype('object')
            Y['g']['c'] = np.NaN
            repr(Y)
            result = Y.sum()  # noqa
            exp = Y['g'].sum()  # noqa
            assert pd.isna(Y['g']['c'])

    def test_get_X_columns(self):
        # numeric and object columns

        df = DataFrame({'a': [1, 2, 3],
                        'b': [True, False, True],
                        'c': ['foo', 'bar', 'baz'],
                        'd': [None, None, None],
                        'e': [3.14, 0.577, 2.773]})

        tm.assert_index_equal(df._get_numeric_data().columns,
                              pd.Index(['a', 'b', 'e']))

    def test_strange_column_corruption_issue(self):
        # (wesm) Unclear how exactly this is related to internal matters
        df = DataFrame(index=[0, 1])
        df[0] = nan
        wasCol = {}
        # uncommenting these makes the results match
        # for col in xrange(100, 200):
        #     wasCol[col] = 1
        #     df[col] = nan

        for i, dt in enumerate(df.index):
            for col in range(100, 200):
                if col not in wasCol:
                    wasCol[col] = 1
                    df[col] = nan
                df[col][dt] = i

        myid = 100

        first = len(df.loc[pd.isna(df[myid]), [myid]])
        second = len(df.loc[pd.isna(df[myid]), [myid]])
        assert first == second == 0
| bsd-3-clause |
rosswhitfield/mantid | qt/applications/workbench/workbench/plotting/test/test_figureerrorsmanager.py | 3 | 5733 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
import unittest
import matplotlib
matplotlib.use("AGG") # noqa
import matplotlib.pyplot as plt
from numpy import array_equal
# Pulling in the MantidAxes registers the 'mantid' projection
from mantid.simpleapi import CreateWorkspace
from mantidqt.utils.qt.testing import start_qapplication
from workbench.plotting.figureerrorsmanager import FigureErrorsManager
def plot_things(make_them_errors):
    """Decorator factory for the tests below.

    The returned decorator plots two curves of ``self.ws2d_histo`` on
    ``self.ax`` before running the wrapped test method: one curve selected
    by spectrum number and one by workspace index.  When ``make_them_errors``
    is True the curves are drawn with ``errorbar`` instead of ``plot``.
    """
    def decorator(func):
        def wrapper(self):
            draw = self.ax.errorbar if make_them_errors else self.ax.plot
            # one curve addressed by spectrum number ...
            draw(self.ws2d_histo, specNum=1)
            # ... and another addressed by workspace index
            draw(self.ws2d_histo, wkspIndex=2)
            return func(self)
        return wrapper
    return decorator
@start_qapplication
class FigureErrorsManagerTest(unittest.TestCase):
    """
    Test class that covers the interaction of the FigureErrorsManager with plots
    that use the mantid projection and have MantidAxes
    """

    @classmethod
    def setUpClass(cls):
        # A small 3-spectrum histogram workspace shared by all tests.
        cls.ws2d_histo = CreateWorkspace(DataX=[10, 20, 30, 10, 20, 30, 10, 20, 30],
                                         DataY=[2, 3, 4, 5, 3, 5],
                                         DataE=[1, 2, 3, 4, 1, 1],
                                         NSpec=3,
                                         Distribution=True,
                                         UnitX='Wavelength',
                                         VerticalAxisUnit='DeltaE',
                                         VerticalAxisValues=[4, 6, 8],
                                         OutputWorkspace='ws2d_histo')
        # initialises the QApplication
        # BUG FIX: arguments were reversed (super(cls, FigureErrorsManagerTest)).
        # That only worked because cls happens to be this very class, and would
        # raise TypeError as soon as the class is subclassed.
        super(FigureErrorsManagerTest, cls).setUpClass()

    def setUp(self):
        # Fresh mantid-projection figure/axes and a manager for every test.
        self.fig, self.ax = plt.subplots(subplot_kw={'projection': 'mantid'})
        self.errors_manager = FigureErrorsManager(self.fig.canvas)

    def tearDown(self):
        plt.close('all')
        del self.fig
        del self.ax
        del self.errors_manager

    @plot_things(make_them_errors=False)
    def test_show_all_errors(self):
        # assert plot does not have errors
        self.assertEqual(0, len(self.ax.containers))
        self.errors_manager.toggle_all_errors(self.ax, make_visible=True)
        # check that the errors have been added
        self.assertEqual(2, len(self.ax.containers))

    @plot_things(make_them_errors=True)
    def test_hide_all_errors(self):
        self.assertEqual(2, len(self.ax.containers))
        self.errors_manager.toggle_all_errors(self.ax, make_visible=False)
        # errors still exist
        self.assertEqual(2, len(self.ax.containers))
        # they are just invisible
        self.assertFalse(self.ax.containers[0][2][0].get_visible())

    @plot_things(make_them_errors=True)
    def test_hide_all_errors_retains_legend_properties(self):
        # create a legend with a title
        self.ax.legend(title="Test")
        self.errors_manager.toggle_all_errors(self.ax, make_visible=False)
        # check that the legend still has a title
        self.assertEqual(self.ax.get_legend().get_title().get_text(), "Test")

    @plot_things(make_them_errors=False)
    def test_show_all_errors_retains_legend_properties(self):
        # create a legend with a title
        self.ax.legend(title="Test")
        self.errors_manager.toggle_all_errors(self.ax, make_visible=True)
        # check that the legend still has a title
        self.assertEqual(self.ax.get_legend().get_title().get_text(), "Test")

    def test_curve_has_all_errorbars_on_replot_after_error_every_increase(self):
        curve = self.ax.errorbar([0, 1, 2, 4], [0, 1, 2, 4], yerr=[0.1, 0.2, 0.3, 0.4])
        new_curve = FigureErrorsManager._replot_mpl_curve(self.ax, curve, {'errorevery': 2})
        self.assertEqual(2, len(new_curve[2][0].get_segments()))
        new_curve = FigureErrorsManager._replot_mpl_curve(self.ax, new_curve, {'errorevery': 1})
        self.assertTrue(hasattr(new_curve, 'errorbar_data'))
        self.assertEqual(4, len(new_curve[2][0].get_segments()))

    def test_show_all_errors_on_waterfall_plot_retains_waterfall(self):
        self.ax.plot([0, 1], [0, 1])
        self.ax.plot([0, 1], [0, 1])
        self.ax.set_waterfall(True)

        self.errors_manager.toggle_all_errors(self.ax, make_visible=True)

        # the two identical lines must keep their waterfall offsets,
        # i.e. must not collapse onto each other
        self.assertFalse(array_equal(self.ax.get_lines()[0].get_data(), self.ax.get_lines()[1].get_data()))

    def test_hide_all_errors_on_waterfall_plot_retains_waterfall(self):
        self.ax.plot([0, 1], [0, 1])
        self.ax.plot([0, 1], [0, 1])
        self.ax.set_waterfall(True)

        self.errors_manager.toggle_all_errors(self.ax, make_visible=True)
        self.errors_manager.toggle_all_errors(self.ax, make_visible=False)

        self.assertFalse(array_equal(self.ax.get_lines()[0].get_data(), self.ax.get_lines()[1].get_data()))

    def test_creation_args_not_accessed_for_non_workspace_plots(self):
        self.ax.plot([1, 2], [1, 2])
        self.errors_manager.replot_curve(self.ax, self.ax.lines[0], {})
        self.assertEqual(0, len(self.ax.creation_args))
# Allow running this test file directly (outside of the test runner).
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
jmlon/PythonTutorials | pandas/logAnalyzer.py | 1 | 2734 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Read an apache log file into a Pandas Dataframe.
Analyze the data, produce graphs for the analysis.
Author: Jorge Londoño
Date: 2018-01-12
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from dateutil.parser import *
def apacheDateParser(x,y):
    '''Join the two date columns of an Apache log line (timestamp and
    timezone offset) and parse them into a single datetime object.

    fuzzy=True lets dateutil ignore the surrounding bracket characters.'''
    return parse(x+' '+y, fuzzy=True)
def myIntParser(x):
    '''Best-effort integer conversion: return int(x), or NaN when x is not
    a valid integer (e.g. a "-" placeholder in an Apache log field).'''
    try:
        value = int(x)
    except ValueError:
        # Non-numeric field: record it as missing.
        return np.nan
    return value
# Parse the Apache access log into a DataFrame.  Columns 3 and 4 (timestamp
# and timezone offset) are merged into a single 'dt' datetime index;
# 'status' and 'size' are parsed leniently so non-numeric fields become NaN.
data = pd.read_csv('access_log', encoding='iso-8859-1',
                   delim_whitespace=True,
                   header=None,
                   parse_dates={ 'dt': [3,4] },
                   date_parser=apacheDateParser,
                   index_col=0,
                   names=['client','id','user','datetime','tz','request','status','size','referer','agent'],
                   converters={ 'status':myIntParser, 'size': myIntParser },
                   dtype={ 'referer': object, 'agent': object } )
# Quick inspection of the parsed data and basic statistics of response size.
print(data.shape)
print(data.head())
print(data.dtypes)
print(data['size'].mean())
print(data['size'].std())
print(data['size'][data['size'].isnull()].head())
print(data['size'].count())
# Hits per weekday: group by the datetime index's weekday.
grpHitsWDay = data[['id']].groupby(data.index.weekday, sort=False)
print(grpHitsWDay)
print(grpHitsWDay.indices)
print(grpHitsWDay.count())
hits = grpHitsWDay.count()
# Relabel the numeric weekday index with day names for readability.
hits.index = [ 'Mon','Tue','Wed','Thu','Fri','Sat','Sun' ]
hits.columns = [ 'Hits' ]
print(hits)
print(hits.describe())
hits.plot(kind='bar', figsize=(8,6), colormap='summer', title='Hits per weekday', legend=False)
plt.show()
# Hits and transferred bytes per weekday, drawn as two stacked subplots.
grpWDay = data[ ['id','size'] ].groupby(data.index.weekday)
stats = grpWDay.aggregate({ 'id':lambda x: x.count(), 'size':np.sum })
print(stats)
stats = grpWDay.aggregate({ 'id':lambda x: x.count(), 'size':np.sum }).rename(columns={'size':'Bytes', 'id':'Hits'})
stats.index=[ 'Mon','Tue','Wed','Thu','Fri','Sat','Sun' ]
print(stats)
stats.plot(kind='bar', figsize=(8,6), colormap='summer', title='Hits & bytes per weekday', subplots=True)
plt.show()
# Per-resource statistics: extract the requested path from the request line
# (second whitespace-separated token) and rank resources by hits and bytes.
print(data['request'].head(10))
data['resource'] = data['request'].apply(lambda x: x.split()[1])
print(data['resource'].head(10))
grpRsc = data[ ['id','size'] ].groupby(data['resource'])
stats = grpRsc.aggregate({ 'id':lambda x: x.count(), 'size':np.sum }).rename(columns={'size':'XferBytes', 'id':'Hits'})
print(stats)
sortedh = stats.sort_values(by='Hits', ascending=False)
print(sortedh.head(10))
sortedb = stats.sort_values(by='XferBytes', ascending=False)
print(sortedb.head(10))
sortedb.head(10).plot(kind='bar', figsize=(8,5), colormap='summer', title='Xfer & Hits (sorted by Xfer)', subplots=True)
plt.show()
JohannesHoppe/docker-ubuntu-vnc-desktop | noVNC/utils/json2graph.py | 46 | 6674 | #!/usr/bin/env python
'''
Use matplotlib to generate performance charts
Copyright 2011 Joel Martin
Licensed under MPL-2.0 (see docs/LICENSE.MPL-2.0)
'''
# a bar plot with errorbars
import sys, json, pprint
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
def usage():
    '''Print the command-line usage text (including the expected JSON input
    format) and exit with status 2.'''
    print "%s json_file level1 level2 level3 [legend_height]\n\n" % sys.argv[0]
    print "Description:\n"
    print "level1, level2, and level3 are one each of the following:\n";
    print " select=ITEM - select only ITEM at this level";
    print " bar - each item on this level becomes a graph bar";
    print " group - items on this level become groups of bars";
    print "\n";
    print "json_file is a file containing json data in the following format:\n"
    print ' {';
    print ' "conf": {';
    print ' "order_l1": [';
    print ' "level1_label1",';
    print ' "level1_label2",';
    print ' ...';
    print ' ],';
    print ' "order_l2": [';
    print ' "level2_label1",';
    print ' "level2_label2",';
    print ' ...';
    print ' ],';
    print ' "order_l3": [';
    print ' "level3_label1",';
    print ' "level3_label2",';
    print ' ...';
    print ' ]';
    print ' },';
    print ' "stats": {';
    print ' "level1_label1": {';
    print ' "level2_label1": {';
    print ' "level3_label1": [val1, val2, val3],';
    print ' "level3_label2": [val1, val2, val3],';
    print ' ...';
    print ' },';
    print ' "level2_label2": {';
    print ' ...';
    print ' },';
    print ' },';
    print ' "level1_label2": {';
    print ' ...';
    print ' },';
    print ' ...';
    print ' },';
    print ' }';
    # exit code 2: conventional "command line usage error"
    sys.exit(2)
def error(msg):
    '''Print an error message and abort with exit status 1.'''
    print msg
    sys.exit(1)
#colors = ['#ff0000', '#0863e9', '#00f200', '#ffa100',
# '#800000', '#805100', '#013075', '#007900']
# Bar colors, cycled in order; one color per bar series.
colors = ['#ff0000', '#00ff00', '#0000ff',
          '#dddd00', '#dd00dd', '#00dddd',
          '#dd6622', '#dd2266', '#66dd22',
          '#8844dd', '#44dd88', '#4488dd']
# Parse command-line arguments: json_file, three level specs, and an
# optional legend height (fraction of the axes, default 0.75).
if len(sys.argv) < 5:
    usage()
filename = sys.argv[1]
L1 = sys.argv[2]
L2 = sys.argv[3]
L3 = sys.argv[4]
if len(sys.argv) > 5:
    legendHeight = float(sys.argv[5])
else:
    legendHeight = 0.75
# Load the JSON data from the file
data = json.loads(file(filename).read())
conf = data['conf']
stats = data['stats']
# Sanity check data hierarchy
# Every label in stats must appear in the corresponding conf.order_lN list,
# and the list lengths must match the stats tree at each level.
if len(conf['order_l1']) != len(stats.keys()):
    error("conf.order_l1 does not match stats level 1")
for l1 in stats.keys():
    if len(conf['order_l2']) != len(stats[l1].keys()):
        error("conf.order_l2 does not match stats level 2 for %s" % l1)
    if conf['order_l1'].count(l1) < 1:
        error("%s not found in conf.order_l1" % l1)
    for l2 in stats[l1].keys():
        if len(conf['order_l3']) != len(stats[l1][l2].keys()):
            error("conf.order_l3 does not match stats level 3")
        if conf['order_l2'].count(l2) < 1:
            error("%s not found in conf.order_l2" % l2)
        for l3 in stats[l1][l2].keys():
            if conf['order_l3'].count(l3) < 1:
                error("%s not found in conf.order_l3" % l3)
#
# Generate the data based on the level specifications
#
# Exactly one of L1/L2/L3 is "select=ITEM"; the other two levels provide the
# bar labels and the group labels.  bar_vals/bar_sdvs hold the mean and
# standard deviation of each sample list.
bar_labels = None
group_labels = None
bar_vals = []
bar_sdvs = []
if L3.startswith("select="):
    select_label = l3 = L3.split("=")[1]
    bar_labels = conf['order_l1']
    group_labels = conf['order_l2']
    bar_vals = [[0]*len(group_labels) for i in bar_labels]
    bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
    for b in range(len(bar_labels)):
        l1 = bar_labels[b]
        for g in range(len(group_labels)):
            l2 = group_labels[g]
            bar_vals[b][g] = np.mean(stats[l1][l2][l3])
            bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
elif L2.startswith("select="):
    select_label = l2 = L2.split("=")[1]
    bar_labels = conf['order_l1']
    group_labels = conf['order_l3']
    bar_vals = [[0]*len(group_labels) for i in bar_labels]
    bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
    for b in range(len(bar_labels)):
        l1 = bar_labels[b]
        for g in range(len(group_labels)):
            l3 = group_labels[g]
            bar_vals[b][g] = np.mean(stats[l1][l2][l3])
            bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
elif L1.startswith("select="):
    select_label = l1 = L1.split("=")[1]
    bar_labels = conf['order_l2']
    group_labels = conf['order_l3']
    bar_vals = [[0]*len(group_labels) for i in bar_labels]
    bar_sdvs = [[0]*len(group_labels) for i in bar_labels]
    for b in range(len(bar_labels)):
        l2 = bar_labels[b]
        for g in range(len(group_labels)):
            l3 = group_labels[g]
            bar_vals[b][g] = np.mean(stats[l1][l2][l3])
            bar_sdvs[b][g] = np.std(stats[l1][l2][l3])
else:
    usage()
# If group is before bar then flip (zip) the data
if [L1, L2, L3].index("group") < [L1, L2, L3].index("bar"):
    bar_labels, group_labels = group_labels, bar_labels
    bar_vals = zip(*bar_vals)
    bar_sdvs = zip(*bar_sdvs)
print "bar_vals:", bar_vals
#
# Now render the bar graph
#
ind = np.arange(len(group_labels))  # the x locations for the groups
width = 0.8 * (1.0/len(bar_labels))  # the width of the bars
fig = plt.figure(figsize=(10,6), dpi=80)
plot = fig.add_subplot(1, 1, 1)
rects = []
# One bar series per bar label, offset by i*width inside each group,
# with the standard deviation drawn as an error bar.
for i in range(len(bar_vals)):
    rects.append(plot.bar(ind+width*i, bar_vals[i], width, color=colors[i],
                 yerr=bar_sdvs[i], align='center'))
# add some labels, ticks and the legend
plot.set_ylabel('Milliseconds (less is better)')
plot.set_title("Javascript array test: %s" % select_label)
plot.set_xticks(ind+width)
plot.set_xticklabels( group_labels )
fontP = FontProperties()
fontP.set_size('small')
plot.legend( [r[0] for r in rects], bar_labels, prop=fontP,
        loc = 'center right', bbox_to_anchor = (1.0, legendHeight))
def autolabel(rects):
    # attach some text labels: print each bar's height above the bar
    for rect in rects:
        height = rect.get_height()
        if np.isnan(height):
            height = 0.0
        plot.text(rect.get_x()+rect.get_width()/2., height+20, '%d'%int(height),
                ha='center', va='bottom', size='7')
for rect in rects:
    autolabel(rect)
# Adjust axis sizes
axis = list(plot.axis())
axis[0] = -width  # Make sure left side has enough for bar
#axis[1] = axis[1] * 1.20 # Add 20% to the right to make sure it fits
axis[2] = 0  # Make y-axis start at 0
axis[3] = axis[3] * 1.10  # Add 10% to the top
plot.axis(axis)
plt.show()
| apache-2.0 |
FrancoisRheaultUS/dipy | dipy/stats/analysis.py | 2 | 10573 |
import os
import numpy as np
from scipy.spatial import cKDTree
from scipy.ndimage.interpolation import map_coordinates
from scipy.spatial.distance import mahalanobis
from dipy.utils.optpkg import optional_package
from dipy.io.utils import save_buan_profiles_hdf5
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import AveragePointwiseEuclideanMetric
from dipy.tracking.streamline import (set_number_of_points,
values_from_volume,
orient_by_streamline,
transform_streamlines,
Streamlines)
pd, have_pd, _ = optional_package("pandas")
_, have_tables, _ = optional_package("tables")
def peak_values(bundle, peaks, dt, pname, bname, subject, group_id, ind, dir):
    """ Peak_values function finds the generalized fractional anisotropy (gfa)
        and quantitative anisotropy (qa) values from peaks object (eg: csa)
        for every point on a streamline used while tracking and saves it in
        hd5 file.

        Parameters
        ----------
        bundle : string
            Name of bundle being analyzed
        peaks : peaks
            contains peak directions and values
        dt : DataFrame
            DataFrame to be populated
        pname : string
            Name of the dti metric
        bname : string
            Name of bundle being analyzed.
        subject : string
            subject number as a string (e.g. 10001)
        group_id : integer
            which group subject belongs to 1 patient and 0 for control
        ind : integer list
            ind tells which disk number a point belong.
        dir : string
            path of output directory

    """
    # Each entry pairs a scalar volume taken from the peaks object with the
    # suffix appended to the metric name in the saved profile.
    measures = (('_gfa', peaks.gfa), ('_qa', peaks.qa[..., 0]))
    for suffix, volume in measures:
        anatomical_measures(bundle, volume, dt, pname + suffix, bname,
                            subject, group_id, ind, dir)
def anatomical_measures(bundle, metric, dt, pname, bname, subject, group_id,
                        ind, dir):
    """ Calculates dti measure (eg: FA, MD) per point on streamlines and
        save it in hd5 file.

        Parameters
        ----------
        bundle : string
            Name of bundle being analyzed
        metric : matrix of float values
            dti metric e.g. FA, MD
        dt : DataFrame
            DataFrame to be populated
        pname : string
            Name of the dti metric
        bname : string
            Name of bundle being analyzed.
        subject : string
            subject number as a string (e.g. 10001)
        group_id : integer
            which group subject belongs to 1 for patient and 0 control
        ind : integer list
            ind tells which disk number a point belong.
        dir : string
            path of output directory

    """
    # Reset the output columns; dt is used as a mapping of parallel lists.
    for column in ("streamline", "disk", "subject", pname, "group"):
        dt[column] = []

    # Interpolate the scalar volume at every streamline point.
    # NOTE(review): bundle._data appears to be the flat (n_points, 3)
    # coordinate array of the Streamlines object -- confirm.
    values = map_coordinates(metric, bundle._data.T, order=1)
    n_values = len(values)

    # Disk numbers are stored 1-based; ind holds the 0-based assignment
    # for each point.
    dt["disk"].extend(ind[list(range(n_values))] + 1)
    dt["subject"].extend([subject] * n_values)
    dt["group"].extend([group_id] * n_values)
    dt[pname].extend(values)

    # Tag every point with the index of the streamline it belongs to.
    for sl_index, sl in enumerate(bundle):
        dt["streamline"].extend([sl_index] * len(sl))

    save_buan_profiles_hdf5(os.path.join(dir, bname + "_" + pname), dt)
def assignment_map(target_bundle, model_bundle, no_disks=100):
    """
    Calculates assignment maps of the target bundle with reference to
    model bundle centroids.

    Parameters
    ----------
    target_bundle : streamlines
        target bundle extracted from subject data in common space
    model_bundle : streamlines
        atlas bundle used as reference
    no_disks : integer, optional
        Number of disks used for dividing bundle into disks. (Default 100)

    Returns
    -------
    indx : ndarray
        For every point of ``target_bundle``, the index of the nearest
        model-centroid point, i.e. its disk assignment.

    References
    ----------
    .. [Chandio19] Chandio, B.Q., S. Koudoro, D. Reagan, J. Harezlak,
    E. Garyfallidis, Bundle Analytics: a computational and statistical
    analyses framework for tractometric studies, Proceedings of:
    International Society of Magnetic Resonance in Medicine (ISMRM),
    Montreal, Canada, 2019.

    """
    # Resample the model bundle so every streamline has one point per disk.
    # (no_disks now defaults to 100, matching the documented default.)
    mbundle_streamlines = set_number_of_points(model_bundle,
                                               nb_points=no_disks)

    metric = AveragePointwiseEuclideanMetric()
    # A very high threshold collapses the bundle into a single cluster,
    # whose centroid acts as the bundle's "core" streamline.
    qb = QuickBundles(threshold=85., metric=metric)
    clusters = qb.cluster(mbundle_streamlines)
    centroids = Streamlines(clusters.centroids)

    # Nearest-neighbour query: assign every target point to the closest
    # centroid point (disk).
    _, indx = cKDTree(centroids.get_data(), 1,
                      copy_data=True).query(target_bundle.get_data(), k=1)

    return indx
def gaussian_weights(bundle, n_points=100, return_mahalnobis=False,
                     stat=np.mean):
    """
    Calculate weights for each streamline/node in a bundle, based on a
    Mahalanobis distance from the core the bundle, at that node (mean, per
    default).

    Parameters
    ----------
    bundle : Streamlines
        The streamlines to weight.
    n_points : int, optional
        The number of points to resample to. *If the `bundle` is an array, this
        input is ignored*. Default: 100.
    return_mahalnobis : bool, optional
        If True, return the raw Mahalanobis distances instead of the
        normalized inverse-distance weights. Default: False.
    stat : callable, optional
        The statistic used to define the bundle "core" at each node,
        e.g. ``np.mean`` or ``np.median``. Default: ``np.mean``.

    Returns
    -------
    w : array of shape (n_streamlines, n_points)
        Weights for each node in each streamline, calculated as its relative
        inverse of the Mahalanobis distance, relative to the distribution of
        coordinates at that node position across streamlines.
        NOTE(review): in the single-streamline special case below the return
        value is a length-1 array, not (1, n_points) -- confirm callers
        handle this.
    """
    # Resample to same length for each streamline:
    bundle = set_number_of_points(bundle, n_points)
    # This is the output
    w = np.zeros((len(bundle), n_points))
    # If there's only one fiber here, it gets the entire weighting:
    if len(bundle) == 1:
        if return_mahalnobis:
            return np.array([np.nan])
        else:
            return np.array([1])

    for node in range(n_points):
        # This should come back as a 3D covariance matrix with the spatial
        # variance covariance of this node across the different streamlines
        # This is a 3-by-3 array:
        # (After resampling, each streamline contributes n_points rows to
        # the flat coordinate array, so striding by n_points gathers this
        # node's coordinate from every streamline -- assumes streamlines are
        # stored contiguously in bundle._data.)
        node_coords = bundle._data[node::n_points]
        c = np.cov(node_coords.T, ddof=0)
        # Reorganize as an upper diagonal matrix for expected Mahalanobis
        # input:
        c = np.array([[c[0, 0], c[0, 1], c[0, 2]],
                      [0, c[1, 1], c[1, 2]],
                      [0, 0, c[2, 2]]])
        # Calculate the mean or median of this node as well
        # delta = node_coords - np.mean(node_coords, 0)
        m = stat(node_coords, 0)
        # Weights are the inverse of the Mahalanobis distance
        for fn in range(len(bundle)):
            # In the special case where all the streamlines have the exact same
            # coordinate in this node, the covariance matrix is all zeros, so
            # we can't calculate the Mahalanobis distance, we will instead give
            # each streamline an identical weight, equal to the number of
            # streamlines:
            if np.allclose(c, 0):
                w[:, node] = len(bundle)
                break
            # Otherwise, go ahead and calculate Mahalanobis for node on
            # fiber[fn]:
            w[fn, node] = mahalanobis(node_coords[fn], m, np.linalg.inv(c))
    if return_mahalnobis:
        return w
    # weighting is inverse to the distance (the further you are, the less you
    # should be weighted)
    w = 1 / w
    # Normalize before returning, so that the weights in each node sum to 1:
    return w / np.sum(w, 0)
def afq_profile(data, bundle, affine, n_points=100,
                orient_by=None, weights=None, **weights_kwarg):
    """
    Calculates a summarized profile of data for a bundle or tract
    along its length.

    Follows the approach outlined in [Yeatman2012]_.

    Parameters
    ----------
    data : 3D volume
        The statistic to sample with the streamlines.
    bundle : StreamLines class instance
        The collection of streamlines (possibly already resampled into an array
        for each to have the same length) with which we are resampling. See
        Note below about orienting the streamlines.
    affine : array_like (4, 4)
        The mapping from voxel coordinates to streamline points.
        The voxel_to_rasmm matrix, typically from a NIFTI file.
    n_points: int, optional
        The number of points to sample along the bundle. Default: 100.
    orient_by: streamline, optional.
        A streamline to use as a standard to orient all of the streamlines in
        the bundle according to.
    weights : 1D array or 2D array or callable (optional)
        Weight each streamline (1D) or each node (2D) when calculating the
        tract-profiles. Must sum to 1 across streamlines (in each node if
        relevant). If callable, this is a function that calculates weights.
    weights_kwarg : key-word arguments
        Additional key-word arguments to pass to the weight-calculating
        function. Only to be used if weights is a callable.

    Returns
    -------
    ndarray : a 1D array with the profile of `data` along the length of
        `bundle`

    Notes
    -----
    Before providing a bundle as input to this function, you will need to make
    sure that the streamlines in the bundle are all oriented in the same
    orientation relative to the bundle (use :func:`orient_by_streamline`).

    References
    ----------
    .. [Yeatman2012] Yeatman, Jason D., Robert F. Dougherty,
       Nathaniel J. Myall, Brian A. Wandell, and Heidi M. Feldman. 2012.
       "Tract Profiles of White Matter Properties: Automating Fiber-Tract
       Quantification" PloS One 7 (11): e49790.

    """
    if orient_by is not None:
        bundle = orient_by_streamline(bundle, orient_by)
    if affine is None:
        affine = np.eye(4)
    if len(bundle) == 0:
        raise ValueError("The bundle contains no streamlines")

    # Resample each streamline to the same number of points:
    fgarray = set_number_of_points(bundle, n_points)

    # Extract the values
    values = np.array(values_from_volume(data, fgarray, affine))

    if weights is None:
        # Default: every streamline contributes equally at every node.
        weights = np.ones(values.shape) / values.shape[0]
    elif callable(weights):
        weights = weights(bundle, **weights_kwarg)
    else:
        # We check that weights *always sum to 1 across streamlines*:
        if not np.allclose(np.sum(weights, 0), np.ones(n_points)):
            # Fixed: previously two arguments were passed to ValueError,
            # which produced a tuple message instead of a single string.
            raise ValueError("The sum of weights across streamlines must "
                             "be equal to 1")

    return np.sum(weights * values, 0)
| bsd-3-clause |
wasade/qiime | qiime/compare_categories.py | 1 | 7151 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout", "Michael Dwan", "Logan Knecht",
"Damien Coy", "Levi McCracken"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "jai.rideout@gmail.com"
from os.path import join
from types import ListType
import pandas as pd
from skbio.stats.distance import DistanceMatrix
from skbio.stats.distance import anosim, permanova, bioenv
from qiime.parse import parse_mapping_file_to_dict
from qiime.util import get_qiime_temp_dir, MetadataMap, RExecutor
methods = ['adonis', 'anosim', 'bioenv', 'morans_i', 'mrpp', 'permanova',
'permdisp', 'dbrda']
def compare_categories(dm_fp, map_fp, method, categories, num_perms, out_dir):
    """Runs the specified statistical method using the category of interest.

    This method does not return anything; all output is written to results
    files in out_dir.

    Methods handled natively in Python (via scikit-bio) are 'anosim',
    'permanova' and 'bioenv'; the remaining methods ('adonis', 'morans_i',
    'mrpp', 'permdisp', 'dbrda') are delegated to R through RExecutor.

    Arguments:
        dm_fp - filepath to the input distance matrix
        map_fp - filepath to the input metadata mapping file
        method - name of the statistical method to run (see `methods`)
        categories - list of categories in the metadata mapping file to
            consider in the statistical test. Multiple categories will only be
            considered if method is 'bioenv', otherwise only the first category
            will be considered
        num_perms - the number of permutations to use when calculating the
            p-value. If method is 'bioenv' or 'morans_i', this parameter will
            be ignored as they are not permutation-based methods
        out_dir - path to the output directory where results files will be
            written. It is assumed that this directory already exists and we
            have write permissions to it
    """
    # Make sure we were passed a list of categories, not a single string.
    if not isinstance(categories, ListType):
        raise TypeError("The supplied categories must be a list of "
                        "strings.")

    # Special case: we do not allow SampleID as it is not a category, neither
    # in data structure representation nor in terms of a statistical test (no
    # groups are formed since all entries are unique IDs).
    if 'SampleID' in categories:
        raise ValueError("Cannot use SampleID as a category because it is a "
                         "unique identifier for each sample, and thus does "
                         "not create groups of samples (nor can it be used as "
                         "a numeric category in Moran's I or BIO-ENV "
                         "analyses). Please choose a different metadata "
                         "column to perform statistical tests on.")

    dm = DistanceMatrix.read(dm_fp)

    if method in ('anosim', 'permanova', 'bioenv'):
        # These methods run in Python: load the mapping file into a pandas
        # DataFrame indexed by sample ID, as expected by skbio.
        with open(map_fp, 'U') as map_f:
            md_dict = parse_mapping_file_to_dict(map_f)[0]
        df = pd.DataFrame.from_dict(md_dict, orient='index')

        out_fp = join(out_dir, '%s_results.txt' % method)

        if method in ('anosim', 'permanova'):
            if method == 'anosim':
                method_fn = anosim
            elif method == 'permanova':
                method_fn = permanova

            results = method_fn(dm, df, column=categories[0],
                                permutations=num_perms)
        elif method == 'bioenv':
            # bioenv is the only method that uses all supplied categories.
            results = bioenv(dm, df, columns=categories)

        results.to_csv(out_fp, sep='\t')
    else:
        # Remove any samples from the mapping file that aren't in the distance
        # matrix (important for validation checks). Use strict=True so that an
        # error is raised if the distance matrix contains any samples that
        # aren't in the mapping file.
        with open(map_fp, 'U') as map_f:
            md_map = MetadataMap.parseMetadataMap(map_f)
        md_map.filterSamples(dm.ids, strict=True)

        # These methods are run in R. Input validation must be done here before
        # running the R commands.
        if method in ['adonis', 'morans_i', 'mrpp', 'permdisp', 'dbrda']:
            # Check to make sure all categories passed in are in mapping file
            # and are not all the same value.
            for category in categories:
                if not category in md_map.CategoryNames:
                    raise ValueError("Category '%s' not found in mapping file "
                                     "columns." % category)

                if md_map.hasSingleCategoryValue(category):
                    raise ValueError("All values in category '%s' are the "
                                     "same. The statistical method '%s' "
                                     "cannot operate on a category that "
                                     "creates only a single group of samples "
                                     "(e.g. there are no 'between' distances "
                                     "because there is only a single group)."
                                     % (category, method))

            # Build the command arguments string.
            command_args = ['-d %s -m %s -c %s -o %s'
                            % (dm_fp, map_fp, categories[0], out_dir)]

            if method == 'morans_i':
                # Moran's I requires only numeric categories.
                for category in categories:
                    if not md_map.isNumericCategory(category):
                        raise TypeError("The category '%s' is not numeric. "
                                        "Not all values could be converted to "
                                        "numbers." % category)
            else:
                # The rest require groups of samples, so the category values
                # cannot all be unique (adonis is exempt when the category is
                # numeric, since it can treat it as a continuous variable).
                for category in categories:
                    if (md_map.hasUniqueCategoryValues(category) and not
                        (method == 'adonis' and
                         md_map.isNumericCategory(category))):
                        raise ValueError("All values in category '%s' are "
                                         "unique. This statistical method "
                                         "cannot operate on a category with "
                                         "unique values (e.g. there are no "
                                         "'within' distances because each "
                                         "group of samples contains only a "
                                         "single sample)." % category)

                # Only Moran's I doesn't accept a number of permutations.
                if num_perms < 0:
                    raise ValueError("The number of permutations must be "
                                     "greater than or equal to zero.")

                command_args[0] += ' -n %d' % num_perms

            # Hand the prepared arguments to the R script named after the
            # method (e.g. adonis.r), executed via RExecutor.
            rex = RExecutor(TmpDir=get_qiime_temp_dir())
            rex(command_args, '%s.r' % method, output_dir=out_dir)
        else:
            raise ValueError("Unrecognized method '%s'. Valid methods: %r"
                             % (method, methods))
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.