repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
breedlun/clearplot | clearplot/custom_annotations.py | 1 | 12108 | # -*- coding: utf-8 -*-
#Created on Fri Oct 10 19:39:21 2014
#@author: Benjamin Reedlunn
import matplotlib as _mpl
import numpy as _np
##==============================================================================
## Circle Box
##==============================================================================
#
#class _Circle_Box_Style(_mpl.patches.BoxStyle._Base):
# """
# A circle box style to be used with annotations
# """
# def __init__(self, pad=0.1):
# self.pad = pad
# super(_Circle_Box_Style, self).__init__()
#
# def transmute(self, x0, y0, width, height, mutation_size):
# # padding
# pad = mutation_size * self.pad
# # center
# cx, cy = x0+.5*width, y0+.5*height
# # width and height with padding added.
# width, height = width + 2.*pad, height + 2.*pad,
# # get radius
# radius = (width**2 + height**2)**.5 * .5
# print "radius = " + str(radius)
# # Generate path of the circle
# cir_path = _mpl.path.Path.unit_circle()
# vertices = radius*cir_path.vertices + (cx, cy)
# path = _mpl.path.Path(vertices, cir_path.codes)
#
# return path
#
#_mpl.patches.BoxStyle._style_list['circle'] = _Circle_Box_Style
#==============================================================================
# Slick Arrow
#==============================================================================
# Default arrowhead dimensions: 7 mm long, 40% as wide, converted from mm to
# points (72 points per inch / 25.4 mm per inch) and rounded to 4 decimals.
_dflt_head_length = round(72.0/25.4 * 7.0, 4)
_dflt_head_width = round(72.0/25.4 * 7.0 * 0.4, 4)
# Default total arrow length (tip to tail): 20 mm expressed in points
_dflt_length = 72.0/25.4 * 20
class _Slick_Arrow(_mpl.patches.ArrowStyle._Base):
    """
    Base class for a John Shaw approved arrow style to be used with
    annotations.  Subclasses select which ends get heads and whether the
    arrow is monolithic (tail and head drawn as a single patch).
    """
    def __init__(self, beginarrow = None, endarrow = None,
        head_length = _dflt_head_length, head_width = _dflt_head_width, \
        monolithic = False, length = 0):
        """
        The arrows are drawn if *beginarrow* and/or *endarrow* are
        true. *head_length* and *head_width* determine the size
        of the arrow, in points (1/72"), relative to the *mutation scale*.
        *monolithic* draws the whole arrow (tail included) as one patch;
        *length* is the tip-to-tail length used in the monolithic case.
        """
        self.beginarrow = beginarrow
        self.endarrow = endarrow
        self.head_length = head_length
        self.head_width = head_width
        # Arrowheads are always drawn filled
        self.fillbegin = True
        self.fillend = True
        self.monolithic = monolithic
        self.length = length
        super(_Slick_Arrow, self).__init__()
    def _gen_arrow_path(self, x_a, x_b, head_width, head_length, length, \
        monolithic = False):
        """
        Returns the path for the arrowhead or a monolithic, straight, arrow.
        Parameters
        ----------
        x_a: 1x2 array or list
            Coordinates of a point aligned with back of arrow
        x_b: 1x2 array or list
            Coordinates of the tip of the arrowhead
        head_width: float
            Arrowhead width, in points (1/72")
        head_length: float
            Arrowhead length, in points (1/72")
        length: float
            Arrow length from tip to tail, in points (1/72")
        monolithic: bool, optional
            Specifies whether to make the arrow a combination of a line and
            a patch, or to make it a single monolithic patch
        Returns
        -------
        path: path object
        """
        #Define the radius of the back of the arrow (in points)
        head_R = head_width / 2.0
        if monolithic:
            #Define the number of points along the curve of the back of the arrow
            n = 12
            #Define the total number of points needed to construct the patch
            #(curve points plus tip, two tail corners, and the closing point)
            m = n + 4
            #Specify the line width of the arrow tail (in points)
            line_width = 0.75
            #Construct the arrow in clockwise fashion
            #Tip
            X_tip = _np.array([[0],[0]])
            #Bottom half of arrowhead back
            #Find the angle of the intersection point, where the line hits the
            #back of the arrowhead
            theta_0 = _np.arcsin(line_width/(2.0*head_R))
            theta_bot = _np.linspace(-_np.pi/2.0 + _np.pi/15.0, -theta_0, int(n/2))
            X_bot = head_R * _np.array([_np.cos(theta_bot), _np.sin(theta_bot)])
            #Shift the curved back so it is centered head_length behind the tip
            X_bot[0,:] = X_bot[0,:] - head_length
            #Straight line (the two tail corners, length points behind the tip)
            X_line = _np.array([[-length, -line_width/2.0], [-length, line_width/2.0]]).T
            #Top half of arrowhead back
            theta_top = _np.linspace(theta_0, _np.pi/2.0 - _np.pi/15.0, int(n/2))
            X_top = head_R * _np.array([_np.cos(theta_top), _np.sin(theta_top)])
            X_top[0,:] = X_top[0,:] - head_length
            #Put them together, and add extra point at the tip to close the path
            X = _np.concatenate([X_tip, X_bot, X_line, X_top, X_tip, X_tip], axis = 1)
        else:
            #Define the number of points along the curve of the back of the arrow
            n = 11
            #Define the total number of points needed to construct the patch
            m = n + 2
            #Construct the arrow in clockwise fashion
            #Tip
            X_tip = _np.array([[0],[0]])
            #Arrowhead back (slightly less than a half circle behind the tip)
            theta = _np.linspace(-_np.pi/2.0 + _np.pi/15.0, _np.pi/2.0 - _np.pi/15.0, n)
            X_back = head_R * _np.array([_np.cos(theta), _np.sin(theta)])
            X_back[0,:] = X_back[0,:] - head_length
            #Put them together, and add extra point at the tip to close the path
            X = _np.concatenate([X_tip, X_back, X_tip, X_tip], axis = 1)
        #Get the arrowhead orientation from the vector x_a -> x_b
        orient = _np.arctan2(x_b[1] - x_a[1], x_b[0] - x_a[0])
        #Define a rotation matrix and rotate into the desired orientation
        Q = _np.array([[_np.cos(orient), -_np.sin(orient)], [_np.sin(orient), _np.cos(orient)]])
        X = _np.dot(Q, X)
        #Shift to the desired location (tip lands exactly on x_b)
        path_vertices = _np.array(x_b)[:,None] + X
        #Define the reference point that defines the location of the patch (MOVETO)
        #Define the other points that define the shape of the patch (LINETO)
        #Define the point to close the polygon with (CLOSEPOLY)
        path_codes = [_mpl.path.Path.MOVETO] + \
            [_mpl.path.Path.LINETO] * (m - 1) + \
            [_mpl.path.Path.CLOSEPOLY]
        #Generate the path object
        path = _mpl.path.Path(path_vertices.T, path_codes)
        return(path)
    def transmute(self, path, mutation_size, line_width):
        """
        Generates the proper arrow head path and scales it.
        Parameters
        ----------
        path: path object
            The path along which the arrow will be drawn
        mutation_size: float
            The amount the arrow head will be scaled
        line_width: float
            May be used to adjust the the path so that it does not pass beyond
            the given points.
        Returns
        -------
        path_list: list
            A list of Path instances
        fillable list: list
            A list of booleans, each indicating whether the corresponding Path
            instance in `path_list` can be filled or not.
        """
        #Scale the arrow
        head_length = self.head_length * mutation_size
        head_width = self.head_width * mutation_size
        length = self.length * mutation_size
        #Construct the output lists; the connecting line itself is not fillable
        path_list = [path]
        fillable_list = [False]
        #Arrow at the beginning of the line
        if self.beginarrow:
            #The incoming path is typically an 'arc3' path, so we grab the
            #first two vertices to help align the arrow head
            x0 = path.vertices[0]
            x1 = path.vertices[1]
            #Note the reversed order (x1, x0): the head points back toward x0
            ah_path = self._gen_arrow_path(x1, x0, head_width, \
                head_length, length, monolithic = self.monolithic)
            path_list.append(ah_path)
            fillable_list.append(True)
        #Arrow at the end of the line
        if self.endarrow:
            #The incoming path is typically an 'arc3' path, so we grab the
            #last two vertices to help align the arrow head
            x2 = path.vertices[-2]
            x3 = path.vertices[-1]
            ah_path = self._gen_arrow_path(x2, x3, head_width, \
                head_length, length, monolithic = self.monolithic)
            path_list.append(ah_path)
            fillable_list.append(True)
        return(path_list, fillable_list)
class Slick_Arrow_A(_Slick_Arrow):
    """An arrow whose head is drawn at the beginning point of the line."""

    def __init__(self, head_length=_dflt_head_length,
                 head_width=_dflt_head_width, length=_dflt_length):
        """
        Instantiates an arrow with a head at its beginning point.

        Parameters
        ----------
        head_length : float, optional
            length of the arrow head, in points (1/72")
        head_width : float, optional
            width of the arrow head, in points (1/72")
        length : float, optional
            length of the arrow from tip to tail, in points (1/72")
        """
        super(Slick_Arrow_A, self).__init__(
            endarrow=False, beginarrow=True, length=length,
            head_width=head_width, head_length=head_length)

#Register the style so annotations can request it by the "<(-" key
_mpl.patches.ArrowStyle._style_list["<(-"] = Slick_Arrow_A
class Slick_Arrow_B(_Slick_Arrow):
    """An arrow whose head is drawn at the end point of the line."""

    def __init__(self, head_length=_dflt_head_length,
                 head_width=_dflt_head_width, length=_dflt_length):
        """
        Instantiates an arrow with a head at its end point.

        Parameters
        ----------
        head_length : float, optional
            length of the arrow head, in points (1/72")
        head_width : float, optional
            width of the arrow head, in points (1/72")
        length : float, optional
            length of the arrow from tip to tail, in points (1/72")
        """
        super(Slick_Arrow_B, self).__init__(
            endarrow=True, beginarrow=False, length=length,
            head_width=head_width, head_length=head_length)

#Register the style so annotations can request it by the "-)>" key
_mpl.patches.ArrowStyle._style_list["-)>"] = Slick_Arrow_B
class Slick_Arrow_AB(_Slick_Arrow):
    """An arrow with heads drawn at both its beginning and end points."""

    def __init__(self, head_length=_dflt_head_length,
                 head_width=_dflt_head_width, length=_dflt_length):
        """
        Instantiates an arrow with heads at its beginning and end points.

        Parameters
        ----------
        head_length : float, optional
            length of the arrow head, in points (1/72")
        head_width : float, optional
            width of the arrow head, in points (1/72")
        length : float, optional
            length of the arrow from tip to tail, in points (1/72")
        """
        super(Slick_Arrow_AB, self).__init__(
            endarrow=True, beginarrow=True, length=length,
            head_width=head_width, head_length=head_length)

#Register the style so annotations can request it by the "<(-)>" key
_mpl.patches.ArrowStyle._style_list["<(-)>"] = Slick_Arrow_AB
class Axis_Arrow(_Slick_Arrow):
    """A monolithic (single-patch) arrow with a head at its end point."""

    def __init__(self, head_length=_dflt_head_length,
                 head_width=_dflt_head_width, length=_dflt_length):
        """
        Instantiates a monolithic arrow with a head at its end point.

        Parameters
        ----------
        head_length : float, optional
            length of the arrow head, in points (1/72")
        head_width : float, optional
            width of the arrow head, in points (1/72")
        length : float, optional
            length of the arrow from tip to tail, in points (1/72")
        """
        super(Axis_Arrow, self).__init__(
            endarrow=True, beginarrow=False, monolithic=True,
            length=length, head_width=head_width, head_length=head_length)

#Register the style so annotations can request it by the "=)>" key
_mpl.patches.ArrowStyle._style_list["=)>"] = Axis_Arrow
detrout/debian-statsmodels | examples/python/generic_mle.py | 33 | 7532 |
## Maximum Likelihood Estimation (Generic models)
# This tutorial explains how to quickly implement new maximum likelihood models in `statsmodels`. We give two examples:
#
# 1. Probit model for binary dependent variables
# 2. Negative binomial model for count data
#
# The `GenericLikelihoodModel` class eases the process by providing tools such as automatic numeric differentiation and a unified interface to ``scipy`` optimization functions. Using ``statsmodels``, users can fit new MLE models simply by "plugging-in" a log-likelihood function.
# ## Example 1: Probit model
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
# The ``Spector`` dataset is distributed with ``statsmodels``. You can access a vector of values for the dependent variable (``endog``) and a matrix of regressors (``exog``) like this:
# Load the Spector dataset shipped with statsmodels as pandas objects
data = sm.datasets.spector.load_pandas()
exog = data.exog
endog = data.endog
print(sm.datasets.spector.NOTE)
print(data.exog.head())
# Then, we add a constant to the matrix of regressors:
exog = sm.add_constant(exog, prepend=True)
# To create your own Likelihood Model, you simply need to overwrite the loglike method.
class MyProbit(GenericLikelihoodModel):
    """Probit model implemented by overriding only ``loglike``."""

    def loglike(self, params):
        """Return the Probit log-likelihood evaluated at ``params``.

        The sign trick maps y in {0, 1} to {-1, +1}, so both outcome
        cases collapse into a single normal-CDF expression.
        """
        sign = 2 * self.endog - 1
        linear_pred = np.dot(self.exog, params)
        return np.sum(stats.norm.logcdf(sign * linear_pred))
# Estimate the model and print a summary:
sm_probit_manual = MyProbit(endog, exog).fit()
print(sm_probit_manual.summary())
# Compare your Probit implementation to ``statsmodels``' "canned" implementation:
sm_probit_canned = sm.Probit(endog, exog).fit()
print(sm_probit_canned.params)
print(sm_probit_manual.params)
print(sm_probit_canned.cov_params())
print(sm_probit_manual.cov_params())
# Notice that the ``GenericMaximumLikelihood`` class provides automatic differentiation, so we didn't have to provide Hessian or Score functions in order to calculate the covariance estimates.
#
#
# ## Example 2: Negative Binomial Regression for Count Data
#
# Consider a negative binomial regression model for count data with
# log-likelihood (type NB-2) function expressed as:
#
# $$
# \mathcal{L}(\beta_j; y, \alpha) = \sum_{i=1}^n y_i ln
# \left ( \frac{\alpha exp(X_i'\beta)}{1+\alpha exp(X_i'\beta)} \right ) -
# \frac{1}{\alpha} ln(1+\alpha exp(X_i'\beta)) + ln \Gamma (y_i + 1/\alpha) - ln \Gamma (y_i+1) - ln \Gamma (1/\alpha)
# $$
#
# with a matrix of regressors $X$, a vector of coefficients $\beta$,
# and the negative binomial heterogeneity parameter $\alpha$.
#
# Using the ``nbinom`` distribution from ``scipy``, we can write this likelihood
# simply as:
#
import numpy as np
from scipy.stats import nbinom
def _ll_nb2(y, X, beta, alph):
mu = np.exp(np.dot(X, beta))
size = 1/alph
prob = size/(size+mu)
ll = nbinom.logpmf(y, size, prob)
return ll
# ### New Model Class
#
# We create a new model class which inherits from ``GenericLikelihoodModel``:
from statsmodels.base.model import GenericLikelihoodModel
class NBin(GenericLikelihoodModel):
    """Negative binomial (NB-2) regression via ``GenericLikelihoodModel``.

    The parameter vector is ``[beta, alpha]``: the regression coefficients
    followed by the heterogeneity parameter ``alpha`` as the last element.
    """

    def __init__(self, endog, exog, **kwds):
        super(NBin, self).__init__(endog, exog, **kwds)

    def nloglikeobs(self, params):
        """Return the negative log-likelihood of each observation.

        Parameters
        ----------
        params : array_like
            Regression coefficients followed by alpha (last element).
        """
        alph = params[-1]
        beta = params[:-1]
        ll = _ll_nb2(self.endog, self.exog, beta, alph)
        return -ll

    def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):
        """Fit the model, supplying sensible default starting values.

        The extra parameter 'alpha' is registered in ``exog_names`` so it
        appears in the summary; the membership guard keeps repeated calls
        to ``fit`` from appending duplicate entries.
        """
        if 'alpha' not in self.exog_names:
            self.exog_names.append('alpha')
        # Identity check, not ``==``: comparing a supplied ndarray to None
        # with ``==`` broadcasts elementwise instead of testing "not given".
        if start_params is None:
            # Reasonable starting values: zero slopes, alpha = 0.5
            start_params = np.append(np.zeros(self.exog.shape[1]), .5)
            # Intercept: log of the mean response matches the log link
            start_params[-2] = np.log(self.endog.mean())
        return super(NBin, self).fit(start_params=start_params,
                                     maxiter=maxiter, maxfun=maxfun,
                                     **kwds)
# Two important things to notice:
#
# + ``nloglikeobs``: This function should return one evaluation of the negative log-likelihood function per observation in your dataset (i.e. rows of the endog/X matrix).
# + ``start_params``: A one-dimensional array of starting values needs to be provided. The size of this array determines the number of parameters that will be used in optimization.
#
# That's it! You're done!
#
# ### Usage Example
#
# The [Medpar](http://vincentarelbundock.github.com/Rdatasets/doc/COUNT/medpar.html)
# dataset is hosted in CSV format at the [Rdatasets repository](http://vincentarelbundock.github.com/Rdatasets). We use the ``read_csv``
# function from the [Pandas library](http://pandas.pydata.org) to load the data
# in memory. We then print the first few columns:
#
import statsmodels.api as sm
# Fetch the Medpar dataset from the Rdatasets repository (cached locally)
medpar = sm.datasets.get_rdataset("medpar", "COUNT", cache=True).data
medpar.head()
# The model we are interested in has a vector of non-negative integers as
# dependent variable (``los``), and 5 regressors: ``Intercept``, ``type2``,
# ``type3``, ``hmo``, ``white``.
#
# For estimation, we need to create two variables to hold our regressors and the outcome variable. These can be ndarrays or pandas objects.
y = medpar.los
X = medpar[["type2", "type3", "hmo", "white"]]
# NOTE(review): chained assignment on a DataFrame slice — pandas may raise
# SettingWithCopyWarning here; consider X = X.copy() first.
X["constant"] = 1
# Then, we fit the model and extract some information:
mod = NBin(y, X)
res = mod.fit()
# Extract parameter estimates, standard errors, p-values, AIC, etc.:
print('Parameters: ', res.params)
print('Standard errors: ', res.bse)
print('P-values: ', res.pvalues)
print('AIC: ', res.aic)
# As usual, you can obtain a full list of available information by typing
# ``dir(res)``.
# We can also look at the summary of the estimation results.
print(res.summary())
# ### Testing
# We can check the results by using the statsmodels implementation of the Negative Binomial model, which uses the analytic score function and Hessian.
res_nbin = sm.NegativeBinomial(y, X).fit(disp=0)
print(res_nbin.summary())
print(res_nbin.params)
print(res_nbin.bse)
# Or we could compare them to results obtained using the MASS implementation for R:
#
# url = 'http://vincentarelbundock.github.com/Rdatasets/csv/COUNT/medpar.csv'
# medpar = read.csv(url)
# f = los~factor(type)+hmo+white
#
# library(MASS)
# mod = glm.nb(f, medpar)
# coef(summary(mod))
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 2.31027893 0.06744676 34.253370 3.885556e-257
# factor(type)2 0.22124898 0.05045746 4.384861 1.160597e-05
# factor(type)3 0.70615882 0.07599849 9.291748 1.517751e-20
# hmo -0.06795522 0.05321375 -1.277024 2.015939e-01
# white -0.12906544 0.06836272 -1.887951 5.903257e-02
#
# ### Numerical precision
#
# The ``statsmodels`` generic MLE and ``R`` parameter estimates agree up to the fourth decimal. The standard errors, however, agree only up to the second decimal. This discrepancy is the result of imprecision in our Hessian numerical estimates. In the current context, the difference between ``MASS`` and ``statsmodels`` standard error estimates is substantively irrelevant, but it highlights the fact that users who need very precise estimates may not always want to rely on default settings when using numerical derivatives. In such cases, it is better to use analytical derivatives with the ``LikelihoodModel`` class.
#
| bsd-3-clause |
xhray/iislogs | iislogs/views.py | 1 | 1811 | # -*- coding: UTF-8 -*-
from django.shortcuts import render_to_response, render
from django.http import HttpResponseRedirect, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from models import iis_logs, hit_stats
from forms import HitStatQueryForm
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import numpy as np
import cStringIO
def list(request):
    """
    Render every stored IIS log entry together with the total row count.

    NOTE(review): this view shadows the builtin ``list``; renaming it would
    require updating the URL configuration, so it is only flagged here.
    The template receives ``total`` and ``entities`` (and ``request``)
    through ``locals()`` — local variable names are part of the contract.
    """
    total = iis_logs.objects().count()
    entities = iis_logs.objects()
    return render_to_response('list.html', locals())
@csrf_exempt
def listhitstats(request):
    """Show the top-10 hit statistics for a chosen day as a bar chart.

    On a valid POST, queries ``hit_stats`` for the requested date, draws a
    matplotlib bar chart of hit counts per URL, renders it to an in-memory
    PNG and embeds it in the page as a base64 string.  On GET (or an
    invalid form) the query form is shown.
    """
    if request.method == 'POST':
        form = HitStatQueryForm(request.POST)
        if form.is_valid():
            record_limit = 10
            date = form.cleaned_data['day']
            entities = hit_stats.objects().filter(
                id__year=date.year,
                id__month=str(date.month).zfill(2),
                id__day=str(date.day).zfill(2)
            ).order_by('-value__count').limit(record_limit)
            counts = [entity.value['count'] for entity in entities]
            labels = [entity.id['url'] for entity in entities]
            figure = Figure(figsize=(15, 25))
            axes = figure.add_subplot(111)
            axes.bar(np.arange(len(entities)), counts, alpha=0.4)
            axes.set_xticks(np.arange(record_limit))
            axes.set_xticklabels(labels, rotation=60)
            axes.set_title('sfa hit stats : {0}'.format(date))
            canvas = FigureCanvas(figure)
            png_buffer = cStringIO.StringIO()
            canvas.print_png(png_buffer, format='png')
            # Python 2 idiom: str.encode('base64') for inline embedding
            chart = png_buffer.getvalue().encode('base64').strip()
            return render(request, 'listhitstats.html',
                          {'entities': entities, 'form': form, 'chart': chart})
    else:
        form = HitStatQueryForm()
    return render(request, 'listhitstats.html', {'form': form})
inflector/opencog | opencog/python/spatiotemporal/temporal_events/relation_formulas.py | 33 | 19534 | from math import fabs, sqrt, floor
from numpy import convolve, NINF as NEGATIVE_INFINITY, PINF as POSITIVE_INFINITY
import numpy
from scipy.stats.distributions import uniform_gen
from spatiotemporal.temporal_events.util import calculate_bounds_of_probability_distribution
from spatiotemporal.temporal_interval_handling import calculateCenterMass
from spatiotemporal.time_intervals import TimeInterval
from utility.functions import FunctionPiecewiseLinear, FunctionHorizontalLinear, integral, FUNCTION_ZERO, almost_equals
# Relation degrees are floored to this precision (14 decimal digits) so that
# decomposition results compare and hash reproducibly.
DECOMPOSITION_PRECISION = 10 ** 14
__author__ = 'keyvan'

# Allen's 13 qualitative interval relations, keyed by their one-letter code
# (uppercase letters denote the inverses of the lowercase relations).
TEMPORAL_RELATIONS = {
    'p': 'precedes',
    'm': 'meets',
    'o': 'overlaps',
    'F': 'finished by',
    'D': 'contains',
    's': 'starts',
    'e': 'equals',
    'S': 'started by',
    'd': 'during',
    'f': 'finishes',
    'O': 'overlapped by',
    'M': 'met by',
    'P': 'preceded by'
}
class TemporalRelation(dict):
    """Degrees of Allen's 13 interval relations, keyed by one-letter code."""
    all_relations = 'pmoFDseSdfOMP'
    # Lazily-computed caches
    _type = None
    _list = None
    _vector = None

    @staticmethod
    def from_list(list_object):
        """Build a TemporalRelation from a 13-element sequence of degrees."""
        relation = TemporalRelation()
        for index, name in enumerate(TemporalRelation.all_relations):
            degree = list_object[index]
            if not isinstance(degree, (int, float)):
                degree = float(degree)
            relation[name] = degree
        return relation

    def to_list(self):
        """Return the degrees ordered as in ``all_relations`` (cached)."""
        if self._list is None:
            self._list = [self[name] for name in self.all_relations]
        return self._list

    def to_vector(self):
        """Return the degrees as a numpy array (cached)."""
        if self._vector is None:
            self._vector = numpy.array(self.to_list())
        return self._vector

    @property
    def type(self):
        """Codes of all relations with strictly positive degree (cached)."""
        if self._type is None:
            positives = [name for name in TemporalRelation.all_relations
                         if self[name] > 0]
            self._type = ''.join(positives)
        return self._type

    def __setitem__(self, relation_name, value):
        if relation_name not in TemporalRelation.all_relations:
            raise AttributeError("'{0}' is not a valid Allen relation'".format(relation_name))
        # Truncate to a fixed precision so decompositions compare stably
        truncated = floor(value * DECOMPOSITION_PRECISION) / DECOMPOSITION_PRECISION
        dict.__setitem__(self, relation_name, truncated)

    def __repr__(self):
        return 'TemporalRelation({0})'.format(self.type)

    def __str__(self):
        return repr(self)

    def __hash__(self):
        return hash(tuple(self.to_list()))
class BaseRelationFormula(object):
    """
    Common machinery for computing before/same/after degrees between two
    probability distributions.  Subclasses override the point-wise or
    distribution-wise comparisons.
    """
    def __init__(self):
        # Cache of distribution -> (a, b) support bounds
        self.bounds = {}

    def duration_of(self, dist):
        """Length of the support interval of *dist*."""
        lower, upper = self.bounds_of(dist)
        return fabs(lower - upper)

    def bounds_of(self, dist):
        """Compute the support bounds of *dist* and remember them."""
        computed = calculate_bounds_of_probability_distribution(dist)
        self.bounds[dist] = computed
        return computed

    def before_point(self, point_1_value, point_2_value):
        """Degree to which point 1 precedes point 2 (subclass hook)."""
        return 0

    def same_point(self, point_1_value, point_2_value):
        """Degree of simultaneity, derived from the before/after asymmetry."""
        asymmetry = fabs(self.before_point(point_1_value, point_2_value) -
                         self.after_point(point_1_value, point_2_value))
        return 1 - asymmetry

    def after_point(self, point_1_value, point_2_value):
        """Degree to which point 1 follows point 2 (mirror of before_point)."""
        return self.before_point(point_2_value, point_1_value)

    def before_integral_bounds(self, dist_1, dist_2):
        """Integration bounds for the 'before' degree: support of dist_1."""
        return calculate_bounds_of_probability_distribution(dist_1)

    def same_integral_bounds(self, dist_1, dist_2):
        """Integration bounds for 'same': intersection of both supports."""
        a_1, b_1 = calculate_bounds_of_probability_distribution(dist_1)
        a_2, b_2 = calculate_bounds_of_probability_distribution(dist_2)
        return max(a_1, a_2), min(b_1, b_2)

    def after_integral_bounds(self, dist_1, dist_2):
        """Integration bounds for the 'after' degree: support of dist_2."""
        return calculate_bounds_of_probability_distribution(dist_2)

    def before(self, dist_1, dist_2):
        """Integrate the point-wise 'before' degree over its bounds."""
        integrand = lambda x: self.before_point(dist_1.pdf(x), dist_2.pdf(x))
        return integral(integrand, *self.before_integral_bounds(dist_1, dist_2))

    def same(self, dist_1, dist_2):
        """Integrate the point-wise 'same' degree over its bounds."""
        integrand = lambda x: self.same_point(dist_1.pdf(x), dist_2.pdf(x))
        return integral(integrand, *self.same_integral_bounds(dist_1, dist_2))

    def after(self, dist_1, dist_2):
        """Integrate the point-wise 'after' degree over its bounds."""
        integrand = lambda x: self.after_point(dist_1.pdf(x), dist_2.pdf(x))
        return integral(integrand, *self.after_integral_bounds(dist_1, dist_2))

    def compare(self, dist_1, dist_2):
        """
        returns before, same and after
        """
        return (self.before(dist_1, dist_2),
                self.same(dist_1, dist_2),
                self.after(dist_1, dist_2))
class FormulaCreator(object):
    """Combines a relation formula's pairwise before/same/after comparisons
    into degrees for all 13 of Allen's interval relations."""
    def __init__(self, relation_formula):
        # relation_formula: a BaseRelationFormula subclass instance
        self.relation_formula = relation_formula
    def temporal_relations_between(self, temporal_event_1, temporal_event_2):
        """Return the TemporalRelation between two temporal events by
        comparing their beginning/ending distributions pairwise."""
        dist_1_beginning, dist_1_ending = temporal_event_1.distribution_beginning, temporal_event_1.distribution_ending
        dist_2_beginning, dist_2_ending = temporal_event_2.distribution_beginning, temporal_event_2.distribution_ending
        # Pre-populate the formula's bounds cache from the events' own
        # interval endpoints so they are not re-estimated from the pdfs
        self.relation_formula.bounds[dist_1_beginning] = temporal_event_1.a, temporal_event_1.beginning
        self.relation_formula.bounds[dist_1_ending] = temporal_event_1.ending, temporal_event_1.b
        self.relation_formula.bounds[dist_2_beginning] = temporal_event_2.a, temporal_event_2.beginning
        self.relation_formula.bounds[dist_2_ending] = temporal_event_2.ending, temporal_event_2.b
        combinations = [
            (dist_1_beginning, dist_2_beginning),
            (dist_1_beginning, dist_2_ending),
            (dist_1_ending, dist_2_beginning),
            (dist_1_ending, dist_2_ending)
        ]
        return self.calculate_relations(combinations)
    def calculate_relations(self, combinations=None):
        """
        Calculates the values of the 13 relations based on the before, same,
        and after values of the combinations between the beginning and
        ending distributions of the two intervals obtained, e.g. from
        the DecompositionFitter.
        :param combinations: the 4 combinations between beginning and ending
         distribution
        :return: a dictionary containing the 13 relations as keys and their
         degrees as values
        """
        if combinations is None:
            combinations = self.relation_formula.combinations
        dist_1_beginning, dist_2_beginning = combinations[0]
        dist_1_ending, dist_2_ending = combinations[3]
        before = {}
        same = {}
        after = {}
        # iterates over the 4 combinations between beginning and ending
        for key in combinations:
            before[key], same[key], after[key] = self.relation_formula.compare(*key)
        # Each Allen relation's degree is the product of four pairwise
        # degrees: (B1 vs B2), (B1 vs E2), (E1 vs B2), (E1 vs E2)
        result = TemporalRelation()
        result['p'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
            before[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
        result['m'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
            same[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
        result['o'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
            after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
        result['F'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
            after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
        result['D'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
            after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
        result['s'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
            after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
        result['e'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
            after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
        result['S'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
            after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
        result['d'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
            after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
        result['f'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
            after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
        result['O'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
            after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
        result['M'] = after[dist_1_beginning, dist_2_beginning] * same[dist_1_beginning, dist_2_ending] * \
            after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
        result['P'] = after[dist_1_beginning, dist_2_beginning] * after[dist_1_beginning, dist_2_ending] * \
            after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
        return result
class RelationFormulaConvolution(BaseRelationFormula):
    """Computes before/same/after by convolving dist_1 with the reversed
    dist_2: mass of the convolution left of zero gives 'before', right of
    zero gives 'after'; 'same' scales a similarity measure by correlation."""
    def function_convolution_uniform(self, bounds_1, bounds_2, probability=None):
        """Closed-form convolution of two uniform distributions: a trapezium
        whose flat top has height ``probability`` (defaults to the taller of
        the two uniform densities)."""
        a1, b1 = bounds_1
        a2, b2 = bounds_2
        length_1 = fabs(a1 - b1)
        length_2 = fabs(a2 - b2)
        convolution_bounds_a, convolution_bounds_b = a1 - b2, b1 - a2
        trapezium_0, trapezium_1 = convolution_bounds_a, convolution_bounds_a + min(length_2, length_1)
        trapezium_2, trapezium_3 = trapezium_1 + fabs(length_1 - length_2), convolution_bounds_b
        #assert trapezium_2 + min(length_2, length_1) == trapezium_3
        if probability is None:
            probability = min(1 / length_1, 1 / length_2)
        result = FunctionPiecewiseLinear(
            {trapezium_0: 0, trapezium_1: probability, trapezium_2: probability, trapezium_3: 0},
            FUNCTION_ZERO)
        result.is_normalised = True
        return result
    def function_convolution(self, dist_1, dist_2, bins=50):
        """Numerically convolve the two pdfs over ``bins`` samples and return
        the normalised piecewise-linear result; the uniform/uniform case is
        dispatched to the closed-form trapezium instead."""
        a_1, b_1, a_2, b_2 = 0, 0, 0, 0
        # Look up cached support bounds, computing and caching on a miss
        if dist_1 in self.bounds:
            a_1, b_1 = self.bounds[dist_1]
        else:
            a_1, b_1 = calculate_bounds_of_probability_distribution(dist_1)
            self.bounds[dist_1] = a_1, b_1
        if dist_2 in self.bounds:
            a_2, b_2 = self.bounds[dist_2]
        else:
            a_2, b_2 = calculate_bounds_of_probability_distribution(dist_2)
            self.bounds[dist_2] = a_2, b_2
        if (type(dist_1.dist), type(dist_2.dist)) == (uniform_gen, uniform_gen):
            return self.function_convolution_uniform((a_1, b_1), (a_2, b_2))
        convolution_bounds_a, convolution_bounds_b = min(a_1, a_2), max(b_1, b_2)
        delta = fabs(convolution_bounds_a - convolution_bounds_b) / bins
        convolution_interval = TimeInterval(convolution_bounds_a, convolution_bounds_b, bins)
        # Sample dist_1 forward and dist_2 reversed, then discretely convolve
        x = [dist_1.pdf(t) for t in convolution_interval]
        y = [dist_2.pdf(t) for t in reversed(convolution_interval)]
        c = convolve(x, y)
        dictionary_convolution = {}
        for t in xrange(len(c)):
            dictionary_convolution[delta * t] = c[t]
        # Re-center the convolution so its center of mass sits at the
        # difference of the two means
        bias = calculateCenterMass(dictionary_convolution)[0] + dist_2.mean() - dist_1.mean()
        dictionary_convolution_biased = {}
        for t in dictionary_convolution:
            dictionary_convolution_biased[t - bias] = dictionary_convolution[t]
        convolution_function = FunctionPiecewiseLinear(dictionary_convolution_biased, FunctionHorizontalLinear(0))
        return convolution_function.normalised()
    def calculate_similarity(self, dist_1, dist_2):
        """Similarity in [0, 1]: ratio of support lengths for two uniforms,
        otherwise the integral of the geometric mean of the two mean-centered
        pdfs (the Bhattacharyya coefficient of the centered densities)."""
        if (type(dist_1.dist), type(dist_2.dist)) == (uniform_gen, uniform_gen):
            length_dist_1 = self.duration_of(dist_1)
            length_dist_2 = self.duration_of(dist_2)
            return min(length_dist_1, length_dist_2) / sqrt(length_dist_1 * length_dist_2)
        dist_1_mean, dist_2_mean = dist_1.mean(), dist_2.mean()
        dist_1_transformed = lambda t: dist_1.pdf(t + dist_1_mean)
        dist_2_transformed = lambda t: dist_2.pdf(t + dist_2_mean)
        geometric_mean = lambda t: sqrt(dist_1_transformed(t) * dist_2_transformed(t))
        return integral(geometric_mean, NEGATIVE_INFINITY, POSITIVE_INFINITY)
    def compare(self, dist_1, dist_2):
        """Return (before, same, after) degrees derived from the convolution
        mass split at zero and the similarity-weighted correlation."""
        convolution = self.function_convolution(dist_1, dist_2)
        before = integral(convolution, NEGATIVE_INFINITY, 0)
        after = integral(convolution, 0, POSITIVE_INFINITY)
        similarity = self.calculate_similarity(dist_1, dist_2)
        # Correlation is 1 when mass is split evenly, 0 when fully one-sided
        correlation = 1 - fabs(before - after)
        same = similarity * correlation
        return before, same, after
class RelationFormulaGeometricMean(BaseRelationFormula):
    """Computes before/same/after from the geometric mean of the two pdfs:
    its integral gives 'same', and the remaining mass is split between
    before/after according to the standardized distributions' offset."""
    def compare(self, dist_1, dist_2):
        """Return (before, same, after); the three values sum to 1."""
        dist_1_interval = TimeInterval(*self.bounds_of(dist_1))
        dist_2_interval = TimeInterval(*self.bounds_of(dist_2))
        # 'same' is the overlap mass: integral of sqrt(pdf_1 * pdf_2)
        dictionary_input_output = {}
        for time_step in dist_1_interval + dist_2_interval:
            dictionary_input_output[time_step] = sqrt(dist_1.pdf(time_step) * dist_2.pdf(time_step))
        geometric_mean = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=FUNCTION_ZERO)
        same = integral(geometric_mean, NEGATIVE_INFINITY, POSITIVE_INFINITY)
        # Shape distance between the two distributions: differences in
        # spread, skewness and kurtosis (zero means identical shapes)
        dist_1_mean, dist_1_skewness, dist_1_kurtosis = dist_1.stats(moments='msk')
        dist_1_standard_deviation = dist_1.std()
        dist_2_mean, dist_2_skewness, dist_2_kurtosis = dist_2.stats(moments='msk')
        dist_2_standard_deviation = dist_2.std()
        distance = fabs(dist_1_standard_deviation - dist_2_standard_deviation) + fabs(dist_1_skewness - dist_2_skewness)
        distance += fabs(dist_1_kurtosis - dist_2_kurtosis)
        delta = dist_1_mean - dist_2_mean
        non_same_portion = 1.0 - same
        portion_after, portion_before = 1.0, 0.0
        if almost_equals(distance, 0):
            # Identical shapes: the sign of the mean offset decides entirely
            if delta < 0:
                portion_after, portion_before = 0.0, 1.0
        else:
            # Different shapes: split according to the mass of the scaled
            # geometric mean of the standardized pdfs on each side of delta
            dist_1_standardized_pdf = lambda x: dist_1.pdf(dist_1_standard_deviation * x + dist_1_mean)
            dist_2_standardized_pdf = lambda x: dist_2.pdf(dist_2_standard_deviation * x + dist_2_mean)
            geometric_mean = lambda t: sqrt(dist_1_standardized_pdf(t) * dist_2_standardized_pdf(t))
            geometric_mean_scaled = lambda p: geometric_mean(p / distance)
            geometric_mean_scaled_length = max(self.duration_of(dist_1), self.duration_of(dist_2))
            dictionary_input_output = {}
            for time_step in TimeInterval(-geometric_mean_scaled_length / 2.0, geometric_mean_scaled_length / 2.0):
                dictionary_input_output[time_step] = geometric_mean_scaled(time_step)
            geometric_mean_scaled = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=FUNCTION_ZERO)
            portion_after = integral(geometric_mean_scaled, NEGATIVE_INFINITY, delta)
            portion_before = integral(geometric_mean_scaled, delta, POSITIVE_INFINITY)
        after = portion_after / (portion_after + portion_before) * non_same_portion
        return 1.0 - same - after, same, after
if __name__ == '__main__':
import matplotlib.pyplot as plt
from scipy.stats import norm, uniform, expon
from spatiotemporal.temporal_events import TemporalEvent, TemporalEventPiecewiseLinear
import matplotlib.pyplot as plt
figure_number = 1
for event_1, event_2 in [
(
TemporalEvent(uniform(loc=3, scale=2), uniform(loc=7, scale=9)),
TemporalEvent(uniform(loc=0, scale=10), uniform(loc=13, scale=2))
),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=3, scale=2)),
# TemporalEvent(uniform(loc=3, scale=2), uniform(loc=6, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=8, scale=5), uniform(loc=15, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=3, scale=2), uniform(loc=13, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=7), uniform(loc=8, scale=7)),
# TemporalEvent(uniform(loc=4, scale=1), uniform(loc=11, scale=2)),
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=0, scale=11), uniform(loc=13, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=8), uniform(loc=6, scale=8)),
# TemporalEvent(uniform(loc=0, scale=22), uniform(loc=13, scale=8))
# ),
#
# (
# TemporalEvent(uniform(loc=2, scale=2), uniform(loc=7, scale=2)),
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=2), uniform(loc=4, scale=2)),
# TemporalEvent(uniform(loc=6, scale=2), uniform(loc=9, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=3), uniform(loc=15, scale=2)),
# TemporalEvent(uniform(loc=5, scale=2), uniform(loc=9, scale=3))
# ),
#
# (
# TemporalEvent(uniform(loc=5, scale=3), uniform(loc=9, scale=2)),
# TemporalEvent(uniform(loc=1, scale=2), uniform(loc=15, scale=3))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=10, scale=2)),
# TemporalEvent(uniform(loc=15, scale=2), uniform(loc=25, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=15, scale=2), uniform(loc=25, scale=2)),
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=10, scale=2))
# ),
#
# (
# TemporalEvent(norm(loc=1, scale=4.5), expon(loc=30, scale=2)),
# TemporalEvent(norm(loc=25, scale=4.5), expon(loc=60, scale=2))
# ),
#
# (
# TemporalEvent(expon(loc=1, scale=4.5), norm(loc=30, scale=2)),
# TemporalEvent(expon(loc=25, scale=4.5), norm(loc=60, scale=2))
# ),
#
# (
# TemporalEventPiecewiseLinear({1: 0, 2: 0.1, 3: 0.3, 4: 0.7, 5: 1}, {6: 1, 7: 0.9, 8: 0.6, 9: 0.1, 10: 0}),
# TemporalEventPiecewiseLinear({7.5: 0, 8.5: 0.1, 9.5: 0.3, 10.5: 0.7, 11.5: 1},
# {13: 1, 14.5: 0.9, 15.3: 0.6, 17: 0.1, 20: 0})
# ),
]:
temporal_relations = event_1 * event_2
print '\nFigure' + str(figure_number)
print '----------------------'
print sum(temporal_relations.values())
for p in 'pmoFDseSdfOMP':
print p, temporal_relations[p]
figure_number += 1
event_1.plot(show_distributions=True).ylim(ymin=-0.1, ymax=1.1)
event_2.plot(show_distributions=True).figure()
plt.show()
| agpl-3.0 |
vortex-ape/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 9 | 3600 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
import pytest
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
@pytest.mark.parametrize('sparse', (True, False))
@pytest.mark.parametrize('dtype', (int, np.float32, np.int16))
@pytest.mark.parametrize('sort', (True, False))
@pytest.mark.parametrize('iterable', (True, False))
def test_dictvectorizer(sparse, dtype, sort, iterable):
    """DictVectorizer round-trips a small dataset for every combination of
    sparse/dense output, dtype, feature sorting, and list vs. iterator input.
    """
    D = [{"foo": 1, "bar": 3},
         {"bar": 4, "baz": 2},
         {"bar": 1, "quux": 1, "quuux": 2}]

    v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
    X = v.fit_transform(iter(D) if iterable else D)

    # The container type must honour the ``sparse`` flag, and the matrix
    # must have one column per distinct feature (5 here).
    assert sp.issparse(X) == sparse
    assert X.shape == (3, 5)
    assert X.sum() == 14
    assert v.inverse_transform(X) == D

    # ``transform`` on the same data must reproduce ``fit_transform``.
    X2 = v.transform(iter(D) if iterable else D)
    if sparse:
        # CSR matrices can't be compared for equality directly; compare the
        # dense views instead.
        assert_array_equal(X.toarray(), X2.toarray())
    else:
        assert_array_equal(X, X2)

    if sort:
        assert v.feature_names_ == sorted(v.feature_names_)
def test_feature_selection():
    """Restricting a fitted DictVectorizer to a chi2-selected support keeps
    only the informative features, whether the support is given as a mask
    or as indices."""
    # make two feature dicts with two useful features and a bunch of useless
    # ones, in terms of chi2
    d1 = dict([("useless%d" % i, 10) for i in range(20)],
              useful1=1, useful2=20)
    d2 = dict([("useless%d" % i, 10) for i in range(20)],
              useful1=20, useful2=1)

    for indices in (True, False):
        v = DictVectorizer().fit([d1, d2])
        X = v.transform([d1, d2])
        sel = SelectKBest(chi2, k=2).fit(X, [0, 1])

        v.restrict(sel.get_support(indices=indices), indices=indices)
        # Only the two discriminative features should survive.
        assert v.get_feature_names() == ["useful1", "useful2"]
def test_one_of_k():
    """String-valued features are one-hot encoded as ``name=value`` columns;
    numeric features are passed through unchanged."""
    D_in = [{"version": "1", "ham": 2},
            {"version": "2", "spam": .3},
            {"version=3": True, "spam": -1}]
    v = DictVectorizer()
    X = v.fit_transform(D_in)
    assert X.shape == (3, 5)

    # Inverse transform reports the one-hot column name, not the original
    # string-valued feature.
    D_out = v.inverse_transform(X)
    assert D_out[0] == {"version=1": 1, "ham": 2}

    names = v.get_feature_names()
    assert "version=2" in names
    assert "version" not in names
def test_unseen_or_no_features():
    """Unseen features and empty dicts transform to all-zero rows; an empty
    *sequence* of samples raises a ValueError."""
    D = [{"camelot": 0, "spamalot": 1}]
    for sparse in [True, False]:
        v = DictVectorizer(sparse=sparse).fit(D)

        # A dict containing only unseen features maps to a zero row.
        X = v.transform({"push the pram a lot": 2})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))

        # An empty dict also maps to a zero row.
        X = v.transform({})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))

        # BUG FIX: the original try/except silently passed when transform([])
        # raised nothing at all. pytest.raises makes the absence of the
        # expected ValueError a test failure.
        with pytest.raises(ValueError, match="empty"):
            v.transform([])
def test_deterministic_vocabulary():
    """The learned vocabulary must not depend on dict insertion order."""
    # Generate equal dictionaries with different memory layouts
    items = [("%03d" % i, i) for i in range(1000)]
    rng = Random(42)
    d_sorted = dict(items)
    rng.shuffle(items)
    d_shuffled = dict(items)

    # check that the memory layout does not impact the resulting vocabulary
    v_1 = DictVectorizer().fit([d_sorted])
    v_2 = DictVectorizer().fit([d_shuffled])
    assert v_1.vocabulary_ == v_2.vocabulary_
| bsd-3-clause |
boada/planckClusters | observing/mkObservingPlan_mosaic.py | 1 | 3578 | import astroplan
from astroplan import Observer, FixedTarget, ObservingBlock
from astroplan.constraints import AtNightConstraint, AirmassConstraint,\
TimeConstraint
from astroplan.scheduling import Transitioner, SequentialScheduler, Schedule,\
PriorityScheduler
from astropy.coordinates import SkyCoord
from astropy.time import Time, TimeDelta
from astropy import units as u
import pandas as pd
import numpy as np
from astLib import astCoords
def mst2utc(time):
    ''' Converts from MST to UTC '''
    # MST is UTC-7, so adding seven hours yields the UTC time.
    seven_hours = TimeDelta(7 * 3600, format='sec')
    return time + seven_hours
def utc2mts(time):
    ''' Converts from UTC to MST '''
    # NOTE(review): the name looks like a typo for ``utc2mst``; left as-is
    # because renaming would break any external callers.
    seven_hours = TimeDelta(7 * 3600, format='sec')
    return time - seven_hours
# load the data from the CSV of the spreadsheet
data = pd.read_csv('./catalogs/PSZ2_unconfirmed_catalog.csv')
# figure out which we need to observe: keep rows with no 'Optical run1'
# entry yet, then keep only targets with SNR above 5
mask = pd.isnull(data['Optical run1'])
data = data[mask]
mask = data['SNR'] > 5.
data = data[mask]
# make telescope location
kpno = Observer.at_site('KPNO', timezone='US/Mountain')
# make target coordinates (ICRS, degrees) from the catalog RA/DEC columns
coords = [SkyCoord(ra, dec, unit='deg', frame='icrs')
          for ra, dec in zip(data['RA'], data['DEC'])]
# make all the targets
targets = [FixedTarget(name=name, coord=coor)
           for name, coor in zip(data['Name'], coords)]
# make the observing time - Local time.
start_time_mst = Time('2016-11-23 18:30')
end_time_mst = Time('2016-11-26 07:00')
night_time = end_time_mst - start_time_mst
# sample 75 evenly spaced times across the run for observability checks
observable_time_mst = start_time_mst + night_time * np.linspace(0, 1, 75)
# convert everything to UTC
start_time_utc = mst2utc(start_time_mst)
end_time_utc = mst2utc(end_time_mst)
observable_time_utc = mst2utc(observable_time_mst)
# now we figure out what is observable at all with some simple constraints;
# boolean_constraint=False makes the airmass constraint return scores
constraint = [AtNightConstraint.twilight_civil(),
              AirmassConstraint(max=2.7, boolean_constraint=False)]
#observable = astroplan.is_observable(constraint, kpno, targets,
#                                     times=observable_time_utc)
#data['Observable'] = observable
#data.to_csv('updated.csv')
# now we make the exposure times
exp_time_single_gr = 90 * u.second
exp_time_single_iz = 275 * u.second
readout_time = 30 * u.second
settle_time = 10 * u.second
n_coadd = 1
n_dither = 4
# NOTE(review): exp_time_tot_gr/_iz are computed but not used below in this
# script -- presumably kept as a planning reference; verify before removing.
exp_time_tot_gr = n_coadd * n_dither * (
    exp_time_single_gr + readout_time + settle_time)
exp_time_tot_iz = n_coadd * n_dither * (
    exp_time_single_iz + readout_time + settle_time)
# rank targets by SNR: rank 1 = highest SNR = highest scheduling priority
array = data['SNR'].values
temp = array.argsort()[::-1]  # sort high to low
ranks = np.empty(len(array), int)
ranks[temp] = np.arange(len(array)) + 1
# observing blocks: one fixed 3600 s block per target, priority = SNR rank
blocks = []
for index, t in enumerate(targets):
    b = ObservingBlock(t,
                       3600 * u.second,
                       priority=ranks[index],
                       constraints=constraint,
                       configuration={'SNR':
                                      '%.4f' % data['SNR'].values[index]})
    blocks.append(b)
# now we make a transitioner to model slew overhead between blocks
slew_rate = .5 * u.deg / u.second
transitioner = Transitioner(slew_rate)
# Initialize the priority scheduler with the constraints and transitioner
prior_scheduler = PriorityScheduler(constraints=constraint,
                                    observer=kpno,
                                    transitioner=transitioner)
# Initialize a Schedule object, to contain the new schedule
priority_schedule = Schedule(start_time_utc, end_time_utc)
# run the scheduler; on failure, dump whatever partial schedule was built
try:
    prior_scheduler(blocks, priority_schedule)
except ValueError:
    print(priority_schedule.to_table())
| mit |
georgetown-analytics/bike-psychics | GitCapBikeFeatures.py | 1 | 3354 | # Author: Selma Gomez Orr <selmagomezorr@gmail.com> Copyright (C) May 2, 2015
##########################################################################
## Imports
##########################################################################
import os
import pandas as pd
import numpy as np
import xlrd
from sklearn.cross_validation import train_test_split as tts
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
from pandas.tools.plotting import scatter_matrix
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.tree import DecisionTreeRegressor
##########################################################################
## Module Constants
##########################################################################
# Paths are resolved relative to this script's directory.
DIRNAME = os.path.dirname(__file__)
DATAPATH = os.path.join(DIRNAME, 'CapBikeDataLogUpdate.xlsx')
GRAPHPATH = os.path.join(DIRNAME, 'CapBikeScatter.png')
##########################################################################
## A program for exploration with features. Not used for final project results.
## Loads the bike data, scatter-plots it, selects the 9 best features, and
## compares OLS, Ridge, RidgeCV, and decision-tree regressors on one split.
## (Python 2 syntax: uses print statements.)
##########################################################################
if __name__== "__main__":
    #Import the data frames
    df = pd.read_excel(DATAPATH,index_col=0)
    print df.head()
    #Get a summary of the data and print it.
    Stats = df.describe()
    print Stats
    #Create a scatter plot of all the variables and display it.
    scatter_matrix(df, alpha=0.2, figsize=(10,10), diagonal='kde')
    scatter_fig=plt.gcf()
    plt.show()
    #Save the scatter plot to file.
    scatter_fig.savefig(GRAPHPATH)
    #Get names of the dependent variables (skips the first two columns;
    #'y' below is the target column).
    X_var = list(df.columns.values)[2:]
    #Convert the dataframe into numpy arrays for use with sklearn
    data = np.array(df[X_var])
    target = np.array(df['y'])
    print data.shape
    #Select K best features by univariate F-test regression score
    data_new = SelectKBest(f_regression, k=9).fit_transform(data, target)
    print data_new.shape
    #Train the data (80/20 train/test split)
    splits = tts(data_new, target, test_size=0.20)
    X_train, X_test, y_train, y_test = splits
    print X_train.shape
    print y_train.shape
    #Run the OLS model.
    regr = linear_model.LinearRegression()
    regr.fit(X_train, y_train)
    print
    print "OLS Results"
    print regr.coef_
    print regr.intercept_
    print mean_squared_error(y_test, regr.predict(X_test))
    print regr.score(X_test, y_test)
    #Run the Ridge model.
    clf = linear_model.Ridge(alpha=0.5)
    clf.fit(X_train, y_train)
    print
    print "Ridge Results"
    print mean_squared_error(y_test, clf.predict(X_test))
    print clf.score(X_test, y_test)
    #Run the RidgeCV model, searching 200 alphas on a log grid with 12 folds.
    n_alphas = 200
    alphas = np.logspace(-10,-2,n_alphas)
    clf = linear_model.RidgeCV(alphas=alphas, cv=12)
    clf.fit(X_train, y_train)
    print
    print "CVRidge Results"
    print clf.alpha_
    print mean_squared_error(y_test, clf.predict(X_test))
    print clf.score(X_test, y_test)
    #Run the Decision Tree model at two depths to compare under/over-fitting.
    #(NOTE(review): "Resutls" below is a typo in the printed string; left
    # unchanged here since it is runtime output.)
    clf_1 = DecisionTreeRegressor(max_depth=2)
    clf_2 = DecisionTreeRegressor(max_depth=5)
    clf_1.fit(X_train, y_train)
    clf_2.fit(X_train, y_train)
    print
    print "Decision Tree Resutls"
    print mean_squared_error(y_test, clf_1.predict(X_test))
    print mean_squared_error(y_test, clf_2.predict(X_test))
    print clf_1.score(X_test, y_test)
    print clf_2.score(X_test, y_test)
    print data_new[2]
| mit |
chrsrds/scikit-learn | sklearn/cluster/spectral.py | 2 | 21118 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.

    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.

    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails

    n_iter_max : int, optional, default: 30
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached

    random_state : int, RandomState instance or None (default)
        Determines random number generation for rotation matrix
        initialization. Use an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----

    The eigenvector embedding is used to iteratively search for the
    closest discrete partition. First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated. Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated. These two
    calculations are performed until convergence. The discrete partition
    matrix is returned as the clustering solution. Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.

    """
    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element. This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors. Samples should lie on the unit
    # hypersphere centered at the origin. This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:
        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # BUG FIX: the restart counter must be incremented when the
                # SVD *fails* (here), not on the success path as before.
                # Otherwise the outer loop's ``svd_restarts <
                # max_svd_restarts`` bound is never reached on repeated
                # failures, and it could retry forever.
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            # Normalized-cut objective; converged when it stops changing to
            # machine precision or the iteration budget is exhausted.
            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        eigen_tol=0.0, assign_labels='kmeans'):
    """Cluster samples via a projection of the normalized Laplacian.

    Spectral clustering shines when individual clusters are highly
    non-convex, or more generally when a measure of the center and spread
    of a cluster does not describe it well -- e.g. nested circles on the
    2D plane. If ``affinity`` is the adjacency matrix of a graph, this
    function computes normalized graph cuts.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    ----------
    affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
        Affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**. Examples: the adjacency matrix of a
        graph, a heat kernel of the pairwise distance matrix, or a
        symmetric k-nearest-neighbours connectivity matrix.

    n_clusters : integer, optional
        Number of clusters to extract.

    n_components : integer, optional, default is n_clusters
        Number of eigen vectors to use for the spectral embedding.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed; it can be faster on very large sparse problems
        but may also lead to instabilities.

    random_state : int, RandomState instance or None (default)
        A pseudo random number generator used for the initialization of
        the lobpcg eigen vectors decomposition when
        ``eigen_solver == 'amg'`` and by the K-Means initialization. Use
        an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.

    n_init : int, optional, default: 10
        Number of times the k-means algorithm is run with different
        centroid seeds; the best output in terms of inertia is kept.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using the arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        Strategy used to assign labels in the embedding space. 'kmeans'
        is a popular choice but can be sensitive to initialization;
        'discretize' is less so (see the 'Multiclass spectral clustering'
        reference below).

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----
    The graph should contain only one connected component; otherwise the
    results make little sense. For k=2 this solves the normalized cut,
    i.e. it is a normalized spectral clustering.
    """
    # Validate the label-assignment strategy up front.
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)

    random_state = check_random_state(random_state)
    if n_components is None:
        n_components = n_clusters

    # The first eigenvector is constant only for fully connected graphs
    # and must be kept for spectral clustering (drop_first=False); see the
    # spectral_embedding documentation.
    maps = spectral_embedding(affinity, n_components=n_components,
                              eigen_solver=eigen_solver,
                              random_state=random_state,
                              eigen_tol=eigen_tol, drop_first=False)

    if assign_labels == 'discretize':
        return discretize(maps, random_state=random_state)

    _, labels, _ = k_means(maps, n_clusters, random_state=random_state,
                           n_init=n_init)
    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply clustering to a projection of the normalized Laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    When calling ``fit``, an affinity matrix is constructed using either
    kernel function such the Gaussian (aka RBF) kernel of the euclidean
    distanced ``d(X, X)``::

            np.exp(-gamma * d(X,X) ** 2)

    or a k-nearest neighbors connectivity matrix.

    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    ----------
    n_clusters : integer, optional
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    n_components : integer, optional, default=n_clusters
        Number of eigen vectors to use for the spectral embedding

    random_state : int, RandomState instance or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when ``eigen_solver='amg'`` and by
        the K-Means initialization. Use an int to make the randomness
        deterministic.
        See :term:`Glossary <random_state>`.

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    gamma : float, default=1.0
        Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
        Ignored for ``affinity='nearest_neighbors'``.

    affinity : string, array-like or callable, default 'rbf'
        If a string, this may be one of 'nearest_neighbors', 'precomputed',
        'rbf' or one of the kernels supported by
        `sklearn.metrics.pairwise_kernels`.

        Only kernels that produce similarity scores (non-negative values that
        increase with similarity) should be used. This property is not checked
        by the clustering algorithm.

    n_neighbors : integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when ``eigen_solver='arpack'``.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another approach
        which is less sensitive to random initialization.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : dictionary of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.

    n_jobs : int or None, optional (default=None)
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.

    labels_ : array, shape (n_samples,)
        Labels of each point

    Examples
    --------
    >>> from sklearn.cluster import SpectralClustering
    >>> import numpy as np
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> clustering = SpectralClustering(n_clusters=2,
    ...         assign_labels="discretize",
    ...         random_state=0).fit(X)
    >>> clustering.labels_
    array([1, 1, 1, 0, 0, 0])
    >>> clustering
    SpectralClustering(assign_labels='discretize', n_clusters=2,
        random_state=0)

    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::

        np.exp(- dist_matrix ** 2 / (2. * delta ** 2))

    Where ``delta`` is a free parameter representing the width of the Gaussian
    kernel.

    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.

    If the pyamg package is installed, it is used: this greatly
    speeds up computation.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """

    def __init__(self, n_clusters=8, eigen_solver=None, n_components=None,
                 random_state=None, n_init=10, gamma=1., affinity='rbf',
                 n_neighbors=10, eigen_tol=0.0, assign_labels='kmeans',
                 degree=3, coef0=1, kernel_params=None, n_jobs=None):
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.n_components = n_components
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
        self.n_jobs = n_jobs

    def fit(self, X, y=None):
        """Perform spectral clustering from features, or affinity matrix.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features), or \
                array-like, shape (n_samples, n_samples)
            Training instances to cluster, or similarities / affinities between
            instances if ``affinity='precomputed'``. If a sparse matrix is
            provided in a format other than ``csr_matrix``, ``csc_matrix``,
            or ``coo_matrix``, it will be converted into a sparse
            ``csr_matrix``.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        self

        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64, ensure_min_samples=2)
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            # BUG FIX: the adjacent string literals below were missing a
            # space, so the emitted warning read "``fit``now constructs".
            warnings.warn("The spectral clustering API has changed. ``fit`` "
                          "now constructs an affinity matrix from data. To use"
                          " a custom affinity matrix, "
                          "set ``affinity=precomputed``.")

        if self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True,
                                            n_jobs=self.n_jobs)
            # Symmetrize the kNN graph, as required by spectral_clustering.
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            params = self.kernel_params
            if params is None:
                params = {}
            if not callable(self.affinity):
                params['gamma'] = self.gamma
                params['degree'] = self.degree
                params['coef0'] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
                                                     filter_params=True,
                                                     **params)

        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           n_components=self.n_components,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels)
        return self

    def fit_predict(self, X, y=None):
        """Perform spectral clustering from features, or affinity matrix,
        and return cluster labels.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features), or \
                array-like, shape (n_samples, n_samples)
            Training instances to cluster, or similarities / affinities between
            instances if ``affinity='precomputed'``. If a sparse matrix is
            provided in a format other than ``csr_matrix``, ``csc_matrix``,
            or ``coo_matrix``, it will be converted into a sparse
            ``csr_matrix``.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        labels : ndarray, shape (n_samples,)
            Cluster labels.
        """
        return super().fit_predict(X, y)

    @property
    def _pairwise(self):
        # Tells sklearn's common machinery that X is a square affinity
        # matrix (not a feature matrix) when affinity is precomputed.
        return self.affinity == "precomputed"
| bsd-3-clause |
weidel-p/nest-simulator | pynest/examples/repeated_stimulation.py | 2 | 4210 | # -*- coding: utf-8 -*-
#
# repeated_stimulation.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Repeated Stimulation
--------------------
Simple example for how to repeat a stimulation protocol
using the ``origin`` property of devices.
In this example, a ``poisson_generator`` generates a spike train that is
recorded directly by a ``spike_detector``, using the following paradigm:
1. A single trial last for 1000 ms.
2. Within each trial, the ``poisson_generator`` is active from 100 ms to 500 ms.
We achieve this by defining the `start` and `stop` properties of the
generator to 100 ms and 500 ms, respectively, and setting the ``origin`` to the
simulation time at the beginning of each trial. Start and stop are interpreted
relative to the ``origin``.
"""
###############################################################################
# First, the modules needed for simulation and analyis are imported.
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
###############################################################################
# Second, we set the parameters so the ``poisson_generator`` generates 1000
# spikes per second and is active from 100 to 500 ms
rate = 1000.0  # generator rate in spikes/s
start = 100.0  # start of stimulation relative to trial start, in ms
stop = 500.0  # end of stimulation relative to trial start, in ms

###############################################################################
# The simulation is supposed to take 1s (1000 ms) and is repeated 5 times
trial_duration = 1000.0  # trial duration, in ms
num_trials = 5  # number of trials to perform

###############################################################################
# Third, the network is set up. We reset the kernel and create a
# ``poisson_generator``, in which the handle is stored in `pg`.
#
# The parameters for rate and start and stop of activity are given as optional
# parameters in the form of a dictionary.
nest.ResetKernel()
pg = nest.Create('poisson_generator',
                 params={'rate': rate,
                         'start': start,
                         'stop': stop}
                 )

###############################################################################
# The ``spike_detector`` is created and the handle stored in `sd`.
sd = nest.Create('spike_detector')

###############################################################################
# The ``Connect`` function connects the nodes so spikes from pg are collected by
# the ``spike_detector`` `sd`
nest.Connect(pg, sd)

###############################################################################
# Before each trial, we set the ``origin`` of the ``poisson_generator`` to the
# current simulation time. This automatically sets the start and stop time of
# the ``poisson_generator`` to the specified times with respect to the origin.
# The simulation is then carried out for the specified time in trial_duration.
for n in range(num_trials):
    pg.origin = nest.GetKernelStatus('time')
    nest.Simulate(trial_duration)

###############################################################################
# Now we plot the result, including a histogram using the ``nest.raster_plot``
# function. Note: The histogram will show spikes seemingly located before
# 100 ms into each trial. This is due to sub-optimal automatic placement of
# histogram bin borders.
nest.raster_plot.from_device(sd, hist=True, hist_binwidth=100.,
                             title='Repeated stimulation by Poisson generator')
plt.show()
| gpl-2.0 |
TomAugspurger/pandas | pandas/tests/base/test_conversion.py | 1 | 14519 | import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import CategoricalIndex, Series, Timedelta, Timestamp
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
IntervalArray,
PandasArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
class TestToIterable:
    # test that we convert an iterable to python types

    # (dtype string, expected boxed scalar type when iterating)
    dtypes = [
        ("int8", int),
        ("int16", int),
        ("int32", int),
        ("int64", int),
        ("uint8", int),
        ("uint16", int),
        ("uint32", int),
        ("uint64", int),
        ("float16", float),
        ("float32", float),
        ("float64", float),
        ("datetime64[ns]", Timestamp),
        ("datetime64[ns, US/Eastern]", Timestamp),
        ("timedelta64[ns]", Timedelta),
    ]

    @pytest.mark.parametrize("dtype, rdtype", dtypes)
    @pytest.mark.parametrize(
        "method",
        [
            lambda x: x.tolist(),
            lambda x: x.to_list(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ],
        ids=["tolist", "to_list", "list", "iter"],
    )
    def test_iterable(self, index_or_series, method, dtype, rdtype):
        # gh-10904
        # gh-13258
        # coerce iteration to underlying python / pandas types
        typ = index_or_series
        s = typ([1], dtype=dtype)
        result = method(s)[0]
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize(
        "dtype, rdtype, obj",
        [
            ("object", object, "a"),
            ("object", int, 1),
            ("category", object, "a"),
            ("category", int, 1),
        ],
    )
    @pytest.mark.parametrize(
        "method",
        [
            lambda x: x.tolist(),
            lambda x: x.to_list(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ],
        ids=["tolist", "to_list", "list", "iter"],
    )
    def test_iterable_object_and_category(
        self, index_or_series, method, dtype, rdtype, obj
    ):
        # gh-10904
        # gh-13258
        # coerce iteration to underlying python / pandas types
        typ = index_or_series
        s = typ([obj], dtype=dtype)
        result = method(s)[0]
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize("dtype, rdtype", dtypes)
    def test_iterable_items(self, dtype, rdtype):
        # gh-13258
        # test if items yields the correct boxed scalars
        # this only applies to series
        s = Series([1], dtype=dtype)
        _, result = list(s.items())[0]
        assert isinstance(result, rdtype)

        # second call must give the same boxing (no state between iterations)
        _, result = list(s.items())[0]
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize(
        "dtype, rdtype", dtypes + [("object", int), ("category", int)]
    )
    def test_iterable_map(self, index_or_series, dtype, rdtype):
        # gh-13236
        # coerce iteration to underlying python / pandas types
        typ = index_or_series
        s = typ([1], dtype=dtype)
        result = s.map(type)[0]
        if not isinstance(rdtype, tuple):
            rdtype = tuple([rdtype])
        assert result in rdtype

    @pytest.mark.parametrize(
        "method",
        [
            lambda x: x.tolist(),
            lambda x: x.to_list(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ],
        ids=["tolist", "to_list", "list", "iter"],
    )
    def test_categorial_datetimelike(self, method):
        # iterating a datetime-like CategoricalIndex must yield Timestamps
        i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])

        result = method(i)[0]
        assert isinstance(result, Timestamp)

    def test_iter_box(self):
        # iterating a Series boxes values into the proper pandas scalar types
        vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
        s = Series(vals)
        assert s.dtype == "datetime64[ns]"
        for res, exp in zip(s, vals):
            assert isinstance(res, Timestamp)
            assert res.tz is None
            assert res == exp

        vals = [
            Timestamp("2011-01-01", tz="US/Eastern"),
            Timestamp("2011-01-02", tz="US/Eastern"),
        ]
        s = Series(vals)

        assert s.dtype == "datetime64[ns, US/Eastern]"
        for res, exp in zip(s, vals):
            assert isinstance(res, Timestamp)
            assert res.tz == exp.tz
            assert res == exp

        # timedelta
        vals = [Timedelta("1 days"), Timedelta("2 days")]
        s = Series(vals)
        assert s.dtype == "timedelta64[ns]"
        for res, exp in zip(s, vals):
            assert isinstance(res, Timedelta)
            assert res == exp

        # period
        vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
        s = Series(vals)
        assert s.dtype == "Period[M]"
        for res, exp in zip(s, vals):
            assert isinstance(res, pd.Period)
            assert res.freq == "M"
            assert res == exp
@pytest.mark.parametrize(
    "array, expected_type, dtype",
    [
        (np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
        (np.array(["a", "b"]), np.ndarray, "object"),
        (pd.Categorical(["a", "b"]), pd.Categorical, "category"),
        (
            pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
            DatetimeArray,
            "datetime64[ns, US/Central]",
        ),
        (
            pd.PeriodIndex([2018, 2019], freq="A"),
            PeriodArray,
            pd.core.dtypes.dtypes.PeriodDtype("A-DEC"),
        ),
        (pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval",),
        # This test is currently failing for datetime64[ns] and timedelta64[ns].
        # The NumPy type system is sufficient for representing these types, so
        # we just use NumPy for Series / DataFrame columns of these types (so
        # we get consolidation and so on).
        # However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
        # abstraction to for code reuse.
        # At the moment, we've judged that allowing this test to fail is more
        # practical that overriding Series._values to special case
        # Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray.
        pytest.param(
            pd.DatetimeIndex(["2017", "2018"]),
            np.ndarray,
            "datetime64[ns]",
            marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
        ),
        pytest.param(
            pd.TimedeltaIndex([10 ** 10]),
            np.ndarray,
            "m8[ns]",
            marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
        ),
    ],
)
def test_values_consistent(array, expected_type, dtype):
    # Series._values and Index._values must agree in both type and content
    l_values = pd.Series(array)._values
    r_values = pd.Index(array)._values
    assert type(l_values) is expected_type
    assert type(l_values) is type(r_values)

    tm.assert_equal(l_values, r_values)
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
ser = pd.Series(arr)
result = ser.array
expected = PandasArray(arr)
tm.assert_extension_array_equal(result, expected)
def test_numpy_array_all_dtypes(any_numpy_dtype):
    """Series.array returns the dtype-appropriate extension-array wrapper."""
    series = pd.Series(dtype=any_numpy_dtype)
    arr = series.array
    # datetime64 / timedelta64 get dedicated array types; everything else
    # falls back to the generic ndarray wrapper.
    if is_datetime64_dtype(any_numpy_dtype):
        expected_cls = DatetimeArray
    elif is_timedelta64_dtype(any_numpy_dtype):
        expected_cls = TimedeltaArray
    else:
        expected_cls = PandasArray
    assert isinstance(arr, expected_cls)
@pytest.mark.parametrize(
    "array, attr",
    [
        (pd.Categorical(["a", "b"]), "_codes"),
        (pd.core.arrays.period_array(["2000", "2001"], freq="D"), "_data"),
        (pd.core.arrays.integer_array([0, np.nan]), "_data"),
        (IntervalArray.from_breaks([0, 1]), "_left"),
        (SparseArray([0, 1]), "_sparse_values"),
        (DatetimeArray(np.array([1, 2], dtype="datetime64[ns]")), "_data"),
        # tz-aware Datetime
        (
            DatetimeArray(
                np.array(
                    ["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]"
                ),
                dtype=DatetimeTZDtype(tz="US/Central"),
            ),
            "_data",
        ),
    ],
)
def test_array(array, attr, index_or_series):
    # ``.array`` must hand back the very same extension array (no copy):
    # compare the named backing attribute by identity.
    box = index_or_series
    if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
        pytest.skip(f"No index type for {array.dtype}")
    result = box(array, copy=False).array

    if attr:
        array = getattr(array, attr)
        result = getattr(result, attr)
    assert result is array
def test_array_multiindex_raises():
    """A MultiIndex has no single backing array, so ``.array`` must raise."""
    mi = pd.MultiIndex.from_product([["A"], ["a", "b"]])
    expected_msg = "MultiIndex has no single backing array"
    with pytest.raises(ValueError, match=expected_msg):
        mi.array
@pytest.mark.parametrize(
    "array, expected",
    [
        (np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64)),
        (pd.Categorical(["a", "b"]), np.array(["a", "b"], dtype=object)),
        (
            pd.core.arrays.period_array(["2000", "2001"], freq="D"),
            np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]),
        ),
        (
            pd.core.arrays.integer_array([0, np.nan]),
            np.array([0, pd.NA], dtype=object),
        ),
        (
            IntervalArray.from_breaks([0, 1, 2]),
            np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object),
        ),
        (SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)),
        # tz-naive datetime
        (
            DatetimeArray(np.array(["2000", "2001"], dtype="M8[ns]")),
            np.array(["2000", "2001"], dtype="M8[ns]"),
        ),
        # tz-aware stays tz-aware
        (
            DatetimeArray(
                np.array(
                    ["2000-01-01T06:00:00", "2000-01-02T06:00:00"], dtype="M8[ns]"
                ),
                dtype=DatetimeTZDtype(tz="US/Central"),
            ),
            np.array(
                [
                    pd.Timestamp("2000-01-01", tz="US/Central"),
                    pd.Timestamp("2000-01-02", tz="US/Central"),
                ]
            ),
        ),
        # Timedelta
        (
            TimedeltaArray(np.array([0, 3600000000000], dtype="i8"), freq="H"),
            np.array([0, 3600000000000], dtype="m8[ns]"),
        ),
    ],
)
def test_to_numpy(array, expected, index_or_series):
    # ``to_numpy()`` unboxes each array type into the expected plain ndarray
    box = index_or_series
    thing = box(array)

    if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
        pytest.skip(f"No index type for {array.dtype}")

    result = thing.to_numpy()
    tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize(
"arr", [np.array([1, 2, 3], dtype="int64"), np.array(["a", "b", "c"], dtype=object)]
)
def test_to_numpy_copy(arr, as_series):
obj = pd.Index(arr, copy=False)
if as_series:
obj = pd.Series(obj.values, copy=False)
# no copy by default
result = obj.to_numpy()
assert np.shares_memory(arr, result) is True
result = obj.to_numpy(copy=False)
assert np.shares_memory(arr, result) is True
# copy=True
result = obj.to_numpy(copy=True)
assert np.shares_memory(arr, result) is False
@pytest.mark.parametrize("as_series", [True, False])
def test_to_numpy_dtype(as_series):
tz = "US/Eastern"
obj = pd.DatetimeIndex(["2000", "2001"], tz=tz)
if as_series:
obj = pd.Series(obj)
# preserve tz by default
result = obj.to_numpy()
expected = np.array(
[pd.Timestamp("2000", tz=tz), pd.Timestamp("2001", tz=tz)], dtype=object
)
tm.assert_numpy_array_equal(result, expected)
result = obj.to_numpy(dtype="object")
tm.assert_numpy_array_equal(result, expected)
result = obj.to_numpy(dtype="M8[ns]")
expected = np.array(["2000-01-01T05", "2001-01-01T05"], dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
    "values, dtype, na_value, expected",
    [
        ([1, 2, None], "float64", 0, [1.0, 2.0, 0.0]),
        (
            [pd.Timestamp("2000"), pd.Timestamp("2000"), pd.NaT],
            None,
            pd.Timestamp("2000"),
            [np.datetime64("2000-01-01T00:00:00.000000000")] * 3,
        ),
    ],
)
def test_to_numpy_na_value_numpy_dtype(
    index_or_series, values, dtype, na_value, expected
):
    # ``na_value`` replaces the missing entries in the resulting ndarray
    obj = index_or_series(values)
    result = obj.to_numpy(dtype=dtype, na_value=na_value)
    expected = np.array(expected)
    tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_kwargs_raises():
    """Unknown keyword arguments are rejected for numpy and extension dtypes alike."""
    msg = r"to_numpy\(\) got an unexpected keyword argument 'foo'"
    # None -> default numpy-backed Series; "Int64" -> extension-backed Series
    for dtype in (None, "Int64"):
        series = pd.Series([1, 2, 3], dtype=dtype)
        with pytest.raises(TypeError, match=msg):
            series.to_numpy(foo=True)
@pytest.mark.parametrize(
    "data",
    [
        {"a": [1, 2, 3], "b": [1, 2, None]},
        {"a": np.array([1, 2, 3]), "b": np.array([1, 2, np.nan])},
        {"a": pd.array([1, 2, 3]), "b": pd.array([1, 2, None])},
    ],
)
@pytest.mark.parametrize("dtype, na_value", [(float, np.nan), (object, None)])
def test_to_numpy_dataframe_na_value(data, dtype, na_value):
    # DataFrame.to_numpy honors ``na_value`` across backing array types
    # https://github.com/pandas-dev/pandas/issues/33820
    df = pd.DataFrame(data)
    result = df.to_numpy(dtype=dtype, na_value=na_value)
    expected = np.array([[1, 1], [2, 2], [3, na_value]], dtype=dtype)
    tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
    "data, expected",
    [
        (
            {"a": pd.array([1, 2, None])},
            np.array([[1.0], [2.0], [np.nan]], dtype=float),
        ),
        (
            {"a": [1, 2, 3], "b": [1, 2, 3]},
            np.array([[1, 1], [2, 2], [3, 3]], dtype=float),
        ),
    ],
)
def test_to_numpy_dataframe_single_block(data, expected):
    # single-block frames take a fast path; it must still honor dtype/na_value
    # https://github.com/pandas-dev/pandas/issues/33820
    df = pd.DataFrame(data)
    result = df.to_numpy(dtype=float, na_value=np.nan)
    tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_dataframe_single_block_no_mutate():
    """DataFrame.to_numpy(na_value=...) must not modify the frame in place."""
    # https://github.com/pandas-dev/pandas/issues/33820
    values = np.array([1.0, 2.0, np.nan])
    df = pd.DataFrame(values.copy())
    df.to_numpy(na_value=0.0)
    tm.assert_frame_equal(df, pd.DataFrame(values))
| bsd-3-clause |
lhirschfeld/JargonBot | jargonbot.py | 1 | 5805 | # Lior Hirschfeld
# JargonBot
# -- Imports --
import re
import pickle
import random
import praw
from custombot import RedditBot
from time import sleep
from define import getDefinition
from collections import Counter
from nltk.stem import *
from sklearn import linear_model
# -- Setup Variables --
jargonBot = RedditBot('jargonBot')
stemmer = PorterStemmer()
# "count.txt" lists words ordered by frequency (most common first);
# keep both the raw words and their stemmed forms.
with open('count.txt', 'r') as handle:
    count = [line.split()[0] for line in handle.readlines()]
countStemmed = [stemmer.stem(word) for word in count]
# Per-subreddit word-frequency Counters, persisted between runs.
with open('languages.pickle', 'rb') as handle:
    languages = pickle.load(handle)
# -- Methods --
def jargon(lim, rate, subs, ml=False):
    """Run one full pass of the bot (thin wrapper around searchReddit)."""
    searchReddit(lim, rate, subs, ml)
# Search Reddit for words that need to be defined, and define them.
def searchReddit(lim, rate, subs, ml):
    """Scan each subreddit, reply to jargon, persist state, then sleep.

    lim  -- number of hot submissions to scan per subreddit
    rate -- seconds to sleep after the full pass
    subs -- list of subreddit names
    ml   -- when True, use and update the per-subreddit reply models
    """
    for sub in subs:
        searchSub(sub, lim, ml)
    jargonBot.updateIds()
    if ml:
        jargonBot.updateModels(["popularity", "wLength", "cLength"])
    sleep(rate)
# Search a sub for words that need to be defined, and define them.
def searchSub(sub, lim, ml):
    """Walk the comment trees of ``sub``'s hot submissions and define jargon.

    A word is a definition candidate when its stem is not among the 10000
    most common words of the subreddit's own language model.
    """
    if sub not in languages:
        analyze(sub)  # build the sub's word-frequency model on first sight
    subreddit = jargonBot.r.subreddit(sub)
    subWords = [pair[0] for pair in languages[sub].most_common(10000)]
    for submission in subreddit.hot(limit=lim):
        # breadth-first walk over the whole comment tree
        comment_queue = submission.comments[:]
        while comment_queue:
            com = comment_queue.pop(0)
            if not hasattr(com, 'body') or com.id in jargonBot.ids:
                continue
            for word in com.body.split():
                # Stem the word and check if it is rare enough to be defined.
                # Find the most similar word in count to the stemmed word.
                word = stemmer.stem(word)
                if "'" in word:
                    continue
                if word not in subWords:
                    # NOTE(review): this linear scan only ever re-assigns
                    # ``word`` to an equal value — looks like a no-op; confirm
                    # whether a similarity match was intended here.
                    for item in countStemmed:
                        if item == word:
                            word = item
                            break
                    if ml:
                        if sub not in jargonBot.models:
                            jargonBot.createModel(sub, [[[1000000, 10, 10]], [10]])
                        # If ML, after basic checks, predict using the model
                        # to decide whether to reply.
                        if word in count:
                            popularity = count.index(word)
                        else:
                            popularity = 1000000  # sentinel: not in corpus
                        info = {"popularity": popularity, "wLength": len(word),
                                "cLength": len(com.body), "cID": com.id,
                                "sID": submission.id, "sub": sub}
                        if popularity > 10000:
                            # Sometimes, randomly reply to train the model.
                            if random.random() < jargonBot.models[sub][1]:
                                reply(com, word, ml, info=info)
                            elif jargonBot.models[sub][0].predict([[info["popularity"],
                                    info["wLength"], info["cLength"]]]) > 0:
                                reply(com, word, ml, info=info)
                            break
                    else:
                        # Non-ML mode: define anything outside the 400k most
                        # common corpus words.
                        if word not in count[:400000]:
                            reply(com, word, ml)
                            break
            jargonBot.ids.append(com.id)
            comment_queue.extend(com.replies)
# Reply to a comment with a word definition.
def reply(com, word, ml, info=None):
    """Reply to ``com`` with the definition of ``word``, if one is found.

    When ``ml`` is True, metadata about the reply is recorded in ``info`` and
    appended to ``jargonBot.responses`` so the per-subreddit model can later
    be trained on user feedback.
    """
    # Local import: ``datetime`` was referenced below but never imported at
    # module level, which made the ml path crash with a NameError.
    from datetime import datetime

    # Get the definition of the word (if it exists)
    result = getDefinition(word)
    if result is None:
        # No definition available; nothing to post.
        return

    # Build the reply text (avoid shadowing the function name ``reply``).
    if result[1] != "":
        text = """Definition of **{}**: {}.\n\n>*{}.*""".format(word.lower(), result[0].capitalize(),
                                                                result[1].capitalize())
    else:
        text = """Definition of **{}**: {}.""".format(word.lower(), result[0].capitalize())
    if ml:
        text += """\n\nI am a bot which attempts to define difficult words automatically. I use machine learning to do this, and I can use your feedback to improve. Feel free to leave a comment to let me know what you thought of this definition!"""
    text += "\n\n---------\n\n^Check ^out ^my ^[code](https://github.com/lhirschfeld/JargonBot). "
    text += " ^Please ^contact ^/u/liortulip ^with"
    text += " ^any ^questions ^or ^concerns."
    try:
        posted = com.reply(text)
        if ml:
            info["time"] = datetime.now()
            info["cID"] = posted
            jargonBot.responses.append(info)
        print("Replied")
    except praw.exceptions.APIException:
        # Reddit rate limit: resync the seen-comment ids and back off.
        print("Hit rate limit error.")
        jargonBot.updateIds()
        sleep(600)
# Analyze the language of a particular sub.
def analyze(sub):
    """Build a stemmed word-frequency Counter for ``sub`` and persist it."""
    print("Analyzing:", sub)
    subreddit = jargonBot.r.subreddit(sub)
    words = Counter()
    for submission in subreddit.hot(limit=300):
        # breadth-first walk over the whole comment tree
        comment_queue = submission.comments[:]
        while comment_queue:
            com = comment_queue.pop(0)
            if hasattr(com, 'body'):
                for word in com.body.split():
                    # Stem the word and add it to the counter.
                    word = stemmer.stem(word)
                    words[word] += 1
    languages[sub] = words
    # Persist the whole languages dict so future runs skip re-analysis.
    with open('languages.pickle', 'wb') as handle:
        pickle.dump(languages, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print("Analyzation complete.")
# Main loop: alternate a frequency-threshold pass and an ML-driven pass.
while True:
    jargon(50, 10, ["science", "math", "askreddit"])
    jargon(50, 10, ["science", "math", "askreddit"], ml=True)
    print("Completed loop")
| mit |
liangfok/controlit_demos | dreamer_controlit_demos/nodes/HandTap.py | 1 | 4435 | #!/usr/bin/env python
'''
Publishes goals to make Dreamer flap her right hand.
'''
import sys, getopt # for getting and parsing command line arguments
import time
import math
import threading
import rospy
from std_msgs.msg import Float64MultiArray, MultiArrayDimension
# import numpy as np
# from scipy.interpolate import interp1d
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
NUM_DOFS = 3  # vector goal is x, y, z
MAX_THETA = 45.0 / 180.0 * math.pi   # max hand angle, rad
MIN_THETA = -45.0 / 180.0 * math.pi  # min hand angle, rad
INITIAL_THETA = 0.0
ANGULAR_SPEED = 100.0 / 180.0 * math.pi  # 100 degrees per second (original comment said 5 deg/s, which did not match the value)
TRAJECTORY_FREQUENCY = 100.0  # number of trajectory points to generate per second
class HandFlap:
    """Publishes orientation goals that make Dreamer flap both hands."""

    def __init__(self):
        # Define the dimensions of the message
        dim = MultiArrayDimension()
        dim.size = NUM_DOFS
        dim.label = "rightHandOrientationGoal"
        dim.stride = 1

        # Define the goal messages (same ``dim`` object is shared by both;
        # fine here since it is never mutated afterwards)
        self.rightHandGoalMsg = Float64MultiArray()
        for ii in range(0, NUM_DOFS):
            self.rightHandGoalMsg.data.append(0)
        self.rightHandGoalMsg.layout.dim.append(dim)
        self.rightHandGoalMsg.layout.data_offset = 0
        self.rightHandGoalMsg.data[2] = 1  # initial goal is [0, 0, 1] in world frame

        self.leftHandGoalMsg = Float64MultiArray()
        for ii in range(0, NUM_DOFS):
            self.leftHandGoalMsg.data.append(0)
        self.leftHandGoalMsg.layout.dim.append(dim)
        self.leftHandGoalMsg.layout.data_offset = 0
        self.leftHandGoalMsg.data[2] = 1  # initial goal is [0, 0, 1] in world frame

    def getTimeSeconds(self):
        """
        Returns the current time in seconds.
        """
        return rospy.get_time()

    def start(self):
        """
        Starts publishing the goals to make Dreamer flap her right hand.
        """
        rightGoalPub = rospy.Publisher("/dreamer_controller/RightHandOrientation/goalVector", Float64MultiArray, queue_size=1)
        leftGoalPub = rospy.Publisher("/dreamer_controller/LeftHandOrientation/goalVector", Float64MultiArray, queue_size=1)

        printGoToMax = printGoToMin = True  # one-shot flags for console messages

        # state = STATE_GO_TO_START
        # goalIndex = 0
        # numWaves = 0 # the number of times the robot has waved already

        SWEEP_TIME = (MAX_THETA - MIN_THETA) / ANGULAR_SPEED  # one half-cycle, s
        TOTAL_CYCLE_TIME = SWEEP_TIME * 2
        INITIAL_GO_TO_MAX_TIME = (MAX_THETA - INITIAL_THETA) / ANGULAR_SPEED

        # Offset the start time by:
        #   (1) the time to go from the min angle to the initial angle
        #   (2) the time to go from the max angle to the min angle.
        # This is so the trajectory starts at the desired initial theta.
        startTime = self.getTimeSeconds() - (SWEEP_TIME - INITIAL_GO_TO_MAX_TIME) - SWEEP_TIME

        while not rospy.is_shutdown():
            deltaTime = self.getTimeSeconds() - startTime

            # check for completion of entire cycle
            if deltaTime > TOTAL_CYCLE_TIME:
                startTime = startTime + TOTAL_CYCLE_TIME
                deltaTime = rospy.get_time() - startTime
                printGoToMin = printGoToMax = True

            desiredAngle = 0

            if deltaTime < SWEEP_TIME:
                # first half-cycle: sweep from max theta down towards min theta
                if printGoToMin:
                    print "Going to min theta..."
                    printGoToMin = False
                deltaAngle = ANGULAR_SPEED * deltaTime
                desiredAngle = MAX_THETA - deltaAngle
            else:
                # second half-cycle: sweep back up towards max theta
                if printGoToMax:
                    print "Going to max theta..."
                    printGoToMax = False
                deltaTime = deltaTime - SWEEP_TIME
                deltaAngle = ANGULAR_SPEED * deltaTime
                desiredAngle = MIN_THETA + deltaAngle

            # rotate the [0, 0, 1] goal vector by desiredAngle in the x-z plane
            self.leftHandGoalMsg.data[0] = self.rightHandGoalMsg.data[0] = math.sin(desiredAngle)
            self.leftHandGoalMsg.data[2] = self.rightHandGoalMsg.data[2] = math.cos(desiredAngle)

            rightGoalPub.publish(self.rightHandGoalMsg)
            leftGoalPub.publish(self.leftHandGoalMsg)

            time.sleep(1.0 / TRAJECTORY_FREQUENCY)
# Main method
if __name__ == "__main__":
    rospy.init_node('HandFlap', anonymous=True)
    # NOTE(review): rebinding the class name to an instance works here, but
    # shadows the HandFlap class for the rest of the module.
    HandFlap = HandFlap()
    t = threading.Thread(target=HandFlap.start)
    t.start()
    rospy.spin()
| lgpl-2.1 |
cbmoore/statsmodels | statsmodels/datasets/copper/data.py | 28 | 2316 | """World Copper Prices 1951-1975 dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "World Copper Market 1951-1975 Dataset"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """World Copper Market 1951-1975"""
DESCRLONG = """This data describes the world copper market from 1951 through 1975. In an
example, in Gill, the outcome variable (of a 2 stage estimation) is the world
consumption of copper for the 25 years. The explanatory variables are the
world consumption of copper in 1000 metric tons, the constant dollar adjusted
price of copper, the price of a substitute, aluminum, an index of real per
capita income base 1970, an annual measure of manufacturer inventory change,
and a time trend.
"""
NOTE = """
Number of Observations - 25
Number of Variables - 6
Variable name definitions::
WORLDCONSUMPTION - World consumption of copper (in 1000 metric tons)
COPPERPRICE - Constant dollar adjusted price of copper
INCOMEINDEX - An index of real per capita income (base 1970)
ALUMPRICE - The price of aluminum
INVENTORYINDEX - A measure of annual manufacturer inventory trend
TIME - A time trend
Years are included in the data file though not returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """
    Load the copper data and returns a Dataset class.

    Returns
    --------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # Endogenous variable is the first column; everything is float.
    return du.process_recarray(_get_data(), endog_idx=0, dtype=float)
def _get_data():
    """Read copper.csv (shipped next to this module) into a numpy recarray.

    The first column (year) is skipped; the remaining six numeric columns
    are returned as float fields.
    """
    filepath = dirname(abspath(__file__))
    # Use a context manager so the file handle is always closed (the
    # original left the handle open).
    with open(filepath + '/copper.csv', 'rb') as csv_file:
        data = recfromtxt(csv_file, delimiter=",", names=True, dtype=float,
                          usecols=(1, 2, 3, 4, 5, 6))
    return data
def load_pandas():
    """
    Load the copper data and returns a Dataset class.

    Returns
    --------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # Same as load(), but backed by pandas objects.
    return du.process_recarray_pandas(_get_data(), endog_idx=0, dtype=float)
| bsd-3-clause |
perryjohnson/biplaneblade | biplane_blade_lib/layer_plane_angles_stn24.py | 1 | 9805 | """Determine the layer plane angle of all the elements in a grid.
Author: Perry Roth-Johnson
Last modified: May 1, 2014
Usage:
1. Look through the mesh_stnXX.abq file and find all the element set names.
(Find all the lines that start with "*ELSET".)
2. Enter each of the element set names in one of the four lists below:
(1) list_of_LE_elementsets
(2) list_of_TE_elementsets
(3) list_of_lower_elementsets
(4) list_of_upper_elementsets
3. Run this script. Visually inspect the plot to make sure each of the element
sets are in the correct list. (The blue edge should be facing outward,
relative to the airfoil centerpoint. The magenta edge should be facing
inward.) If you find an element that is oriented incorrectly, note the
element number, and look up it's element set name from the printout in the
IPython terminal. Then, move that element set name to a different list in
this script.
4. Repeat step 3 until your visual inspection suggests that all the edges (and
layer plane angles) are being assigned correctly.
References:
http://stackoverflow.com/questions/3365171/calculating-the-angle-between-two-lines-without-having-to-calculate-the-slope/3366569#3366569
http://stackoverflow.com/questions/19295725/angle-less-than-180-between-two-segments-lines
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import lib.grid as gr
reload(gr)
import lib.abaqus_utils2 as au
reload(au)
import lib.vabs_utils as vu
reload(vu)
import lib.blade as bl
from shapely.geometry import Polygon, LineString
from descartes import PolygonPatch
# -----------------------------------------------
# update these parameters!
station_num = 24
skip_num = 25  # plot every 'skip_num' elements (larger values plot faster)
# 1-based element numbers of the lone triangular elements that must be moved
# into their own element sets (see manual assignment below)
IS4_resin_u2_tri_elem_num = 4819  # num of tri elem in int surf 3 resin upper 2
IS4_resin_l2_tri_elem_num = 4786  # num of tri elem in int surf 3 resin lower 2
IS4_triax_u2_tri_elem_num = 5014  # num of tri elem in int surf 3 triax upper 2
IS4_triax_l2_tri_elem_num = 4978  # num of tri elem in int surf 3 triax lower 2
TE_reinf_foam_l3_tri_elem_num = 1086  # num of tri elem in TE reinf foam lower 3
TE_reinf_uniax_u4_tri_elem_num = 1217  # num of tri elem in TE reinf uniax upper 4
# -----------------------------------------------
stn_str = 'stn{0:02d}'.format(station_num)
plt.close('all')
# load the biplane blade
b1 = bl.BiplaneBlade(
    'biplane blade, flapwise symmetric, no stagger, rj/R=0.452, g/c=1.25',
    'biplane_blade')
# pre-process the station dimensions
station = b1.list_of_stations[station_num-1]
st = station.structure
af = station.airfoil
af.create_polygon()
st.create_all_layers()
st.save_all_layer_edges()
st.write_all_part_polygons()
# vertical offset between the two airfoils (gap height times gap fraction)
x3_off = af.lower_chord * af.gap_to_chord_ratio * af.gap_fraction
# plot the parts
station.plot_parts_offset(airfoil_to_plot='lower', x3_offset=x3_off)
# create a figure
ax = plt.gcf().gca()
# element sets on the leading edge
# outer_edge_node_nums=[1,4], inner_edge_node_nums=[2,3]
list_of_LE_elementsets = [
'lepanel',
'rbtrile',
'esgelle',
'estrile',
'is1rsle',
'is1trile',
'sw1biaxl',
'sw1foam',
'sw1biaxr',
'is1rlsw1',
'is1tlsw1',
'is2rrsw1',
'is2trsw1'
]
# element sets on the trailing edge
# outer_edge_node_nums=[3,2], inner_edge_node_nums=[4,1]
list_of_TE_elementsets = [
'is3reste',
'is3trite',
'sw2biaxl',
'sw2foam',
'sw2biaxr',
'sw3biaxl',
'sw3foam',
'sw3biaxr',
'is2rlsw2',
'is2tlsw2',
'is3rrsw2',
'is3trsw2',
'is3rlsw3',
'is3tlsw3',
'is4rrsw3',
'is4trsw3'
]
# element sets on the lower surface
# outer_edge_node_nums=[2,1], inner_edge_node_nums=[3,4]
list_of_lower_elementsets = [
'tefoaml1',
'tefoaml2',
'tefoaml3',
'teunil1',
'teunil2',
'teunil3',
'teunil4',
'ap1lower',
'ap2lower',
'esgllap1',
'estrlap1',
'rbtrlap1',
'is3rlap1',
'is3tlap1',
'esgllap2',
'estrlap2',
'is4rlap2',
'is4tlap2',
'sclower',
'rbtriscl',
'rbtrtel1',
'rbtrtel2',
'rbtrtel3',
'rbtrtel4',
'estrtel1',
'estrtel2',
'estrtel3',
'estrtel4',
'esgltel1',
'esgltel2',
'esgltel3',
'esgltel4',
'esgelscl',
'estriscl',
'is2rsscl',
'is2trscl',
'is4rtel1',
'is4rtel2',
'is4ttel1',
'is4ttel2',
'rbtrbsw1',
'esglbsw1',
'estrbsw1',
'esglbsw2',
'estrbsw2',
'rbtrbsw2',
'estrbsw3',
'esglbsw3'
]
# element sets on the upper surface
# outer_edge_node_nums=[4,3], inner_edge_node_nums=[1,2]
list_of_upper_elementsets = [
'tefoamu1',
'tefoamu2',
'tefoamu3',
'teuniu1',
'teuniu2',
'teuniu3',
'teuniu4',
'ap1upper',
'ap2upper',
'esgluap1',
'estruap1',
'rbtruap1',
'is3ruap1',
'is3tuap1',
'esgluap2',
'estruap2',
'is4ruap2',
'is4tuap2',
'is4rteu1',
'is4rteu2',
'is4tteu1',
'is4tteu2',
'esglasw1',
'estrasw1',
'rbtrasw1',
'rbtrteu1',
'rbtrteu2',
'rbtrteu3',
'rbtrteu4',
'estrteu1',
'estrteu2',
'estrteu3',
'estrteu4',
'esglteu1',
'esglteu2',
'esglteu3',
'esglteu4',
'esglasw2',
'estrasw2',
'rbtrasw2',
'esglasw3',
'estrasw3',
'scupper',
'rbtriscu',
'estriscu',
'is2rsscu',
'esgelscu',
'is2trscu'
]
# element sets of triangular elements on the lower surface
# outer_edge_node_nums=[2,1]
list_of_tri_lower_elementsets = [
'is4rtel2_tri',
'is4ttel2_tri',
'tefoaml3_tri'
]
# element sets of triangular elements on the upper surface
# outer_edge_node_nums=[3,2]
list_of_tri_upper_elementsets = [
'is4rteu2_tri',
'is4tteu2_tri',
'teuniu4_tri'
]
# import the initial grid object
fmt_grid = 'biplane_blade/' + stn_str + '/mesh_' + stn_str + '.abq'
g = au.AbaqusGrid(fmt_grid, debug_flag=True)
# manually assign six triangular elements into new element sets
# (element numbers are 1-based; the list indexes from 0, hence the "-1")
g.list_of_elements[IS4_resin_u2_tri_elem_num-1].element_set = 'is4rteu2_tri'
g.list_of_elements[IS4_resin_l2_tri_elem_num-1].element_set = 'is4rtel2_tri'
g.list_of_elements[IS4_triax_u2_tri_elem_num-1].element_set = 'is4tteu2_tri'
g.list_of_elements[IS4_triax_l2_tri_elem_num-1].element_set = 'is4ttel2_tri'
g.list_of_elements[TE_reinf_foam_l3_tri_elem_num-1].element_set = 'tefoaml3_tri'
g.list_of_elements[TE_reinf_uniax_u4_tri_elem_num-1].element_set = 'teuniu4_tri'
# update the grid object with all the layer plane angles
# (the node-number pairs pick which element edge faces outward vs. inward;
# see the module docstring for the visual-inspection workflow)
for elem in g.list_of_elements:
    if elem.element_set in list_of_LE_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[1,4],
                                         inner_edge_node_nums=[2,3])
    elif elem.element_set in list_of_TE_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[3,2],
                                         inner_edge_node_nums=[4,1])
    elif elem.element_set in list_of_lower_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[2,1],
                                         inner_edge_node_nums=[3,4])
    elif elem.element_set in list_of_upper_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[4,3],
                                         inner_edge_node_nums=[1,2])
    elif elem.element_set in list_of_tri_lower_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[2,1])
    elif elem.element_set in list_of_tri_upper_elementsets:
        elem.calculate_layer_plane_angle(outer_edge_node_nums=[3,2])
    else:
        # NOTE(review): raising the Warning *class* aborts the script;
        # warnings.warn() may have been intended if a non-fatal notice
        # was desired — confirm.
        raise Warning("Element #{0} has no element set!".format(elem.elem_num))
# plot a small selection of elements to check the results
for elem in g.list_of_elements[::skip_num]:
    elem.plot(label_nodes=False)
    print elem.elem_num, elem.element_set, elem.theta1
# also plot each manually reassigned triangular element plus its neighbor
g.list_of_elements[IS4_resin_u2_tri_elem_num-1].plot()
g.list_of_elements[IS4_resin_u2_tri_elem_num-2].plot()
g.list_of_elements[IS4_resin_l2_tri_elem_num-1].plot()
g.list_of_elements[IS4_resin_l2_tri_elem_num-2].plot()
g.list_of_elements[IS4_triax_u2_tri_elem_num-1].plot()
g.list_of_elements[IS4_triax_u2_tri_elem_num-2].plot()
g.list_of_elements[IS4_triax_l2_tri_elem_num-1].plot()
g.list_of_elements[IS4_triax_l2_tri_elem_num-2].plot()
g.list_of_elements[TE_reinf_foam_l3_tri_elem_num-1].plot()
g.list_of_elements[TE_reinf_foam_l3_tri_elem_num-2].plot()
g.list_of_elements[TE_reinf_uniax_u4_tri_elem_num-1].plot()
g.list_of_elements[TE_reinf_uniax_u4_tri_elem_num-2].plot()
# show the plot
plt.xlim([-3,5])
plt.ylim([-3,3])
ax.set_aspect('equal')
print ' ------------------------'
print ' LEGEND'
print ' magenta : inner edge'
print ' blue : outer edge'
print ' ------------------------'
plt.show()
# -----------------------------------------------------------------------------
# read layers.csv to determine the number of layers
layer_file = pd.read_csv('biplane_blade/layers.csv', index_col=0)
number_of_layers = len(layer_file)  # NOTE(review): computed but unused below
# write the updated grid object to a VABS input file
fmt_vabs = 'biplane_blade/' + stn_str + '/mesh_' + stn_str + '.vabs'
f = vu.VabsInputFile(
    vabs_filename=fmt_vabs,
    grid=g,
    material_filename='biplane_blade/materials.csv',
    layer_filename='biplane_blade/layers.csv',
    debug_flag=True,
    # VABS analysis flags (0/1 switches; see the VABS manual)
    flags={
        'format' : 1,
        'Timoshenko' : 1,
        'recover' : 0,
        'thermal' : 0,
        'curve' : 0,
        'oblique' : 0,
        'trapeze' : 0,
        'Vlasov' : 0
    })
| gpl-3.0 |
sonnyhu/scikit-learn | sklearn/gaussian_process/tests/test_gpr.py | 11 | 11915 | """Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal)
def f(x):
    """Target function for the regression tests: f(x) = x * sin(x)."""
    return np.sin(x) * x
# Training inputs (column vector of 6 points) and noise-free targets.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Held-out query points interleaved between the training points.
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
# Kernel whose hyperparameters cannot be optimized; several tests skip it.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
# Representative kernels: plain / fixed / bounded RBF, plus scaled and
# white-noise-augmented compositions.
kernels = [RBF(length_scale=1.0), fixed_kernel,
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
           C(1e-5, (1e-5, 1e2)),
           C(0.1, (1e-2, 1e2)) *
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
           C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
    """Test the interpolating property for different kernels."""
    for k in kernels:
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        pred, cov = model.predict(X, return_cov=True)
        # Predictions reproduce the targets and the posterior variance vanishes.
        assert_true(np.allclose(pred, y))
        assert_true(np.allclose(np.diag(cov), 0.))
def test_lml_improving():
    """ Test that hyperparameter-tuning improves log-marginal likelihood. """
    for k in kernels:
        if k == fixed_kernel:
            continue  # nothing to tune on a fixed kernel
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        tuned = model.log_marginal_likelihood(model.kernel_.theta)
        initial = model.log_marginal_likelihood(k.theta)
        assert_greater(tuned, initial)
def test_lml_precomputed():
    """ Test that lml of optimized kernel is stored correctly. """
    for k in kernels:
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        # Calling without arguments must return the value cached during fit.
        assert_equal(model.log_marginal_likelihood(model.kernel_.theta),
                     model.log_marginal_likelihood())
def test_converged_to_local_maximum():
    """ Test that we are in local maximum after hyperparameter-optimization."""
    for k in kernels:
        if k == fixed_kernel:
            continue  # no optimization performed for a fixed kernel
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        lml, grad = model.log_marginal_likelihood(model.kernel_.theta, True)
        # At a local maximum each gradient component is ~0, unless theta
        # is pinned at one of its bounds.
        at_lower = model.kernel_.theta == model.kernel_.bounds[:, 0]
        at_upper = model.kernel_.theta == model.kernel_.bounds[:, 1]
        assert_true(np.all((np.abs(grad) < 1e-4) | at_lower | at_upper))
def test_solution_inside_bounds():
    """ Test that hyperparameter-optimization remains in bounds"""
    for k in kernels:
        if k == fixed_kernel:
            continue
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        bounds = model.kernel_.bounds
        # Replace infinite upper bounds so the comparison below is finite.
        max_ = np.finfo(model.kernel_.theta.dtype).max
        bounds[~np.isfinite(bounds[:, 1]), 1] = max_
        tiny = 1e-10  # numerical slack
        assert_array_less(bounds[:, 0], model.kernel_.theta + tiny)
        assert_array_less(model.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
    """ Compare analytic and numeric gradient of log marginal likelihood. """
    for k in kernels:
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        lml, grad = model.log_marginal_likelihood(k.theta, True)
        # Finite-difference reference gradient.
        num_grad = approx_fprime(
            k.theta,
            lambda theta: model.log_marginal_likelihood(theta, False),
            1e-10)
        assert_almost_equal(grad, num_grad, 3)
def test_prior():
    """ Test that GP prior has mean 0 and identical variances."""
    for k in kernels:
        model = GaussianProcessRegressor(kernel=k)  # unfitted -> prior
        mean, cov = model.predict(X, return_cov=True)
        assert_almost_equal(mean, 0, 5)
        if len(model.kernel.theta) > 1:
            # XXX: quite hacky, works only for current kernels
            assert_almost_equal(np.diag(cov), np.exp(k.theta[0]), 5)
        else:
            assert_almost_equal(np.diag(cov), 1, 5)
def test_sample_statistics():
    """ Test that statistics of samples drawn from GP are correct."""
    for k in kernels:
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        mean, cov = model.predict(X2, return_cov=True)
        draws = model.sample_y(X2, 300000)
        # More digits of accuracy would require many more samples.
        assert_almost_equal(mean, np.mean(draws, 1), 1)
        scale = np.diag(cov).max()
        assert_almost_equal(np.diag(cov) / scale, np.var(draws, 1) / scale, 1)
def test_no_optimizer():
    """ Test that kernel parameters are unmodified when optimizer is None."""
    model = GaussianProcessRegressor(kernel=RBF(1.0), optimizer=None).fit(X, y)
    # length_scale stays at its initial value (theta is the log-parameter).
    assert_equal(np.exp(model.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
    """ Test that predicted std.-dev. is consistent with cov's diagonal."""
    for k in kernels:
        model = GaussianProcessRegressor(kernel=k).fit(X, y)
        _, cov = model.predict(X2, return_cov=True)
        _, std = model.predict(X2, return_std=True)
        assert_almost_equal(np.sqrt(np.diag(cov)), std)
def test_anisotropic_kernel():
    """ Test that GPR can identify meaningful anisotropic length-scales. """
    # The target varies ten times more slowly along the second feature, so
    # the learned length-scales should differ by at least a factor of five.
    rng = np.random.RandomState(0)
    X_train = rng.uniform(-1, 1, (50, 2))
    y_train = X_train[:, 0] + 0.1 * X_train[:, 1]
    model = GaussianProcessRegressor(kernel=RBF([1.0, 1.0]))
    model.fit(X_train, y_train)
    assert_greater(np.exp(model.kernel_.theta[1]),
                   np.exp(model.kernel_.theta[0]) * 5)
def test_random_starts():
    """
    Test that an increasing number of random-starts of GP fitting only
    increases the log marginal likelihood of the chosen theta.
    """
    n_samples, n_features = 25, 2
    # All randomness below flows through this local RandomState (and the
    # estimator's random_state=0); the previous np.random.seed(0) call was
    # dead code mutating global RNG state, so it has been removed.
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
        + rng.normal(scale=0.1, size=n_samples)
    kernel = C(1.0, (1e-2, 1e2)) \
        * RBF(length_scale=[1.0] * n_features,
              length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
        + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
    last_lml = -np.inf
    for n_restarts_optimizer in range(5):
        gp = GaussianProcessRegressor(
            kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
            random_state=0,).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        # Allow tiny numerical noise between successive runs.
        assert_greater(lml, last_lml - np.finfo(np.float32).eps)
        last_lml = lml
def test_y_normalization():
    """ Test normalization of the target values in GP
    Fitting non-normalizing GP on normalized y and fitting normalizing GP
    on unnormalized y should yield identical results
    """
    y_mean = y.mean(0)
    y_norm = y - y_mean
    for k in kernels:
        # Non-normalizing GP trained on pre-centered targets ...
        plain = GaussianProcessRegressor(kernel=k)
        plain.fit(X, y_norm)
        # ... versus a normalizing GP trained on the raw targets.
        normed = GaussianProcessRegressor(kernel=k, normalize_y=True)
        normed.fit(X, y)
        # Compare predicted mean, std-devs and covariances.
        pred, pred_std = plain.predict(X2, return_std=True)
        pred = y_mean + pred
        pred_norm, pred_std_norm = normed.predict(X2, return_std=True)
        assert_almost_equal(pred, pred_norm)
        assert_almost_equal(pred_std, pred_std_norm)
        _, cov = plain.predict(X2, return_cov=True)
        _, cov_norm = normed.predict(X2, return_cov=True)
        assert_almost_equal(cov, cov_norm)
def test_y_multioutput():
    """ Test that GPR can deal with multi-dimensional target values"""
    y_2d = np.vstack((y, y * 2)).T
    # With a fixed kernel the first output column of the 2-D GP must equal
    # the 1-D GP prediction and the second column must be twice as large.
    kernel = RBF(length_scale=1.0)
    model_1d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                        normalize_y=False)
    model_1d.fit(X, y)
    model_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                        normalize_y=False)
    model_2d.fit(X, y_2d)
    pred_1d, std_1d = model_1d.predict(X2, return_std=True)
    pred_2d, std_2d = model_2d.predict(X2, return_std=True)
    _, cov_1d = model_1d.predict(X2, return_cov=True)
    _, cov_2d = model_2d.predict(X2, return_cov=True)
    assert_almost_equal(pred_1d, pred_2d[:, 0])
    assert_almost_equal(pred_1d, pred_2d[:, 1] / 2)
    # Standard deviation and covariance do not depend on output
    assert_almost_equal(std_1d, std_2d)
    assert_almost_equal(cov_1d, cov_2d)
    samples_1d = model_1d.sample_y(X2, n_samples=10)
    samples_2d = model_2d.sample_y(X2, n_samples=10)
    assert_almost_equal(samples_1d, samples_2d[:, 0])
    # Hyperparameter optimization must find the same theta in 1-D and 2-D.
    for k in kernels:
        opt_1d = GaussianProcessRegressor(kernel=k, normalize_y=True)
        opt_1d.fit(X, y)
        opt_2d = GaussianProcessRegressor(kernel=k, normalize_y=True)
        opt_2d.fit(X, np.vstack((y, y)).T)
        assert_almost_equal(opt_1d.kernel_.theta, opt_2d.kernel_.theta, 4)
def test_custom_optimizer():
    """ Test that GPR can use externally defined optimizers. """
    def optimizer(obj_func, initial_theta, bounds):
        # Dummy optimizer: evaluate 50 random thetas and keep the best one.
        rng = np.random.RandomState(0)
        best_theta = initial_theta
        best_val = obj_func(initial_theta, eval_gradient=False)
        for _ in range(50):
            candidate = np.atleast_1d(
                rng.uniform(np.maximum(-2, bounds[:, 0]),
                            np.minimum(1, bounds[:, 1])))
            val = obj_func(candidate, eval_gradient=False)
            if val < best_val:
                best_theta, best_val = candidate, val
        return best_theta, best_val

    for k in kernels:
        if k == fixed_kernel:
            continue
        model = GaussianProcessRegressor(kernel=k, optimizer=optimizer)
        model.fit(X, y)
        # The optimizer must not make the marginal likelihood worse.
        assert_greater(model.log_marginal_likelihood(model.kernel_.theta),
                       model.log_marginal_likelihood(model.kernel.theta))
def test_duplicate_input():
    """ Test GPR can handle two different output-values for the same input. """
    for k in kernels:
        model_equal = GaussianProcessRegressor(kernel=k, alpha=1e-2)
        model_close = GaussianProcessRegressor(kernel=k, alpha=1e-2)
        # Exactly duplicated input point with conflicting targets ...
        model_equal.fit(np.vstack((X, X[0])), np.hstack((y, y[0] + 1)))
        # ... versus an input duplicated only up to numerical noise.
        model_close.fit(np.vstack((X, X[0] + 1e-15)),
                        np.hstack((y, y[0] + 1)))
        X_test = np.linspace(0, 10, 100)[:, None]
        pred_equal, std_equal = model_equal.predict(X_test, return_std=True)
        pred_close, std_close = model_close.predict(X_test, return_std=True)
        assert_almost_equal(pred_equal, pred_close)
        assert_almost_equal(std_equal, std_close)
| bsd-3-clause |
mediagit2016/workcamp-maschinelles-lernen-grundlagen | 17-12-11-workcamp-ml/mglearn/datasets.py | 1 | 1909 | import numpy as np
import pandas as pd
import os
from scipy import signal
from sklearn.datasets import load_boston
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures
from .make_blobs import make_blobs
# Data files live in the package's sibling ``data/`` directory.
DATA_PATH = os.path.join(os.path.dirname(__file__), "..", "data")
def make_forge():
    """Return the hand-curated 'forge' toy classification set as (X, y).

    Draws two blobs, flips two labels and drops four points so the two
    classes overlap slightly.
    """
    # a carefully hand-designed dataset lol
    X, y = make_blobs(centers=2, random_state=4, n_samples=30)
    y[np.array([7, 27])] = 0  # flip two labels across the boundary
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the documented replacement and behaves identically here.
    mask = np.ones(len(X), dtype=bool)
    mask[np.array([0, 1, 5, 26])] = 0  # drop four points
    X, y = X[mask], y[mask]
    return X, y
def make_wave(n_samples=100):
    """Return a noisy 1-D regression problem: X of shape (n_samples, 1), y."""
    rnd = np.random.RandomState(42)  # fixed seed => deterministic dataset
    x = rnd.uniform(-3, 3, size=n_samples)
    clean = np.sin(4 * x) + x
    y = (clean + rnd.normal(size=len(x))) / 2
    return x.reshape(-1, 1), y
def load_extended_boston():
    """Return the Boston housing data expanded with degree-2 feature products.

    Features are min-max scaled to [0, 1] before the polynomial expansion.
    NOTE(review): ``load_boston`` was removed in scikit-learn 1.2, so this
    helper requires an older scikit-learn — confirm the pinned version.
    """
    boston = load_boston()
    # The previous dead assignment ``X = boston.data`` (immediately
    # overwritten) has been removed; scale first so the polynomial terms
    # stay in a comparable range.
    X = MinMaxScaler().fit_transform(boston.data)
    X = PolynomialFeatures(degree=2, include_bias=False).fit_transform(X)
    return X, boston.target
def load_citibike():
    """Load citibike trips and return trip counts resampled into 3h bins."""
    raw = pd.read_csv(os.path.join(DATA_PATH, "citibike.csv"))
    raw['one'] = 1  # each row is one trip; summing this column counts trips
    raw['starttime'] = pd.to_datetime(raw.starttime)
    resampled = raw.set_index("starttime").resample("3h").sum().fillna(0)
    return resampled.one
def make_signals():
    """Return three standardized noisy source signals stacked as columns.

    Shape is (2000, 3): a sinusoid, a square wave and a sawtooth, each with
    additive Gaussian noise, unit variance per column and a global minimum
    shifted to zero.
    """
    rng = np.random.RandomState(42)  # fix a random state seed
    n_samples = 2000
    time = np.linspace(0, 8, n_samples)
    # The three sources.
    sine = np.sin(2 * time)
    square = np.sign(np.sin(3 * time))
    sawtooth = signal.sawtooth(2 * np.pi * time)
    # Stack as columns, add noise, standardize, shift.
    S = np.c_[sine, square, sawtooth]
    S += 0.2 * rng.normal(size=S.shape)
    S /= S.std(axis=0)
    S -= S.min()
    return S
| gpl-3.0 |
amaggi/bda | chapter_03/ex_04.py | 1 | 1516 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import uniform
from scipy.integrate import trapz
from sample_via_cdf import sample_via_cdf
# Beta-blocker trial (BDA3 ch.3 ex.4): compare posterior death probabilities
# in the control (group 0) and treatment (group 1) arms via the odds ratio.
N0 = 674   # control group size
N0D = 39   # control group deaths
N1 = 680   # treatment group size
N1D = 22   # treatment group deaths
NPTS = 100    # grid resolution for each posterior
NSAMP = 1000  # posterior draws per group
# set the same uniform prior for the two cases
prior = uniform(0, 0.2)
p0 = np.linspace(prior.ppf(0), prior.ppf(1), NPTS)
p1 = np.linspace(prior.ppf(0), prior.ppf(1), NPTS)
# set up the (unnormalized) binomial-likelihood posteriors on the grid
post_p0 = np.empty(NPTS, dtype=float)
post_p1 = np.empty(NPTS, dtype=float)
# range() instead of the Python-2-only xrange(); works on Python 2 and 3.
for i in range(NPTS):
    post_p0[i] = (p0[i]**N0D * (1-p0[i])**(N0-N0D)) * prior.pdf(p0[i])
    post_p1[i] = (p1[i]**N1D * (1-p1[i])**(N1-N1D)) * prior.pdf(p1[i])
# normalize by trapezoidal integration
Z0 = trapz(post_p0, p0)
Z1 = trapz(post_p1, p1)
post_p0 = post_p0/Z0
post_p1 = post_p1/Z1
# get samples from posterior
p0_samp = sample_via_cdf(p0, post_p0, NSAMP)
p1_samp = sample_via_cdf(p1, post_p1, NSAMP)
# get odds_ratio distribution
odds_ratio = (p1_samp/(1-p1_samp)) / (p0_samp/(1-p0_samp))
odds_summary = np.percentile(odds_ratio, [2.5, 50, 97.5])
# print() call instead of the Python-2-only print statement.
print(odds_summary)
# plot: posteriors, posterior samples, and the odds-ratio histogram
fig, axes = plt.subplots(1, 3)
plt.sca(axes[0])
plt.plot(p0, post_p0, label='control')
plt.plot(p1, post_p1, label='beta-blocker')
plt.xlabel('Probability of death')
plt.ylabel('P(p | data)')
plt.legend()
plt.sca(axes[1])
plt.hist(p0_samp, label='control')
plt.hist(p1_samp, label='treatment')
plt.xlabel('Probability of death')
plt.legend()
plt.sca(axes[2])
plt.hist(odds_ratio, label='odds ratio')
plt.xlabel('odds ratio')
plt.legend()
plt.show()
plt.close()
| gpl-2.0 |
solvebio/solvebio-python | solvebio/utils/tabulate.py | 1 | 20872 | # -*- coding: utf-8 -*-
#
# This file contains code from python-tabulate, modified for SolveBio
#
# Copyright © 2011-2013 Sergey Astanin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import absolute_import
from six.moves import map
from six.moves import range
from six.moves import zip
from six import string_types
from collections import namedtuple
from platform import python_version_tuple
import re
from .printing import TTY_COLS
# Python 2/3 compatibility shims: pick the right zip_longest/reduce and
# record the interpreter's scalar/string types under version-neutral names.
if python_version_tuple()[0] < "3":
    from itertools import izip_longest
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = str
    _binary_type = str
else:
    from itertools import zip_longest as izip_longest
    # reduce() moved to functools in Python 3.
    from functools import reduce
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = str
    _binary_type = bytes
__all__ = ["tabulate"]
__version__ = "0.6"
# Pieces of a table format: a horizontal rule (Line) and the delimiters of a
# data row (DataRow).
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# Full description of an output style; the *_hide fields list which rule
# names are suppressed when headers are present/absent.
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
                                         "linebetweenrows", "linebelow",
                                         "headerrow", "datarow",
                                         "padding", "usecolons",
                                         "with_header_hide",
                                         "without_header_hide"])
# Shared defaults for the optional TableFormat fields.
_format_defaults = {"padding": 0,
                    "usecolons": False,
                    "with_header_hide": [],
                    "without_header_hide": []}
_table_formats = {"simple":
TableFormat(lineabove=None,
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
usecolons=False,
with_header_hide=["linebelow"],
without_header_hide=[]),
"plain":
TableFormat(None, None, None, None,
DataRow("", " ", ""), DataRow("", " ", ""),
**_format_defaults),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
usecolons=False,
with_header_hide=[],
without_header_hide=["linebelowheader"]),
"pipe":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "|", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
usecolons=True,
with_header_hide=[],
without_header_hide=[]),
"orgmode":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
usecolons=False,
with_header_hide=[],
without_header_hide=["linebelowheader"])}
_invisible_codes = re.compile(r'\x1b\[\d*m') # ANSI color codes
def simple_separated_format(separator):
    """
    Construct a simple TableFormat with columns separated by a separator.
    >>> tsv = simple_separated_format("\t") ; \
        tabulate([["foo", 1], ["spam", 23]], \
        tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
    True
    """
    data_row = DataRow('', separator, '')
    return TableFormat(None, None, None, None,
                       headerrow=None, datarow=data_row,
                       **_format_defaults)
def _isconvertible(conv, string):
try:
conv(string) # noqa
return True
except (TypeError, ValueError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
    """
    >>> _isint("123")
    True
    >>> _isint("123.45")
    False
    """
    # A genuine int qualifies immediately.
    if type(string) is int:
        return True
    # Otherwise only string-like values are considered, and they must
    # parse as an integer.
    if isinstance(string, _binary_type) or isinstance(string, string_types):
        return _isconvertible(int, string)
    return False
def _type(string, has_invisible=True):
    """
    The least generic type (type(None), int, float, str, unicode).
    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    # Drop ANSI escapes first so colored numbers still classify as numbers.
    if has_invisible and isinstance(string, (_text_type, _binary_type)):
        string = _strip_invisible(string)
    if string is None:
        return _none_type
    if _isint(string):
        return _int_type
    if _isnumber(string):
        return float
    if isinstance(string, _binary_type):
        return _binary_type
    return _text_type
def _afterpoint(string):
    """
    Symbols after a decimal point, -1 if the string lacks the decimal point.
    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    # Only non-integer numbers can have digits after the point.
    if not _isnumber(string) or _isint(string):
        return -1
    pos = string.rfind(".")
    if pos < 0:
        # No dot: fall back to the exponent marker, if any.
        pos = string.lower().rfind("e")
    return len(string) - pos - 1 if pos >= 0 else -1
def _padleft(width, s, has_invisible=True):
    """
    Flush right.
    >>> _padleft(6, '\u044f\u0439\u0446\u0430') \
        == '  \u044f\u0439\u0446\u0430'
    True
    """
    iwidth = width
    if has_invisible:
        # Widen by the number of invisible (ANSI) characters in s.
        iwidth += len(s) - len(_strip_invisible(s))
    return "{0:>{1}s}".format(s, iwidth)
def _padright(width, s, has_invisible=True):
    """
    Flush left.
    >>> _padright(6, '\u044f\u0439\u0446\u0430') \
        == '\u044f\u0439\u0446\u0430  '
    True
    """
    iwidth = width
    if has_invisible:
        # Widen by the number of invisible (ANSI) characters in s.
        iwidth += len(s) - len(_strip_invisible(s))
    return "{0:<{1}s}".format(s, iwidth)
def _padboth(width, s, has_invisible=True):
    """
    Center string.
    >>> _padboth(6, '\u044f\u0439\u0446\u0430') \
        == ' \u044f\u0439\u0446\u0430 '
    True
    """
    iwidth = width
    if has_invisible:
        # Widen by the number of invisible (ANSI) characters in s.
        iwidth += len(s) - len(_strip_invisible(s))
    # Keep format-spec centering ('^'); str.center places the odd space
    # differently and would change the output.
    return "{0:^{1}s}".format(s, iwidth)
def _strip_invisible(s):
"Remove invisible ANSI color codes."
return re.sub(_invisible_codes, "", s)
def _visible_width(s):
    """
    Visible width of a printed string. ANSI color codes are removed.
    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    if isinstance(s, (_text_type, _binary_type)):
        return len(_strip_invisible(s))
    # Non-string cells are measured by their string representation.
    return len(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
    """
    [string] -> [padded_string]
    >>> list(map(str,_align_column( \
        ["12.345", "-1234.5", "1.23", "1234.5", \
        "1e+234", "1.0e234"], "decimal")))
    ['   12.345  ', '-1234.5    ', '    1.23   ', \
    ' 1234.5    ', '    1e+234 ', '    1.0e234']
    """
    # BUGFIX: the original used substring tests (`alignment in "center"`),
    # which also matched "" or "c"; exact comparison is the intended check.
    if alignment == "right":
        strings = [s.strip() for s in strings]
        padfn = _padleft
    elif alignment == "center":
        strings = [s.strip() for s in strings]
        padfn = _padboth
    elif alignment == "decimal":
        # Right-pad each number so all decimal points line up.
        decimals = [_afterpoint(s) for s in strings]
        maxdecimals = max(decimals)
        strings = [s + (maxdecimals - decs) * " "
                   for s, decs in zip(strings, decimals)]
        padfn = _padleft
    else:
        strings = [s.strip() for s in strings]
        padfn = _padright
    # Measure widths ignoring ANSI codes when they may be present.
    width_fn = _visible_width if has_invisible else len
    maxwidth = max(max(list(map(width_fn, strings))), minwidth)
    return [padfn(maxwidth, s, has_invisible) for s in strings]
def _more_generic(type1, type2):
types = {_none_type: 0, int: 1, float: 2, _text_type: 4}
invtypes = {4: _text_type, 2: float, 1: int, 0: _none_type}
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
    """
    The least generic type all column values are convertible to.
    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    """
    inferred = [_type(s, has_invisible) for s in strings]
    # Fold pairwise, starting from the least generic non-trivial type.
    return reduce(_more_generic, inferred, int)
def _format(val, valtype, floatfmt, missingval=""):
"""
Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', \
'\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \
\\u0446\\u0438\\u0444\\u0440\\u0430\\n-------\
-------\\n\\u0430\\u0437 \
2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _binary_type, _text_type]:
return "{0}".format(val)
elif valtype is float:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
    """Pad *header* to *width* according to *alignment* (default: right)."""
    if alignment == "left":
        return _padright(width, header)
    if alignment == "center":
        return _padboth(width, header)
    # "right", "decimal" and anything else flush right.
    return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers, sort=True):
    """
    Transform a supported data type to a list of lists, and a list of headers.
    Supported tabular data types:
    * list-of-lists or another iterable of iterables
    * 2D NumPy arrays
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")
    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".

    When ``sort`` is true, rows are sorted by their first column
    (SolveBio-specific behavior).
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict
            keys = list(tabular_data.keys())
            # columns have to be transposed
            rows = list(izip_longest(*list(tabular_data.values())))
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index then
            # it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = list(tabular_data.keys())
            # values matrix doesn't need to be transposed
            vals = tabular_data.values
            names = tabular_data.index
            # prepend the index value to each row
            rows = [[v] + list(row) for v, row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict "
                             "or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type, keys))  # headers should be strings
    else:  # it's, as usual, an iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if headers == "keys" and len(rows) > 0:  # keys are column indices
            headers = list(map(_text_type, list(range(len(rows[0])))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]
    headers = list(headers)
    rows = list(map(list, rows))
    if sort and len(rows) > 1:
        # sort rows by the first column
        rows = sorted(rows, key=lambda x: x[0])
    # pad with empty headers for initial columns if necessary
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""] * (ncols - nhs) + headers
    return rows, headers
def _build_row(cells, padding, begin, sep, end):
    "Return a string which represents a row of data cells."
    pad = " " * padding
    padded = [pad + cell + pad for cell in cells]
    rendered = (begin + sep.join(padded) + end).rstrip()
    # SolveBio: rows hold Key-Value pairs only; truncate at the terminal
    # width (TTY_COLS) so a rendered line never wraps.
    if len(rendered) > TTY_COLS:
        if cells[-1].endswith(" ") or cells[-1].endswith("-"):
            tail = ""
        else:
            tail = " ... "  # ellipsis marks the truncated value
        rendered = "{0}{1}{2}".format(
            rendered[:TTY_COLS - len(tail) - 1], tail, end)
    return rendered
def _build_line(colwidths, padding, begin, fill, sep, end):
    "Return a string which represents a horizontal line."
    segments = [fill * (w + 2 * padding) for w in colwidths]
    return _build_row(segments, 0, begin, sep, end)
def _mediawiki_cell_attrs(row, colaligns):
"Prefix every cell in a row with an HTML alignment attribute."
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
row2 = [alignment[a] + c for c, a in zip(row, colaligns)]
return row2
def _line_segment_with_colons(linefmt, align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
fill = linefmt.hline
w = colwidth
if align in ["right", "decimal"]:
return (fill[0] * (w - 1)) + ":"
elif align == "center":
return ":" + (fill[0] * (w - 2)) + ":"
elif align == "left":
return ":" + (fill[0] * (w - 1))
else:
return fill[0] * w
def _format_table(fmt, headers, rows, colwidths, colaligns):
    """Produce a plain-text representation of the table."""
    lines = []
    # Which horizontal rules are suppressed depends on header presence.
    hidden = fmt.with_header_hide if headers else fmt.without_header_hide
    pad = fmt.padding
    headerrow = fmt.headerrow or fmt.datarow
    if fmt.lineabove and "lineabove" not in hidden:
        lines.append(_build_line(colwidths, pad, *fmt.lineabove))
    if headers:
        lines.append(_build_row(headers, pad, *headerrow))
    if fmt.linebelowheader and "linebelowheader" not in hidden:
        begin, fill, sep, end = fmt.linebelowheader
        if fmt.usecolons:
            # Encode per-column alignment into the rule (pipe format).
            segs = [
                _line_segment_with_colons(fmt.linebelowheader, a, w + 2 * pad)
                for w, a in zip(colwidths, colaligns)]
            lines.append(_build_row(segs, 0, begin, sep, end))
        else:
            lines.append(_build_line(colwidths, pad, *fmt.linebelowheader))
    if rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # Every row but the last is followed by a separator line.
        for row in rows[:-1]:
            lines.append(_build_row(row, pad, *fmt.datarow))
            lines.append(_build_line(colwidths, pad, *fmt.linebetweenrows))
        lines.append(_build_row(rows[-1], pad, *fmt.datarow))
    else:
        lines.extend(_build_row(row, pad, *fmt.datarow) for row in rows)
    if fmt.linebelow and "linebelow" not in hidden:
        lines.append(_build_line(colwidths, pad, *fmt.linebelow))
    return "\n".join(lines)
def tabulate(tabular_data, headers=[], tablefmt="orgmode",
             floatfmt="g", aligns=[], missingval="", sort=True, is_tsv=False):
    """Render *tabular_data* as a text table.

    tablefmt names an entry of _table_formats (or is a TableFormat);
    unknown names fall back to "orgmode".  floatfmt is the format spec for
    float cells, missingval replaces None, and aligns overrides the
    per-column alignment inferred from column types.  When sort is true,
    rows are ordered by their first column.
    """
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers,
                                                     sort=sort)
    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(
        ['\t'.join(map(_text_type, headers))] +
        ['\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = len
    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval) for v in c]
            for c, ct in zip(cols, coltypes)]
    # align columns
    if not aligns:
        # dynamic alignment by col type
        aligns = ["decimal" if ct in [int, float] else "left"
                  for ct in coltypes]
    minwidths = [width_fn(h) + 2 for h in headers] if headers \
        else [0] * len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]
    if headers:
        # align headers and add headers
        minwidths = [max(minw, width_fn(c[0]))
                     for minw, c in zip(minwidths, cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, aligns, minwidths)]
    else:
        minwidths = [width_fn(c[0]) for c in cols]
    rows = list(zip(*cols))
    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["orgmode"])
    # make sure values don't have newlines or tabs in them, except for tsv output_format where
    # we have to add spaces in order to simulate tab separators
    rows = [[str(c).replace('\n', '').replace('\t', ' ' if is_tsv else '').replace('\r', '')
             for c in r] for r in rows]
    return _format_table(tablefmt, headers, rows, minwidths, aligns)
if __name__ == "__main__":
    # Demo: render the same key/value data sorted and unsorted.
    data = [
        ("gene_symbols", ["CPB1"]),
        ("clinical_significance", "other"),
        ("clinical_origin", ["somatic"]),
        ("alternate_alleles", ["T"]), ]
    print(tabulate(data,
                   headers=('Fields', 'Data'),
                   aligns=('right', 'left'), sort=True))
    print(tabulate(data,
                   headers=('Fields', 'Data'),
                   aligns=('right', 'left'), sort=False))
| mit |
RPGOne/Skynet | scikit-learn-0.18.1/examples/linear_model/plot_multi_task_lasso_support.py | 102 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows to fit multiple regression problems
jointly enforcing the selected features to be the same across
tasks. This example simulates sequential measurements, each task
is a time instant, and the relevant features vary in amplitude
over time while being the same. The multi-task lasso imposes that
features that are selected at one time point are select for all time
point. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
# Only the first n_relevant_features columns carry signal; RNG draw order
# here fixes the generated figure, so statements must not be reordered.
for k in range(n_relevant_features):
    coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
# Independent Lasso per task vs. one joint MultiTaskLasso fit.
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
# Second figure: the recovered coefficient of one feature over the tasks.
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
         label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
         label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
         label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
mahak/spark | python/pyspark/sql/pandas/typehints.py | 26 | 6324 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql.pandas.utils import require_minimum_pandas_version
def infer_eval_type(sig):
    """
    Infers the evaluation type in :class:`pyspark.rdd.PythonEvalType` from
    :class:`inspect.Signature` instance.

    The decision is made purely from the pandas type hints:

    * ``(Series/DataFrame/Union[...], ...) -> Series/DataFrame``  => SCALAR
    * ``Iterator[Tuple[...]] -> Iterator[Series/DataFrame]``      => SCALAR_ITER
    * ``Iterator[...] -> Iterator[Series/DataFrame]``             => SCALAR_ITER
    * ``(Series/DataFrame/Union[...], ...) -> any other type``    => GROUPED_AGG

    Raises ``ValueError`` when any parameter or the return value lacks a type
    hint, and ``NotImplementedError`` for any other hint combination.
    """
    from pyspark.sql.pandas.functions import PandasUDFType
    require_minimum_pandas_version()
    import pandas as pd
    # Collect the annotations of only those parameters that carry one.
    annotations = {}
    for param in sig.parameters.values():
        if param.annotation is not param.empty:
            annotations[param.name] = param.annotation
    # Check if all arguments have type hints
    parameters_sig = [annotations[parameter] for parameter
                      in sig.parameters if parameter in annotations]
    if len(parameters_sig) != len(sig.parameters):
        raise ValueError(
            "Type hints for all parameters should be specified; however, got %s" % sig)
    # Check if the return has a type hint
    return_annotation = sig.return_annotation
    if sig.empty is return_annotation:
        raise ValueError(
            "Type hint for the return type should be specified; however, got %s" % sig)
    # Series, Frame or Union[DataFrame, Series], ... -> Series or Frame
    is_series_or_frame = (
        all(a == pd.Series or  # Series
            a == pd.DataFrame or  # DataFrame
            check_union_annotation(  # Union[DataFrame, Series]
                a,
                parameter_check_func=lambda na: na == pd.Series or na == pd.DataFrame)
            for a in parameters_sig) and
        (return_annotation == pd.Series or return_annotation == pd.DataFrame))
    # Iterator[Tuple[Series, Frame or Union[DataFrame, Series], ...] -> Iterator[Series or Frame]
    is_iterator_tuple_series_or_frame = (
        len(parameters_sig) == 1 and
        check_iterator_annotation(  # Iterator
            parameters_sig[0],
            parameter_check_func=lambda a: check_tuple_annotation(  # Tuple
                a,
                parameter_check_func=lambda ta: (
                    ta == Ellipsis or  # ...
                    ta == pd.Series or  # Series
                    ta == pd.DataFrame or  # DataFrame
                    check_union_annotation(  # Union[DataFrame, Series]
                        ta,
                        parameter_check_func=lambda na: (
                            na == pd.Series or na == pd.DataFrame))))) and
        check_iterator_annotation(
            return_annotation,
            parameter_check_func=lambda a: a == pd.DataFrame or a == pd.Series))
    # Iterator[Series, Frame or Union[DataFrame, Series]] -> Iterator[Series or Frame]
    is_iterator_series_or_frame = (
        len(parameters_sig) == 1 and
        check_iterator_annotation(
            parameters_sig[0],
            parameter_check_func=lambda a: (
                a == pd.Series or  # Series
                a == pd.DataFrame or  # DataFrame
                check_union_annotation(  # Union[DataFrame, Series]
                    a,
                    parameter_check_func=lambda ua: ua == pd.Series or ua == pd.DataFrame))) and
        check_iterator_annotation(
            return_annotation,
            parameter_check_func=lambda a: a == pd.DataFrame or a == pd.Series))
    # Series, Frame or Union[DataFrame, Series], ... -> Any
    is_series_or_frame_agg = (
        all(a == pd.Series or  # Series
            a == pd.DataFrame or  # DataFrame
            check_union_annotation(  # Union[DataFrame, Series]
                a,
                parameter_check_func=lambda ua: ua == pd.Series or ua == pd.DataFrame)
            for a in parameters_sig) and (
            # It's tricky to include only types which pd.Series constructor can take.
            # Simply exclude common types used here for now (which becomes object
            # types Spark can't recognize).
            return_annotation != pd.Series and
            return_annotation != pd.DataFrame and
            not check_iterator_annotation(return_annotation) and
            not check_tuple_annotation(return_annotation)
        ))
    # The predicates are mutually exclusive by construction; first match wins.
    if is_series_or_frame:
        return PandasUDFType.SCALAR
    elif is_iterator_tuple_series_or_frame or is_iterator_series_or_frame:
        return PandasUDFType.SCALAR_ITER
    elif is_series_or_frame_agg:
        return PandasUDFType.GROUPED_AGG
    else:
        raise NotImplementedError("Unsupported signature: %s." % sig)
def check_tuple_annotation(annotation, parameter_check_func=None):
    """Return True when *annotation* is a ``Tuple[...]`` type hint and every
    type argument satisfies *parameter_check_func* (when one is given)."""
    # Python 3.6 exposes the alias name via `__name__`; 3.7/3.8 use `_name`.
    # Resolve the name first, then inspect the generic arguments.
    name = getattr(annotation, "_name", getattr(annotation, "__name__", None))
    if name != "Tuple":
        return False
    if parameter_check_func is None:
        return True
    return all(parameter_check_func(arg) for arg in annotation.__args__)
def check_iterator_annotation(annotation, parameter_check_func=None):
    """Return True when *annotation* is an ``Iterator[...]`` type hint whose
    type argument(s) satisfy *parameter_check_func* (when one is given)."""
    # `__name__` on Python 3.6, `_name` on Python 3.7/3.8.
    name = getattr(annotation, "_name", getattr(annotation, "__name__", None))
    if name != "Iterator":
        return False
    return parameter_check_func is None or all(
        parameter_check_func(arg) for arg in annotation.__args__)
def check_union_annotation(annotation, parameter_check_func=None):
    """Return True when *annotation* is a ``Union[...]`` type hint whose
    members all satisfy *parameter_check_func* (when one is given)."""
    import typing
    # Note that we cannot rely on '__origin__' in other type hints as it has
    # changed from version to version. For example, it's abc.Iterator in
    # Python 3.7 but typing.Iterator in Python 3.6.  Union's origin is stable.
    if getattr(annotation, "__origin__", None) != typing.Union:
        return False
    return parameter_check_func is None or all(
        parameter_check_func(arg) for arg in annotation.__args__)
| apache-2.0 |
sarahgrogan/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
    """L1 unit-ball boundary |w1| as a function of w0, evaluated vectorized
    (same sqrt-of-square arithmetic as the scalar form)."""
    a = np.asarray(xs, dtype=float)
    return np.sqrt((1 - np.sqrt(a ** 2.0)) ** 2.0)
def l2(xs):
    """L2 unit-circle boundary w1 = sqrt(1 - w0^2), evaluated vectorized."""
    a = np.asarray(xs, dtype=float)
    return np.sqrt(1.0 - a ** 2.0)
def el(xs, z):
    """Elastic-net unit-ball boundary for mixing parameter *z*, evaluated at
    each point of *xs* (closed-form solution for w1 given w0 = x)."""
    def _boundary(x):
        # Discriminant of the quadratic in w1; kept in the original term
        # order so results are bit-identical to the one-line formula.
        disc = (4 * z ** 2
                - 8 * x * z ** 2
                + 8 * x ** 2 * z ** 2
                - 16 * x ** 2 * z ** 3
                + 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
        return (2 - 2 * x - 2 * z + 4 * x * z - disc - 2 * x * z ** 2) / (2 - 4 * z)
    return np.array([_boundary(x) for x in xs])
def cross(ext):
    """Draw black x and y axes through the origin, spanning [-ext, ext]."""
    for axis_x, axis_y in (([-ext, ext], [0, 0]), ([0, 0], [-ext, ext])):
        plt.plot(axis_x, axis_y, "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # keep away from exactly 0.5: the elastic-net formula divides by (2 - 4*z)
cross(1.2)
# Each boundary is computed in the first quadrant and mirrored into the
# other three by sign flips.
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
hrjn/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 21 | 4784 | """
=======================================================================================
Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation
=======================================================================================
This is an example of applying :class:`sklearn.decomposition.NMF`
and :class:`sklearn.decomposition.LatentDirichletAllocation` on a corpus of documents and
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
Non-negative Matrix Factorization is applied with two different objective
functions: the Frobenius norm, and the generalized Kullback-Leibler divergence.
The latter is equivalent to Probabilistic Latent Semantic Indexing.
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
# Problem size; increase for a harder (and slower) decomposition.
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
    """Print, for each topic in *model*, its index followed by the
    *n_top_words* highest-weighted feature names (largest weight first),
    then a trailing blank line."""
    for idx, weights in enumerate(model.components_):
        top = weights.argsort()[:-n_top_words - 1:-1]  # indices, descending weight
        print("Topic #%d: " % idx + " ".join(feature_names[i] for i in top))
    print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
                             remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))

# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
                                   max_features=n_features,
                                   stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))

# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
                                max_features=n_features,
                                stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
print()

# Fit the NMF model with the (default) Frobenius-norm objective.
print("Fitting the NMF model (Frobenius norm) with tf-idf features, "
      "n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1,
          alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))

print("\nTopics in NMF model (Frobenius norm):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)

# Fit the NMF model again with the generalized KL objective (equivalent to PLSI).
print("Fitting the NMF model (generalized Kullback-Leibler divergence) with "
      "tf-idf features, n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1, beta_loss='kullback-leibler',
          solver='mu', max_iter=1000, alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))

print("\nTopics in NMF model (generalized Kullback-Leibler divergence):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)

# LDA works on raw term counts, not tf-idf.
print("Fitting LDA models with tf features, "
      "n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
                                learning_method='online',
                                learning_offset=50.,
                                random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))

print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
Mistobaan/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 9 | 6700 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
  """Tests feature_engineering_fn."""

  def testFeatureEngineeringFn(self):
    """Both training features and evaluation labels must come from the
    feature_engineering_fn output, not from the raw input_fn output."""

    def input_fn():
      return {
          "x": constant_op.constant([1.])
      }, {
          "y": constant_op.constant([11.])
      }

    def feature_engineering_fn(features, labels):
      _, _ = features, labels
      return {
          "transformed_x": constant_op.constant([9.])
      }, {
          "transformed_y": constant_op.constant([99.])
      }

    def model_fn(features, labels):
      # dummy variable:
      _ = variables_lib.Variable([0.])
      _ = labels
      predictions = features["transformed_x"]
      loss = constant_op.constant([2.])
      update_global_step = training_util.get_global_step().assign_add(1)
      return predictions, loss, update_global_step

    estimator = estimator_lib.Estimator(
        model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
    estimator.fit(input_fn=input_fn, steps=1)
    prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
    # predictions = transformed_x (9)
    self.assertEqual(9., prediction)
    metrics = estimator.evaluate(
        input_fn=input_fn, steps=1,
        metrics={"label":
                 metric_spec.MetricSpec(lambda predictions, labels: labels)})
    # labels = transformed_y (99)
    self.assertEqual(99., metrics["label"])

  def testFeatureEngineeringFnWithSameName(self):
    """Regression test for GitHub #12205: a feature_engineering_fn that
    reuses the original feature/label names must not be applied twice."""

    def input_fn():
      return {
          "x": constant_op.constant(["9."])
      }, {
          "y": constant_op.constant(["99."])
      }

    def feature_engineering_fn(features, labels):
      # Github #12205: raise a TypeError if called twice.
      _ = string_ops.string_split(features["x"])
      features["x"] = constant_op.constant([9.])
      labels["y"] = constant_op.constant([99.])
      return features, labels

    def model_fn(features, labels):
      # dummy variable:
      _ = variables_lib.Variable([0.])
      _ = labels
      predictions = features["x"]
      loss = constant_op.constant([2.])
      update_global_step = training_util.get_global_step().assign_add(1)
      return predictions, loss, update_global_step

    estimator = estimator_lib.Estimator(
        model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
    estimator.fit(input_fn=input_fn, steps=1)
    prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
    # predictions = transformed_x (9)
    self.assertEqual(9., prediction)
    metrics = estimator.evaluate(
        input_fn=input_fn, steps=1,
        metrics={"label":
                 metric_spec.MetricSpec(lambda predictions, labels: labels)})
    # labels = transformed_y (99)
    self.assertEqual(99., metrics["label"])

  def testNoneFeatureEngineeringFn(self):
    """An Estimator without a feature_engineering_fn must see the raw
    input_fn features, unlike one configured with the transforming fn."""

    def input_fn():
      return {
          "x": constant_op.constant([1.])
      }, {
          "y": constant_op.constant([11.])
      }

    def feature_engineering_fn(features, labels):
      _, _ = features, labels
      return {
          "x": constant_op.constant([9.])
      }, {
          "y": constant_op.constant([99.])
      }

    def model_fn(features, labels):
      # dummy variable:
      _ = variables_lib.Variable([0.])
      _ = labels
      predictions = features["x"]
      loss = constant_op.constant([2.])
      update_global_step = training_util.get_global_step().assign_add(1)
      return predictions, loss, update_global_step

    estimator_with_fe_fn = estimator_lib.Estimator(
        model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
    estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
    estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
    estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)

    # predictions = x
    prediction_with_fe_fn = next(
        estimator_with_fe_fn.predict(
            input_fn=input_fn, as_iterable=True))
    self.assertEqual(9., prediction_with_fe_fn)
    prediction_without_fe_fn = next(
        estimator_without_fe_fn.predict(
            input_fn=input_fn, as_iterable=True))
    self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
  """Custom optimizer tests."""

  def testIrisMomentum(self):
    """A DNNClassifier trained on Iris with a user-supplied Momentum
    optimizer callable should reach a reasonable test accuracy."""
    random.seed(42)
    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=42)

    def custom_optimizer():
      return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)

    classifier = learn.DNNClassifier(
        hidden_units=[10, 20, 10],
        feature_columns=learn.infer_real_valued_columns_from_input(x_train),
        n_classes=3,
        optimizer=custom_optimizer,
        config=learn.RunConfig(tf_random_seed=1))
    classifier.fit(x_train, y_train, steps=400)
    predictions = np.array(list(classifier.predict_classes(x_test)))
    score = accuracy_score(y_test, predictions)
    # Loose bound: guards against gross training failures, not exact accuracy.
    self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
ssanderson/dask | dask/dataframe/tests/test_rolling.py | 6 | 2485 | import pandas as pd
import pandas.util.testing as tm
import numpy as np
import dask.dataframe as dd
from dask.async import get_sync
from dask.utils import raises, ignoring
def eq(p, d):
    """Assert that dask collection *d*, computed with the synchronous
    scheduler, equals the pandas object *p*."""
    computed = d.compute(get=get_sync)
    check = tm.assert_frame_equal if isinstance(d, dd.DataFrame) else tm.assert_series_equal
    check(p, computed)
def rolling_tests(p, d):
    """Check every dask rolling_* function against its pandas counterpart.

    *p* is a pandas object and *d* the equivalent dask collection.
    """
    eq(pd.rolling_count(p, 3), dd.rolling_count(d, 3))
    eq(pd.rolling_sum(p, 3), dd.rolling_sum(d, 3))
    eq(pd.rolling_mean(p, 3), dd.rolling_mean(d, 3))
    eq(pd.rolling_median(p, 3), dd.rolling_median(d, 3))
    eq(pd.rolling_min(p, 3), dd.rolling_min(d, 3))
    eq(pd.rolling_max(p, 3), dd.rolling_max(d, 3))
    eq(pd.rolling_std(p, 3), dd.rolling_std(d, 3))
    eq(pd.rolling_var(p, 3), dd.rolling_var(d, 3))
    eq(pd.rolling_skew(p, 3), dd.rolling_skew(d, 3))
    eq(pd.rolling_kurt(p, 3), dd.rolling_kurt(d, 3))
    eq(pd.rolling_quantile(p, 3, 0.5), dd.rolling_quantile(d, 3, 0.5))
    mad = lambda x: np.fabs(x - x.mean()).mean()
    eq(pd.rolling_apply(p, 3, mad), dd.rolling_apply(d, 3, mad))
    # rolling_window may be unavailable (optional dependency) -- skip if so.
    with ignoring(ImportError):
        eq(pd.rolling_window(p, 3, 'boxcar'), dd.rolling_window(d, 3, 'boxcar'))
    # Test with edge-case window sizes
    eq(pd.rolling_sum(p, 0), dd.rolling_sum(d, 0))
    eq(pd.rolling_sum(p, 1), dd.rolling_sum(d, 1))
    # Test with kwargs
    eq(pd.rolling_sum(p, 3, min_periods=3), dd.rolling_sum(d, 3, min_periods=3))
def test_rolling_series():
    """Rolling operations on a 3-partition dask Series must match pandas."""
    series = pd.Series(np.random.randn(25).cumsum())
    rolling_tests(series, dd.from_pandas(series, 3))
def test_rolling_dataframe():
    """Rolling operations on a 3-partition dask DataFrame must match pandas."""
    frame = pd.DataFrame({'a': np.random.randn(25).cumsum(),
                          'b': np.random.randn(25).cumsum()})
    rolling_tests(frame, dd.from_pandas(frame, 3))
def test_raises():
    """Invalid window arguments must raise the documented exception types."""
    frame = pd.DataFrame({'a': np.random.randn(25).cumsum(),
                          'b': np.random.randn(25).cumsum()})
    ddf = dd.from_pandas(frame, 3)
    cases = [(TypeError, lambda: dd.rolling_mean(ddf, 1.5)),           # non-int window
             (ValueError, lambda: dd.rolling_mean(ddf, -1)),           # negative window
             (NotImplementedError, lambda: dd.rolling_mean(ddf, 3, freq=2)),
             (NotImplementedError, lambda: dd.rolling_mean(ddf, 3, how='min'))]
    for exc_type, thunk in cases:
        assert raises(exc_type, thunk)
def test_rolling_names():
    """Task-name generation must be deterministic."""
    df = pd.DataFrame({'a': [1, 2, 3],
                       'b': [4, 5, 6]})
    a = dd.from_pandas(df, npartitions=2)
    # Build the same rolling computation twice: the tokenized graph keys
    # (and therefore the sorted graphs) must be identical across calls.
    assert sorted(dd.rolling_sum(a, 2).dask) == sorted(dd.rolling_sum(a, 2).dask)
| bsd-3-clause |
yanchen036/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 28 | 5024 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
from tensorflow.python.util.deprecation import deprecated
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# Pandas dtype name -> coarse kind accepted for extraction.  Only the KEYS
# are consulted in this module (membership tests in the extract_* helpers);
# the values are informational.
# NOTE(review): 'bool' maps to 'i' rather than 'int'/'bool' -- looks like a
# typo, but it is harmless while only the keys are used; confirm before
# relying on the values.
PANDAS_DTYPES = {
    'int8': 'int',
    'int16': 'int',
    'int32': 'int',
    'int64': 'int',
    'uint8': 'int',
    'uint16': 'int',
    'uint32': 'int',
    'uint64': 'int',
    'float16': 'float',
    'float32': 'float',
    'float64': 'float',
    'bool': 'i'
}
@deprecated(None, 'Please use tf.estimator.inputs.pandas_input_fn')
def pandas_input_fn(x,
                    y=None,
                    batch_size=128,
                    num_epochs=1,
                    shuffle=True,
                    queue_capacity=1000,
                    num_threads=1,
                    target_column='target'):
  """This input_fn diffs from the core version with default `shuffle`.

  Thin wrapper: every argument is forwarded unchanged to
  `core_pandas_input_fn`; only the default value of `shuffle` differs here.
  """
  return core_pandas_input_fn(x=x,
                              y=y,
                              batch_size=batch_size,
                              shuffle=shuffle,
                              num_epochs=num_epochs,
                              queue_capacity=queue_capacity,
                              num_threads=num_threads,
                              target_column=target_column)
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_data(data):
  """Extract data from pandas.DataFrame for predictors.

  Given a DataFrame, will extract the values and cast them to float. The
  DataFrame is expected to contain values of type int, float or bool.

  Args:
    data: `pandas.DataFrame` containing the data to be extracted.

  Returns:
    A numpy `ndarray` of the DataFrame's values as floats.

  Raises:
    ValueError: if data contains types other than int, float or bool.
  """
  if not isinstance(data, pd.DataFrame):
    return data
  # Guard clause: reject any column whose dtype is not int/float/bool.
  bad_columns = [col for col in data
                 if data[col].dtype.name not in PANDAS_DTYPES]
  if bad_columns:
    details = ', '.join("'" + str(col) + "' type='" + data[col].dtype.name + "'"
                        for col in bad_columns)
    raise ValueError('Data types for extracting pandas data must be int, '
                     'float, or bool. Found: ' + details)
  return data.values.astype('float')
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_matrix(data):
  """Extracts numpy matrix from pandas DataFrame.

  Args:
    data: `pandas.DataFrame` containing the data to be extracted.

  Returns:
    A numpy `ndarray` of the DataFrame's values.  Non-DataFrame inputs are
    returned unchanged.
  """
  if not isinstance(data, pd.DataFrame):
    return data
  # `DataFrame.as_matrix()` was deprecated in pandas 0.23 and removed in
  # pandas 1.0; `.values` is the documented equivalent accessor.
  return data.values
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_labels(labels):
  """Extract data from pandas.DataFrame for labels.

  Args:
    labels: `pandas.DataFrame` or `pandas.Series` containing one column of
      labels to be extracted.

  Returns:
    A numpy `ndarray` of labels from the DataFrame.

  Raises:
    ValueError: if more than one column is found or type is not int, float or
      bool.
  """
  # NOTE(review): pd.Series is NOT an instance of pd.DataFrame, so Series
  # inputs fall through to the final `return labels` untouched -- the
  # comment below and the docstring suggest otherwise; confirm intent.
  if isinstance(labels,
                pd.DataFrame):  # pandas.Series also belongs to DataFrame
    if len(labels.columns) > 1:
      raise ValueError('Only one column for labels is allowed.')
    bad_data = [column for column in labels
                if labels[column].dtype.name not in PANDAS_DTYPES]
    if not bad_data:
      return labels.values
    else:
      error_report = ["'" + str(column) + "' type="
                      + str(labels[column].dtype.name) for column in bad_data]
      raise ValueError('Data types for extracting labels must be int, '
                       'float, or bool. Found: ' + ', '.join(error_report))
  else:
    return labels
| apache-2.0 |
srio/Diffraction | correlation_lengths.py | 1 | 3847 | """
correlation_lengths.py: calculates
"""
import numpy
__author__ = "Manuel Sanchez del Rio"
__contact__ = "srio@esrf.eu"
__copyright = "ESRF, 2016"
def std0(x):
    """Root-mean-square of *x* about zero, i.e. the standard deviation with
    the mean pinned at 0 rather than estimated from the data."""
    return numpy.sqrt(numpy.mean(numpy.abs(x - 0.0) ** 2))
def coherence_length_longitudinal(lambda1, lambda2, lambda0=None):
    """Longitudinal (temporal) coherence length: lambda0**2 / (2 * dlambda).

    lambda1, lambda2: wavelengths bounding the bandwidth [same units].
    lambda0: central wavelength; defaults to the mean of lambda1 and lambda2.
    """
    delta_lambda = numpy.abs(lambda1 - lambda2)
    # Fixed: identity test `is None` instead of `== None` -- the equality
    # form is unidiomatic and misbehaves for array-valued lambda0.
    if lambda0 is None:
        lambda0 = 0.5 * numpy.abs(lambda1 + lambda2)
    return lambda0**2 / (2 * delta_lambda)
def coherence_length_transverse(lambda0, distance, separation_at_source):
    """Transverse coherence length at *distance* for two source points
    separated by *separation_at_source*: lambda0 * L / (2 * |s|)."""
    separation = numpy.abs(separation_at_source)
    return lambda0 * distance / (2 * separation)
def coherence_length_histogram(lambda0, distance, sigma, mu=0.0, npoints=1000, do_plot=True):
    """Monte-Carlo estimate of the most probable transverse coherence length
    for a Gaussian source.

    lambda0:  central wavelength [m]
    distance: source-to-observation distance [m]
    sigma:    Gaussian source size (standard deviation) [m]
    mu:       source center [m]
    npoints:  number of sampled source points per set
    do_plot:  if True, also show diagnostic histograms (needs matplotlib)

    Returns the coherence length [m] at the mode of its histogram.
    """
    # source: two independent sets of Gaussian-distributed source points
    s1 = numpy.random.normal(mu, sigma, npoints)
    s2 = numpy.random.normal(mu, sigma, npoints)
    # separation_at_source: all pairwise |s1_i - s2_j| separations
    x1 = numpy.outer(s1, numpy.ones(s2.size))
    x2 = numpy.outer(numpy.ones(s1.size), s2)
    dd = numpy.abs(x1 - x2).flatten()
    separation_at_source = numpy.unique(dd)[1:-1]  # drop smallest/largest
    cl = coherence_length_transverse(lambda0, distance, separation_at_source)
    # BUG FIX: `bins`/`count` used in the return statement were previously
    # created only inside the `if do_plot:` branch (by plt.hist), so calling
    # with do_plot=False raised NameError.  Build the histogram that drives
    # the return value unconditionally with numpy.
    hist_range = [0, 10 * 1e6 * coherence_length_transverse(lambda0, distance, 2.35 * sigma)]
    count, bins = numpy.histogram(1e6 * cl, 500, range=hist_range, density=True)
    if do_plot:
        import matplotlib.pyplot as plt
        # source distribution vs. its analytic Gaussian
        c1, b1, ignored = plt.hist(s1, 100, normed=True)
        plt.plot(b1, 1 / (sigma * numpy.sqrt(2 * numpy.pi)) * numpy.exp(-(b1 - mu)**2 / (2 * sigma**2)),
                 linewidth=2, color='r')
        plt.show()
        # separation distribution: difference of two N(mu, sigma) variables
        # is N(0, sigma*sqrt(2)); https://arxiv.org/pdf/1508.02238v1.pdf
        c2, b2, ignored = plt.hist(1e6 * separation_at_source, 100, normed=True)
        sigma_estimated = sigma * numpy.sqrt(2)
        print("StDev of source separation = %f um; estimating = %f um" %
              (1e6 * std0(separation_at_source), 1e6 * sigma_estimated))
        plt.plot(b2, 2 / (1e6 * sigma_estimated * numpy.sqrt(2 * numpy.pi)) * numpy.exp(-(b2 - 0.0)**2 / (2 * (1e6 * sigma_estimated)**2)),
                 linewidth=2, color='r')
        plt.show()
        # coherence-length distribution (same binning as the numpy histogram)
        plt.hist(1e6 * cl, 500, normed=True, range=hist_range)
        plt.show()
    return bins[count.argmax()] * 1e-6
if __name__ == "__main__":
    # Beamline geometry: 1 Angstrom photons observed 30 m from the source,
    # with a 1e-4 relative bandwidth.
    lambda0 = 1e-10
    distance = 30.0
    delta_lambda = lambda0 * 1e-4
    lambda1 = lambda0 - 0.5 * delta_lambda
    lambda2 = lambda0 + 0.5 * delta_lambda
    # Source sizes [m] -- presumably ESRF vertical / low-beta / high-beta /
    # EBS horizontal RMS sizes; TODO confirm against machine parameters.
    sigma_v = 3.5e-6
    sigma_lb = 37.4e-6
    sigma_hb = 387.8e-6
    sigma_ebs = 1e-6 # 27.2e-6
    print("Wavelength = %f A"%(lambda0*1e10))
    print("Correlation length (longitudinal): %f um"%(1e6*coherence_length_longitudinal(lambda1,lambda2)))
    print("Correlation length V (transversal): %f um"%(1e6*coherence_length_transverse(lambda0,distance,2.35*sigma_v)))
    print("Correlation length H Hb (transversal): %f um"%(1e6*coherence_length_transverse(lambda0,distance,2.35*sigma_hb)))
    print("Correlation length H Lb (transversal): %f um"%(1e6*coherence_length_transverse(lambda0,distance,2.35*sigma_lb)))
    print("Correlation length H EBS (transversal): %f um"%(1e6*coherence_length_transverse(lambda0,distance,2.35*sigma_ebs)))
    # print("HISTOGRAM Correlation length (longitudinal): %f um"%(1e6*coherence_length_longitudinal(lambda1,lambda2)))
    # print("HISTOGRAM Correlation length V (transversal): %f um"%(1e6*coherence_length_histogram(lambda0,distance,sigma_v)))
    # print("HISTOGRAM Correlation length H Hb (transversal): %f um"%(1e6*coherence_length_histogram(lambda0,distance,sigma_hb)))
    # print("HISTOGRAM Correlation length H Lb (transversal): %f um"%(1e6*coherence_length_histogram(lambda0,distance,sigma_lb)))
    print("HISTOGRAM Correlation length H EBS (transversal): %f um"%(1e6*coherence_length_histogram(lambda0,distance,sigma_ebs)))
| gpl-2.0 |
tanmoy7989/idp | analyzeGo.py | 1 | 11444 | '''/*
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
* <tanmoy.7989@gmail.com> wrote this file. As long as you retain this notice you
* can do whatever you want with this stuff. If we meet some day, and you think
* this stuff is worth it, you can buy me a beer in return. Tanmoy Sanyal
* ----------------------------------------------------------------------------
*/
'''
#!/usr/bin/env python
import os, sys, pickle, numpy as np
import sim, protein, pickleTraj, measure

# Histogram settings
StepFreq = 1
NBlocks = 1
NBins = 50

# compute flags
computeRMSD = True
computeClust = False
computeFoldCurve = True
computeContactDist = False
doPlot = True

# user input; usage: analyzeGo.py <Prefix> <NativePdb> <TempSet>
Prefix = os.path.abspath(sys.argv[1])
NativePdb = os.path.abspath(sys.argv[2]) # must be coarse grained
OutputDir = os.path.abspath(os.path.dirname(NativePdb))
if not os.path.dirname(Prefix): Prefix = os.path.join(OutputDir, Prefix)
TempSet = float(sys.argv[3])
Temps = np.loadtxt(os.path.join(OutputDir, 'temps.txt'))

# sampling: load trajectory for the requested temperature and subsample it
TrajFn = Prefix + '.lammpstrj.%3.2f.gz' % TempSet
Trj = pickleTraj(TrajFn) ; NFrames = len(Trj)
FrameRange = range(0, len(Trj), StepFreq)
NFrames = len(FrameRange)

# sequence details from the coarse-grained native structure
p = protein.ProteinClass(Pdb = NativePdb)
Seq = p.Seq ; NRes = len(Seq)
ResTypes = list(set(Seq)) ; NResTypes = len(ResTypes)
# map res atom ind to atomind
def res2atom(resind, resatomind):
    """Map (residue index, bead index within residue) to a global CG atom
    index.  Glycine contributes 3 beads; every other residue contributes 4.
    """
    if resatomind > 3: raise TypeError('Each residue has 4 CG beads numbered 0,1,2,3')
    offset = sum(3 if r == 'GLY' else 4 for r in Seq[:resind])
    return offset + resatomind
# reference native coordinates
RefPos = p.Pos

# store backbone indices (beads 0,1,2 of every residue)
BBinds = []
for i, r in enumerate(Seq):
    BBinds.extend( [res2atom(i,x) for x in [0,1,2]] )

# get residue COM map: per-residue list of CG bead indices
# NOTE(review): GLY is given beads [0,1,2] here but [0,1,2,3] elsewhere in
# this file's bead counting -- confirm which convention the CG model uses.
COMinds = []
for i, r in enumerate(Seq):
    resatominds = [0,1,2] if r == 'GLY' else [0,1,2,3]
    COMinds.append( [res2atom(i,x) for x in resatominds] )
######## RMSD ANALYSIS ########
if computeRMSD:
# RMSD per atom
rmsd_all_frame = np.zeros([NFrames])
rmsd_atom_frame = np.zeros([NFrames, len(BBinds)], np.float64)
pb = sim.utility.ProgressBar(Text = 'Calculating per frame RMSD at %3.2f K...' % TempSet, Steps = NFrames)
for i, frame in enumerate(FrameRange):
Pos1 = RefPos[BBinds]
Pos2 = Trj[frame][BBinds]
Pos1Vec, Pos2Vec, RotMat, Residuals = sim.geom.AlignmentRMSD(Pos1 = Pos1, Pos2 = Pos2)
rmsd_all_frame[i] = sim.geom.RMSD(Pos1 = Pos1, Pos2 = Pos2)
Pos1_ = Pos1 + Pos1Vec ; Pos2_ = np.dot(Pos2 + Pos2Vec, RotMat)
for j in range(len(BBinds)):
rmsd_atom_frame[i,j] = sim.geom.dRMSD(Pos1_[j], Pos2_[j])
pb.Update(i)
# RMSD per residue and per residue type
BlockSize = int(NFrames / NBlocks)
rmsd_resind = np.zeros(NRes) ; rmsd_resind_err = np.zeros(NRes) ; rmsd_resind_block = np.zeros([NBlocks, NRes], np.float64)
rmsd_restype = {} ; rmsd_restype_err = {} ; rmsd_restype_block = {} ; n = {}
for i, r in enumerate(Seq):
rmsd_restype[r] = 0.0
rmsd_restype_err[r] = 0.0
rmsd_restype_block[r] = np.zeros(NBlocks)
n[r] = 0
for b in range(NBlocks):
start = b * BlockSize
stop = (b+1) * BlockSize
rmsd_atom_block = rmsd_atom_frame[start:stop]
for i, r in enumerate(Seq):
rmsd_resind_block[b, i] = np.mean(rmsd_atom_block[:, i*3 : (i+1)*3])
rmsd_restype_block[r][b] += rmsd_resind_block[b, i]
if b == 0: n[r] += 1
rmsd_resind = np.mean(rmsd_resind_block, axis = 0)
if NBlocks > 1: rmsd_resind_err = np.std(rmsd_resind_block, axis = 0, ddof = 1)
for i, r in enumerate(Seq):
rmsd_restype[r] = np.mean(rmsd_restype_block[r]/n[r])
if NBlocks > 1: rmsd_restype_err[r] = np.std(rmsd_restype_block[r]/n[r], ddof = 1)
# RMSD distribution
print '\nBinning RMSD'
measure.NFrames = NFrames
measure.NBins = NBins
measure.NBlocks = NBlocks
rmsd_all_hist = measure.makeHist(rmsd_all_frame)
# dump RMSD data
pickleName = Prefix + '_RMSD_%3.2fK.pickle' % TempSet
ret = ( (rmsd_all_frame, rmsd_atom_frame),
(rmsd_resind, rmsd_resind_err),
(rmsd_restype, rmsd_restype_err),
rmsd_all_hist )
pickle.dump(ret, open(pickleName, 'w'))
######## CLUSTER ANALYSIS ########
if computeClust:
clustret = sim.cluster.ClusterMSS(Trj = Trj, Cutoff = 3.0)
sumfileName = Prefix + '_clustsum_%3.2fK.txt' % TempSet
clustfileName = Prefix + '_clust_%3.2fK.pdb' % TempSet
sim.cluster.WriteClustResults(Trj, clustret, sumfileName, clustfileName)
######## FOLDING CURVE ANALYSIS ########
if computeFoldCurve:
import analysis as a
a.Prefix = os.path.join(OutputDir, Prefix)
a.RawDataShelf = os.path.join(OutputDir, 'rawdata.shelf')
a.ConfigWeightShelf = os.path.join(OutputDir, 'confweight.shelf')
a.Temps = np.loadtxt(os.path.join(OutputDir, 'temps.txt'))
a.TrajFnList = [Prefix + '.lammpstrj.%3.2f.gz' % t for t in a.Temps]
a.EneFnList = [Prefix + '.ene.dat.%3.2f' % t for t in a.Temps]
a.GetData(RefPos = RefPos, CalcInd = BBinds)
a.NBins = NBins ; a.NBlocks = NBlocks
#temps, foldfrac, err = a.FoldCurve(RMSDCut = 3.0)
a.FreeEne2D(OrderParam = ('Rg', 'RMSD'), TempSet = TempSet)
######## CONTACT DISTANCES COMPARISON ########
if computeContactDist:
import proteinfunc as pfunc
BoxL = Trj.FrameData['BoxL'] ;invBoxL = 1./BoxL
MinCO = 3
ResRadius = 8.0 #A
AAContactDist = {}
AAContactMap = np.zeros([NRes, NRes], np.int16)
p.Pos = RefPos ; ResPos = p.ResPos()
N = len(ResPos)
for i in range(0, N-1):
Pos1 = ResPos[i]
for j in range(i+1, N):
rij = ResPos[j] - Pos1
rsq = np.dot(rij, rij)
AAContactDist[(i,j)] = np.sqrt(rsq)
if rsq <= ResRadius*ResRadius and j - i >= MinCO:
AAContactMap[i,j] = 1
AAContactMap[j,i] = 1
CGContactDist = {} ; n = {}
CGContactMap = np.zeros([NRes, NRes], np.float64)
pb = sim.utility.ProgressBar(Text = 'Enumerating CG contacts at %3.2f K...' % TempSet, Steps = NFrames)
for ii, frame in enumerate(FrameRange):
p.Pos = Trj[ii] ; ResPos = p.ResPos()
N = len(ResPos)
for i in range(0, N-1):
Pos1 = ResPos[i]
for j in range(i+1, N):
rij = ResPos[j] - Pos1
if np.all(BoxL): rij -= BoxL * round(rij * invBoxL)
rsq = np.dot(rij, rij)
k = (i,j)
if not CGContactDist.has_key(k):
CGContactDist[k] = 0.0
n[k] = 0
else:
CGContactDist[k] += np.sqrt(rsq)
n[(i,j)] += 1
if rsq <= ResRadius*ResRadius and j - i >= MinCO:
CGContactMap[i,j] += 1.0
CGContactMap[j,i] += 1.0
pb.Update(ii)
for k in CGContactDist.keys():
if not n[k] == 0: CGContactDist[k] /= float(n[k])
CGContactMap /= NFrames
pickleName = Prefix + '_contactdistcorr_%3.2fK.pickle' % TempSet
ret = ( (AAContactMap, CGContactMap), (AAContactDist, CGContactDist) )
pickle.dump(ret, open(pickleName, 'w'))
######## PLOTTING ########
if doPlot:
import matplotlib.pyplot as plt
import matplotlib.cm as cm
#### PLOT RESIDUE CONTRIBUTION TO BACKBONE RMSD ####
pickleName = Prefix + '_RMSD_%3.2fK.pickle' % TempSet
if os.path.isfile(pickleName):
data = pickle.load(open(pickleName, 'r'))
rmsd_restype, rmsd_restype_err = data[2]
rmsd_hist = data[3]
fig = plt.figure(facecolor = 'w', edgecolor = 'w', figsize = (10, 5))
x, y, err = rmsd_hist
ax1 = fig.add_subplot(1,2,1)
ax1.errorbar(x, y, yerr = err, lw = 3, marker = 'o', markersize = 10, color = 'blue')
ax1.set_xlabel(r'$RMSD (\AA)$' + ' from native', fontsize = 20)
ax1.set_ylabel('distribution', fontsize = 20)
x = [] ; y = []; yerr = []
for k in rmsd_restype.keys():
x.append(k)
y.append(rmsd_restype[k])
yerr.append(rmsd_restype_err[k])
ax2 = fig.add_subplot(1,2,2)
index = np.arange(0, len(x), 1)
rect = ax2.bar(index, y, width = 0.6, align = 'center', alpha=0.4, color='r', yerr = yerr)
ax2.set_xticks(index)
ax2.set_xticklabels([]) ; ax2.set_xticklabels(x, rotation = 45)
ax2.set_ylabel('Avg. ' + r'$RMSD (\AA)$' + ' from native', fontsize = 20)
#### PLOT FREE ENERGY SURFACE ####
pickleName = Prefix + '_Rg_RMSD_MBAR.pickle'
if os.path.isfile(pickleName):
(x,y), pmf, err = pickle.load(open(pickleName, 'r'))
fig = plt.figure(facecolor = 'w', edgecolor = 'w', figsize = (5,5))
ax = fig.add_subplot(1,1,1)
im = ax.imshow(np.transpose(pmf), origin = 'lower', aspect = 'auto', cmap = cm.nipy_spectral,
interpolation = "none", extent = ([x.min(), x.max(), y.min(), y.max()]) )
ax.set_xlabel(r'$R_g (\AA)$', fontsize = 15)
ax.set_ylabel(r'$RMSD (\AA)$' + ' from native', fontsize = 15)
fig.colorbar(im, orientation = 'vertical', label = 'pmf (kcal/mol)')
#### PLOT FOLDING FRACTION ####
pickleName = Prefix + '_foldcurve_MBAR.pickle'
if os.path.isfile(pickleName):
temps, foldfrac, err = pickle.load(open(pickleName, 'r'))
fig = plt.figure(facecolor = 'w', edgecolor = 'w', figsize = (5,5))
ax = fig.add_subplot(1,1,1)
ax.errorbar(temps, foldfrac, yerr = err, lw = 3, marker = 'o', markersize = 6)
ax.set_xlim([0.98 * min(temps), 1.02 * max(temps)])
ax.set_xlabel('Temp (K)', fontsize = 15)
ax.set_ylabel('folding fraction', fontsize = 15)
#### PLOT CONTACT DIST CORRELATION ####
pickleName = Prefix + '_contactdistcorr_%3.2fK.pickle' % TempSet
if os.path.isfile(pickleName):
ret = pickle.load(open(pickleName, 'r'))
fig = plt.figure(facecolor = 'w', edgecolor = 'w')
AAContactMap, CGContactMap = ret[0]
ax1 = fig.add_subplot(1,3,1)
im1 = ax1.imshow(np.logical_not(AAContactMap), cmap = cm.gray, origin = 'lower', aspect = 'equal', interpolation = None)
ax1.set_title('Native contact map')
ax2 = fig.add_subplot(1,3,2)
im2 = ax2.imshow(np.logical_not(CGContactMap), cmap = cm.gray, origin = 'lower', aspect = 'equal', interpolation = None)
ax2.set_title('CG contact map')
AAContactDist, CGContactDist = ret[1]
ax3 = fig.add_subplot(1,3,3)
x = [] ; y = []
for k in AAContactDist.keys():
x.append(AAContactDist[k])
y.append(CGContactDist[k])
R = np.corrcoef(x,y)[0,1]
ax3.scatter(x, y, s = 30, c = 'r', label = '')
ax3.plot(np.sort(x), np.sort(x), 'k-', lw = 3, label = 'R = %g' % R)
ax3.legend(loc = 'best', prop = {'size': 15})
ax3.set_xlabel('AA contact distance ' + r'$(\AA)$', fontsize = 15)
ax3.set_ylabel('CG contact distance ' + r'$(\AA)$', fontsize = 15)
#plt.tight_layout()
plt.show()
| gpl-3.0 |
musically-ut/statsmodels | statsmodels/datasets/fertility/data.py | 26 | 2511 | #! /usr/bin/env python
"""World Bank Fertility Data."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This data is distributed according to the World Bank terms of use. See SOURCE."""
TITLE = """World Bank Fertility Data"""
SOURCE = """
This data has been acquired from
The World Bank: Fertility rate, total (births per woman): World Development Indicators
At the following URL: http://data.worldbank.org/indicator/SP.DYN.TFRT.IN
The sources for these statistics are listed as
(1) United Nations Population Division. World Population Prospects
(2) United Nations Statistical Division. Population and Vital Statistics Repot (various years)
(3) Census reports and other statistical publications from national statistical offices
(4) Eurostat: Demographic Statistics
(5) Secretariat of the Pacific Community: Statistics and Demography Programme
(6) U.S. Census Bureau: International Database
The World Bank Terms of Use can be found at the following URL
http://go.worldbank.org/OJC02YMLA0
"""
DESCRSHORT = """Total fertility rate represents the number of children that would be born to a woman if she were to live to the end of her childbearing years and bear children in accordance with current age-specific fertility rates."""
DESCRLONG = DESCRSHORT
#suggested notes
NOTE = """
::
This is panel data in wide-format
Number of observations: 219
Number of variables: 58
Variable name definitions:
Country Name
Country Code
Indicator Name - The World Bank Series indicator
Indicator Code - The World Bank Series code
1960 - 2013 - The fertility rate for the given year
"""
import numpy as np
import pandas as pd
from statsmodels.datasets import utils as du
from os.path import dirname, abspath, join
def load():
    """
    Load the data and return a Dataset class instance.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    data = _get_data()
    names = data.columns.tolist()
    # Wrap zip/map in list(): on Python 3 they return lazy iterators, and
    # np.array cannot consume an iterator as a dtype spec or as row data.
    # On Python 2 list() is a harmless no-op copy.
    dtype = list(zip(names, ['a45', 'a3', 'a40', 'a14'] + ['<f8'] * 54))
    data = list(map(tuple, data.values.tolist()))
    dataset = du.Dataset(data=np.array(data, dtype=dtype), names=names)
    return dataset
def load_pandas():
    """Load the fertility data as a pandas-backed Dataset instance."""
    return du.Dataset(data=_get_data())
def _get_data():
    """Read fertility.csv from this module's directory into a DataFrame."""
    # Data file is expected to live next to this source file.
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    data = pd.read_csv(join(filepath, 'fertility.csv'))
    return data | bsd-3-clause |
RuthAngus/turnip | turnip/calc_completeness.py | 1 | 1492 | """
Ruth's version of Burke's test_comp_grid.py as a function.
"""
import numpy as np
import matplotlib.pyplot as plt
import KeplerPORTs_utils as kpu
def calc_comp(kepid, period, radius):
    """
    Calculate the completeness at a given radius and period for a KIC star.
    This includes the probability of transiting.

    parameters:
    ----------
    kepid: (int)
        The KIC id.
    period: (float)
        The target period.
    radius: (float)
        The target radius.

    returns:
    --------
    The Completeness.

    FIXME: Interpolate instead of finding nearest.
    """
    # Build the pipeline completeness input structure.
    comp_data = kpu.kepler_single_comp_data()
    comp_data.id = kepid
    comp_data.period_want = np.array([period])
    comp_data.rp_want = np.array([radius])
    # Hard-wired stellar and observing parameters (order of assignment is
    # irrelevant; these are plain attributes).
    scalar_params = {'rstar': 0.98, 'logg': 4.44, 'deteffver': 2,
                     'ecc': 0.0, 'dataspan': 1426.7, 'dutycycle': 0.879}
    for attr, value in scalar_params.items():
        setattr(comp_data, attr, value)
    # Transit-search pulse durations (hours) and matching CDPP noise values.
    comp_data.pulsedurations = [1.5, 2.0, 2.5, 3.0, 3.5, 4.5, 5.0, 6.0, 7.5,
                                9.0, 10.5, 12.0, 12.5, 15.0]
    comp_data.cdpps = [36.2, 33.2, 31.0, 29.4, 28.0, 26.1, 25.4, 24.2, 23.1,
                       22.4, 21.9, 21.8, 21.7, 21.5]
    comp_data.mesthresh = np.full_like(comp_data.pulsedurations, 7.1)
    # Evaluate completeness over the (single-cell) period/radius grid.
    probdet, probtot = kpu.kepler_single_comp(comp_data)
    return probtot[0][0]
if __name__ == "__main__":
    # Smoke test: completeness at a 1-year period and 1 Earth radius for
    # two example KIC targets.
    print(calc_comp(10593626, 365.25, 1))
    print(calc_comp(10141213, 365.25, 1))
| mit |
michigraber/scikit-learn | sklearn/feature_extraction/text.py | 24 | 50103 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
    """Transform accentuated unicode symbols into their simple counterpart

    Warning: the python-level loop and join operations make this
    implementation 20 times slower than the strip_accents_ascii basic
    normalization.

    See also
    --------
    strip_accents_ascii
        Remove accentuated char for any unicode symbol that has a direct
        ASCII equivalent.
    """
    # Decompose each character into base char + combining marks, then
    # drop the combining marks and reassemble the string.
    decomposed = unicodedata.normalize('NFKD', s)
    kept = [ch for ch in decomposed if not unicodedata.combining(ch)]
    return ''.join(kept)
def strip_accents_ascii(s):
    """Transform accentuated unicode symbols into ascii or nothing

    Warning: this solution is only suited for languages that have a direct
    transliteration to ASCII symbols.

    See also
    --------
    strip_accents_unicode
        Remove accentuated char for any unicode symbol.
    """
    # NFKD splits accents off their base characters; the lossy ASCII
    # round-trip then silently discards every non-ASCII code point.
    normalized = unicodedata.normalize('NFKD', s)
    ascii_bytes = normalized.encode('ASCII', 'ignore')
    return ascii_bytes.decode('ASCII')
def strip_tags(s):
    """Basic regexp based HTML / XML tag stripper function

    For serious HTML/XML preprocessing you should rather use an external
    library such as lxml or BeautifulSoup.
    """
    # Each <...> tag is replaced by a single space, not removed outright.
    tag_pattern = re.compile(r"<([^>]+)>", flags=re.UNICODE)
    return tag_pattern.sub(" ", s)
def _check_stop_list(stop):
    """Resolve a ``stop_words`` argument to a usable stop-word collection.

    ``"english"`` maps to the built-in list; any other string is rejected;
    anything else is assumed to already be a collection and passed through.
    """
    if stop == "english":
        return ENGLISH_STOP_WORDS
    if isinstance(stop, six.string_types):
        raise ValueError("not a built-in stop list: %s" % stop)
    # assume it's a collection (or None)
    return stop
class VectorizerMixin(object):
    """Provides common code for text vectorizers (tokenization logic)."""

    # Matches runs of two or more whitespace characters; used to collapse
    # them to a single space before character n-gram extraction.
    _white_spaces = re.compile(r"\s\s+")

    def decode(self, doc):
        """Decode the input into a string of unicode symbols

        The decoding strategy depends on the vectorizer parameters:
        ``self.input`` selects filename / file-object / raw-content mode,
        and ``self.encoding`` / ``self.decode_error`` drive byte decoding.
        """
        if self.input == 'filename':
            with open(doc, 'rb') as fh:
                doc = fh.read()

        elif self.input == 'file':
            doc = doc.read()

        if isinstance(doc, bytes):
            doc = doc.decode(self.encoding, self.decode_error)

        if doc is np.nan:
            # e.g. missing values leaking in from a pandas column.
            raise ValueError("np.nan is an invalid document, expected byte or "
                             "unicode string.")

        return doc

    def _word_ngrams(self, tokens, stop_words=None):
        """Turn tokens into a sequence of n-grams after stop words filtering"""
        # handle stop words
        if stop_words is not None:
            tokens = [w for w in tokens if w not in stop_words]

        # handle token n-grams
        min_n, max_n = self.ngram_range
        if max_n != 1:
            original_tokens = tokens
            tokens = []
            n_original_tokens = len(original_tokens)
            for n in xrange(min_n,
                            min(max_n + 1, n_original_tokens + 1)):
                for i in xrange(n_original_tokens - n + 1):
                    # n-grams are space-joined runs of consecutive tokens
                    tokens.append(" ".join(original_tokens[i: i + n]))

        return tokens

    def _char_ngrams(self, text_document):
        """Tokenize text_document into a sequence of character n-grams"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)

        text_len = len(text_document)
        ngrams = []
        min_n, max_n = self.ngram_range
        for n in xrange(min_n, min(max_n + 1, text_len + 1)):
            for i in xrange(text_len - n + 1):
                ngrams.append(text_document[i: i + n])
        return ngrams

    def _char_wb_ngrams(self, text_document):
        """Whitespace sensitive char-n-gram tokenization.

        Tokenize text_document into a sequence of character n-grams
        excluding any whitespace (operating only inside word boundaries)"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)

        min_n, max_n = self.ngram_range
        ngrams = []
        for w in text_document.split():
            # pad with spaces so edge n-grams encode the word boundary
            w = ' ' + w + ' '
            w_len = len(w)
            for n in xrange(min_n, max_n + 1):
                offset = 0
                ngrams.append(w[offset:offset + n])
                while offset + n < w_len:
                    offset += 1
                    ngrams.append(w[offset:offset + n])
                if offset == 0:   # count a short word (w_len < n) only once
                    break
        return ngrams

    def build_preprocessor(self):
        """Return a function to preprocess the text before tokenization"""
        if self.preprocessor is not None:
            return self.preprocessor

        # unfortunately python functools package does not have an efficient
        # `compose` function that would have allowed us to chain a dynamic
        # number of functions. However the cost of a lambda call is a few
        # hundreds of nanoseconds which is negligible when compared to the
        # cost of tokenizing a string of 1000 chars for instance.
        noop = lambda x: x

        # accent stripping
        if not self.strip_accents:
            strip_accents = noop
        elif callable(self.strip_accents):
            strip_accents = self.strip_accents
        elif self.strip_accents == 'ascii':
            strip_accents = strip_accents_ascii
        elif self.strip_accents == 'unicode':
            strip_accents = strip_accents_unicode
        else:
            raise ValueError('Invalid value for "strip_accents": %s' %
                             self.strip_accents)

        if self.lowercase:
            return lambda x: strip_accents(x.lower())
        else:
            return strip_accents

    def build_tokenizer(self):
        """Return a function that splits a string into a sequence of tokens"""
        if self.tokenizer is not None:
            return self.tokenizer
        # compile once here so the returned lambda reuses the pattern
        token_pattern = re.compile(self.token_pattern)
        return lambda doc: token_pattern.findall(doc)

    def get_stop_words(self):
        """Build or fetch the effective stop words list"""
        return _check_stop_list(self.stop_words)

    def build_analyzer(self):
        """Return a callable that handles preprocessing and tokenization"""
        if callable(self.analyzer):
            return self.analyzer

        preprocess = self.build_preprocessor()

        if self.analyzer == 'char':
            return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))

        elif self.analyzer == 'char_wb':
            return lambda doc: self._char_wb_ngrams(
                preprocess(self.decode(doc)))

        elif self.analyzer == 'word':
            stop_words = self.get_stop_words()
            tokenize = self.build_tokenizer()

            return lambda doc: self._word_ngrams(
                tokenize(preprocess(self.decode(doc))), stop_words)

        else:
            raise ValueError('%s is not a valid tokenization scheme/analyzer' %
                             self.analyzer)

    def _validate_vocabulary(self):
        # Normalize a user-supplied ``vocabulary`` (mapping or iterable of
        # terms) into a term -> index dict on ``self.vocabulary_`` and set
        # the ``fixed_vocabulary_`` flag accordingly.
        vocabulary = self.vocabulary
        if vocabulary is not None:
            if not isinstance(vocabulary, Mapping):
                vocab = {}
                for i, t in enumerate(vocabulary):
                    if vocab.setdefault(t, i) != i:
                        msg = "Duplicate term in vocabulary: %r" % t
                        raise ValueError(msg)
                vocabulary = vocab
            else:
                # a mapping must use exactly the indices 0..len-1,
                # with no repeats and no gaps
                indices = set(six.itervalues(vocabulary))
                if len(indices) != len(vocabulary):
                    raise ValueError("Vocabulary contains repeated indices.")
                for i in xrange(len(vocabulary)):
                    if i not in indices:
                        msg = ("Vocabulary of size %d doesn't contain index "
                               "%d." % (len(vocabulary), i))
                        raise ValueError(msg)
            if not vocabulary:
                raise ValueError("empty vocabulary passed to fit")
            self.fixed_vocabulary_ = True
            self.vocabulary_ = dict(vocabulary)
        else:
            self.fixed_vocabulary_ = False

    def _check_vocabulary(self):
        """Check if vocabulary is empty or missing (not fit-ed)"""
        msg = "%(name)s - Vocabulary wasn't fitted."
        # NOTE(review): the trailing comma makes this line a 1-tuple
        # expression; harmless, but it looks accidental.
        check_is_fitted(self, 'vocabulary_', msg=msg),

        if len(self.vocabulary_) == 0:
            raise ValueError("Vocabulary is empty")

    @property
    @deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
                "removed in 0.18. Please use `fixed_vocabulary_` instead.")
    def fixed_vocabulary(self):
        # Deprecated read-only alias kept for backward compatibility.
        return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
    """Convert a collection of text documents to a matrix of token occurrences

    It turns a collection of text documents into a scipy.sparse matrix holding
    token occurrence counts (or binary occurrence information), possibly
    normalized as token frequencies if norm='l1' or projected on the euclidean
    unit sphere if norm='l2'.

    This text vectorizer implementation uses the hashing trick to find the
    token string name to feature integer index mapping.

    This strategy has several advantages:

    - it is very low memory scalable to large datasets as there is no need to
      store a vocabulary dictionary in memory

    - it is fast to pickle and un-pickle as it holds no state besides the
      constructor parameters

    - it can be used in a streaming (partial fit) or parallel pipeline as there
      is no state computed during fit.

    There are also a couple of cons (vs using a CountVectorizer with an
    in-memory vocabulary):

    - there is no way to compute the inverse transform (from feature indices to
      string feature names) which can be a problem when trying to introspect
      which features are most important to a model.

    - there can be collisions: distinct tokens can be mapped to the same
      feature index. However in practice this is rarely an issue if n_features
      is large enough (e.g. 2 ** 18 for text classification problems).

    - no IDF weighting as this would render the transformer stateful.

    The hash function employed is the signed 32-bit version of Murmurhash3.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.

        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.

        Otherwise the input is expected to be the sequence strings or
        bytes items are expected to be analyzed directly.

    encoding : string, default='utf-8'
        If bytes or files are given to analyze, this encoding is used to
        decode.

    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.

    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        an direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.

    analyzer : string, {'word', 'char', 'char_wb'} or callable
        Whether the feature should be made of word or character n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries.

        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.

    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.

    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.

    ngram_range : tuple (min_n, max_n), default=(1, 1)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.

    stop_words : string {'english'}, list, or None (default)
        If 'english', a built-in stop word list for English is used.

        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.

    lowercase : boolean, default=True
        Convert all characters to lowercase before tokenizing.

    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).

    n_features : integer, default=(2 ** 20)
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.

    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.

    binary: boolean, default=False.
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.

    dtype: type, optional
        Type of the matrix returned by fit_transform() or transform().

    non_negative : boolean, default=False
        Whether output matrices should contain non-negative values only;
        effectively calls abs on the matrix prior to returning it.
        When True, output values can be interpreted as frequencies.
        When False, output values will have expected value zero.

    See also
    --------
    CountVectorizer, TfidfVectorizer
    """
    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None,
                 lowercase=True, preprocessor=None, tokenizer=None,
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
                 binary=False, norm='l2', non_negative=False,
                 dtype=np.float64):
        # Parameters are stored verbatim: this estimator is stateless, so
        # nothing is validated or computed until fit/transform time.
        self.input = input
        self.encoding = encoding
        self.decode_error = decode_error
        self.strip_accents = strip_accents
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.token_pattern = token_pattern
        self.stop_words = stop_words
        self.n_features = n_features
        self.ngram_range = ngram_range
        self.binary = binary
        self.norm = norm
        self.non_negative = non_negative
        self.dtype = dtype

    def partial_fit(self, X, y=None):
        """Does nothing: this transformer is stateless.

        This method is just there to mark the fact that this transformer
        can work in a streaming setup.
        """
        return self

    def fit(self, X, y=None):
        """Does nothing: this transformer is stateless."""
        # triggers a parameter validation
        self._get_hasher().fit(X, y=y)
        return self

    def transform(self, X, y=None):
        """Transform a sequence of documents to a document-term matrix.

        Parameters
        ----------
        X : iterable over raw text documents, length = n_samples
            Samples. Each sample must be a text document (either bytes or
            unicode strings, file name or file object depending on the
            constructor argument) which will be tokenized and hashed.

        y : (ignored)

        Returns
        -------
        X : scipy.sparse matrix, shape = (n_samples, self.n_features)
            Document-term matrix.
        """
        analyzer = self.build_analyzer()
        # generator expression keeps documents lazily analyzed
        X = self._get_hasher().transform(analyzer(doc) for doc in X)
        if self.binary:
            X.data.fill(1)
        if self.norm is not None:
            X = normalize(X, norm=self.norm, copy=False)
        return X

    # Alias transform to fit_transform for convenience
    fit_transform = transform

    def _get_hasher(self):
        # FeatureHasher performs the actual Murmurhash3-based hashing of
        # string tokens into column indices.
        return FeatureHasher(n_features=self.n_features,
                             input_type='string', dtype=self.dtype,
                             non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp select tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
    """Create sparse feature matrix, and vocabulary where fixed_vocab=False."""
    if fixed_vocab:
        vocabulary = self.vocabulary_
    else:
        # Assign a fresh index to every previously unseen term.
        vocabulary = defaultdict()
        vocabulary.default_factory = vocabulary.__len__

    analyzer = self.build_analyzer()
    j_indices = _make_int_array()
    indptr = _make_int_array()
    indptr.append(0)
    for document in raw_documents:
        for term in analyzer(document):
            try:
                j_indices.append(vocabulary[term])
            except KeyError:
                # Ignore out-of-vocabulary items for fixed_vocab=True
                continue
        indptr.append(len(j_indices))

    if not fixed_vocab:
        # disable defaultdict behaviour
        vocabulary = dict(vocabulary)
        if not vocabulary:
            raise ValueError("empty vocabulary; perhaps the documents only"
                             " contain stop words")

    # Every stored count is 1; duplicates are merged by sum_duplicates
    # below, which yields per-document term counts.
    j_indices = frombuffer_empty(j_indices, dtype=np.intc)
    indptr = np.frombuffer(indptr, dtype=np.intc)
    values = np.ones(len(j_indices))

    X = sp.csr_matrix((values, j_indices, indptr),
                      shape=(len(indptr) - 1, len(vocabulary)),
                      dtype=self.dtype)
    X.sum_duplicates()
    return vocabulary, X
def fit(self, raw_documents, y=None):
    """Learn a vocabulary dictionary of all tokens in the raw documents.

    Parameters
    ----------
    raw_documents : iterable
        An iterable which yields either str, unicode or file objects.

    Returns
    -------
    self
    """
    # All the work happens in fit_transform; the resulting matrix is
    # simply discarded here.
    self.fit_transform(raw_documents)
    return self
def fit_transform(self, raw_documents, y=None):
    """Learn the vocabulary dictionary and return term-document matrix.

    This is equivalent to fit followed by transform, but more efficiently
    implemented.

    Parameters
    ----------
    raw_documents : iterable
        An iterable which yields either str, unicode or file objects.

    Returns
    -------
    X : array, [n_samples, n_features]
        Document-term matrix.
    """
    # We intentionally don't call the transform method to make
    # fit_transform overridable without unwanted side effects in
    # TfidfVectorizer.
    self._validate_vocabulary()
    max_df, min_df = self.max_df, self.min_df

    vocabulary, X = self._count_vocab(raw_documents,
                                      self.fixed_vocabulary_)

    if self.binary:
        X.data.fill(1)

    if not self.fixed_vocabulary_:
        X = self._sort_features(X, vocabulary)

        # Integer thresholds are absolute counts; floats are fractions
        # of the corpus size.
        n_doc = X.shape[0]
        max_doc_count = (max_df if isinstance(max_df, numbers.Integral)
                         else max_df * n_doc)
        min_doc_count = (min_df if isinstance(min_df, numbers.Integral)
                         else min_df * n_doc)
        if max_doc_count < min_doc_count:
            raise ValueError(
                "max_df corresponds to < documents than min_df")
        X, self.stop_words_ = self._limit_features(X, vocabulary,
                                                   max_doc_count,
                                                   min_doc_count,
                                                   self.max_features)

        self.vocabulary_ = vocabulary

    return X
def transform(self, raw_documents):
    """Transform documents to document-term matrix.

    Extract token counts out of raw text documents using the vocabulary
    fitted with fit or the one provided to the constructor.

    Parameters
    ----------
    raw_documents : iterable
        An iterable which yields either str, unicode or file objects.

    Returns
    -------
    X : sparse matrix, [n_samples, n_features]
        Document-term matrix.
    """
    if not hasattr(self, 'vocabulary_'):
        self._validate_vocabulary()
    self._check_vocabulary()

    # use the same matrix-building strategy as fit_transform
    _, X = self._count_vocab(raw_documents, fixed_vocab=True)
    if self.binary:
        X.data.fill(1)
    return X
def inverse_transform(self, X):
    """Return terms per document with nonzero entries in X.

    Parameters
    ----------
    X : {array, sparse matrix}, shape = [n_samples, n_features]

    Returns
    -------
    X_inv : list of arrays, len = n_samples
        List of arrays of terms.
    """
    self._check_vocabulary()

    if sp.issparse(X):
        # We need CSR format for fast row manipulations.
        X = X.tocsr()
    else:
        # We need to convert X to a matrix, so that the indexing
        # returns 2D objects
        X = np.asmatrix(X)

    # Invert the vocabulary mapping: position i holds the term whose
    # feature index is i.
    terms = np.array(list(self.vocabulary_.keys()))
    indices = np.array(list(self.vocabulary_.values()))
    inverse_vocabulary = terms[np.argsort(indices)]

    return [inverse_vocabulary[X[row, :].nonzero()[1]].ravel()
            for row in range(X.shape[0])]
def get_feature_names(self):
    """Array mapping from feature integer indices to feature name"""
    self._check_vocabulary()
    # Sort (term, index) pairs by feature index and keep just the terms.
    return [term for term, _ in
            sorted(six.iteritems(self.vocabulary_), key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
    """Transform a count matrix to a normalized tf or tf-idf representation

    Tf means term-frequency while tf-idf means term-frequency times inverse
    document-frequency. This is a common term weighting scheme in information
    retrieval, that has also found good use in document classification.

    The goal of using tf-idf instead of the raw frequencies of occurrence of a
    token in a given document is to scale down the impact of tokens that occur
    very frequently in a given corpus and that are hence empirically less
    informative than features that occur in a small fraction of the training
    corpus.

    The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
    instead of tf * idf. The effect of this is that terms with zero idf, i.e.
    that occur in all documents of a training set, will not be entirely
    ignored. The formulas used to compute tf and idf depend on parameter
    settings that correspond to the SMART notation used in IR, as follows:

    Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
    Idf is "t" when use_idf is given, "n" (none) otherwise.
    Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.

    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.

    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.

    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).

    References
    ----------
    .. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
       Information Retrieval. Addison Wesley, pp. 68-74.`

    .. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
       Introduction to Information Retrieval. Cambridge University
       Press, pp. 118-120.`
    """

    def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False):
        self.norm = norm
        self.use_idf = use_idf
        self.smooth_idf = smooth_idf
        self.sublinear_tf = sublinear_tf

    def fit(self, X, y=None):
        """Learn the idf vector (global term weights)

        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts
        """
        if not sp.issparse(X):
            X = sp.csc_matrix(X)
        if self.use_idf:
            n_samples, n_features = X.shape
            df = _document_frequency(X)

            # perform idf smoothing if required
            df += int(self.smooth_idf)
            n_samples += int(self.smooth_idf)

            # log+1 instead of log makes sure terms with zero idf don't get
            # suppressed entirely.
            idf = np.log(float(n_samples) / df) + 1.0
            # Store the weights as a sparse diagonal so transform() can
            # apply them with a single matrix product.
            self._idf_diag = sp.spdiags(idf,
                                        diags=0, m=n_features, n=n_features)

        return self

    def transform(self, X, copy=True):
        """Transform a count matrix to a tf or tf-idf representation

        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts

        copy : boolean, default True
            Whether to copy X and operate on the copy or perform in-place
            operations.

        Returns
        -------
        vectors : sparse matrix, [n_samples, n_features]
        """
        # np.floating matches every float dtype; the previous check used
        # the deprecated ``np.float`` alias, which was removed in
        # NumPy 1.24 and raised AttributeError there.
        if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.floating):
            # preserve float family dtype
            X = sp.csr_matrix(X, copy=copy)
        else:
            # convert counts or binary occurrences to floats
            X = sp.csr_matrix(X, dtype=np.float64, copy=copy)

        n_samples, n_features = X.shape

        if self.sublinear_tf:
            # In-place log on the non-zero entries, then +1: tf -> 1 + log(tf)
            np.log(X.data, X.data)
            X.data += 1

        if self.use_idf:
            check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')

            expected_n_features = self._idf_diag.shape[0]
            if n_features != expected_n_features:
                raise ValueError("Input has n_features=%d while the model"
                                 " has been trained with n_features=%d" % (
                                     n_features, expected_n_features))
            # *= doesn't work
            X = X * self._idf_diag

        if self.norm:
            X = normalize(X, norm=self.norm, copy=False)

        return X

    @property
    def idf_(self):
        # The diagonal of _idf_diag holds the per-feature idf weights;
        # summing over one axis recovers them as a flat array.
        if hasattr(self, "_idf_diag"):
            return np.ravel(self._idf_diag.sum(axis=0))
        else:
            return None
class TfidfVectorizer(CountVectorizer):
    """Convert a collection of raw documents to a matrix of TF-IDF features.

    Equivalent to CountVectorizer followed by TfidfTransformer.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        'filename': each item passed to fit is the name of a file whose
        content is read and analyzed. 'file': each item is a file-like
        object with a 'read' method that is called to fetch the bytes.
        Otherwise the items themselves (str or bytes) are analyzed
        directly.

    encoding : string, 'utf-8' by default.
        Encoding used to decode bytes or file input.

    decode_error : {'strict', 'ignore', 'replace'}
        How to react to byte sequences that cannot be decoded with the
        given `encoding`. The default, 'strict', raises a
        UnicodeDecodeError; the other values are 'ignore' and 'replace'.

    strip_accents : {'ascii', 'unicode', None}
        Accent removal applied during preprocessing. 'ascii' is fast but
        only handles characters with a direct ASCII mapping; 'unicode' is
        slightly slower but works on any characters; None (default) does
        nothing.

    analyzer : string, {'word', 'char'} or callable
        Whether features are word or character n-grams. A callable is
        used as-is to extract features from the raw, unprocessed input.

    preprocessor : callable or None (default)
        Overrides the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.

    tokenizer : callable or None (default)
        Overrides the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.

    ngram_range : tuple (min_n, max_n)
        Inclusive bounds on n for the n-grams to be extracted; all values
        of n with min_n <= n <= max_n are used.

    stop_words : string {'english'}, list, or None (default)
        A string is passed to _check_stop_list ('english' is currently
        the only supported value); a list is taken as the stop words
        themselves, all of which are removed from the resulting tokens;
        None disables stop-word filtering (max_df in [0.7, 1.0) can then
        detect corpus-specific stop words by document frequency).
        Only applies if ``analyzer == 'word'``.

    lowercase : boolean, default True
        Convert all characters to lowercase before tokenizing.

    token_pattern : string
        Regexp defining what constitutes a "token", only used when
        ``analyzer == 'word'``. The default selects runs of 2 or more
        alphanumeric characters (punctuation is completely ignored and
        always treated as a token separator).

    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary, ignore terms whose document
        frequency is strictly higher than this threshold
        (corpus-specific stop words). A float is a proportion of
        documents, an integer an absolute count.
        This parameter is ignored if vocabulary is not None.

    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary, ignore terms whose document
        frequency is strictly lower than this threshold (a.k.a. cut-off
        in the literature). A float is a proportion of documents, an
        integer an absolute count.
        This parameter is ignored if vocabulary is not None.

    max_features : int or None, default=None
        If not None, keep only the top max_features terms ordered by
        term frequency across the corpus.
        This parameter is ignored if vocabulary is not None.

    vocabulary : Mapping or iterable, optional
        Either a Mapping (e.g., a dict) from terms to feature indices,
        or an iterable over terms. If not given, a vocabulary is
        determined from the input documents.

    binary : boolean, default=False
        If True, all non-zero term counts are set to 1. This does not
        mean outputs will have only 0/1 values, only that the tf term in
        tf-idf is binary. (Set idf and normalization to False to get
        0/1 outputs.)

    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().

    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.

    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.

    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if
        an extra document containing every term in the collection
        exactly once had been seen. Prevents zero divisions.

    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).

    Attributes
    ----------
    idf_ : array, shape = [n_features], or None
        The learned idf vector (global term weights) when ``use_idf``
        is set to True, None otherwise.

    stop_words_ : set
        Terms that were ignored because they either occurred in too many
        documents (`max_df`), occurred in too few documents (`min_df`),
        or were cut off by feature selection (`max_features`).
        This is only available if no vocabulary was given.

    See also
    --------
    CountVectorizer
        Tokenize the documents and count the occurrences of token and
        return them as a sparse matrix

    TfidfTransformer
        Apply Term Frequency Inverse Document Frequency normalization
        to a sparse matrix of occurrence counts.

    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model
    size when pickling. This attribute is provided only for
    introspection and can be safely removed using delattr or set to
    None before pickling.
    """

    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None, lowercase=True,
                 preprocessor=None, tokenizer=None, analyzer='word',
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), max_df=1.0, min_df=1,
                 max_features=None, vocabulary=None, binary=False,
                 dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False):
        # Counting parameters go to the CountVectorizer base class,
        # weighting parameters to a private TfidfTransformer.
        super(TfidfVectorizer, self).__init__(
            input=input, encoding=encoding, decode_error=decode_error,
            strip_accents=strip_accents, lowercase=lowercase,
            preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
            stop_words=stop_words, token_pattern=token_pattern,
            ngram_range=ngram_range, max_df=max_df, min_df=min_df,
            max_features=max_features, vocabulary=vocabulary, binary=binary,
            dtype=dtype)

        self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
                                       smooth_idf=smooth_idf,
                                       sublinear_tf=sublinear_tf)

    # Broadcast the TF-IDF parameters to the underlying transformer
    # instance for easy grid search and repr

    @property
    def norm(self):
        return self._tfidf.norm

    @norm.setter
    def norm(self, value):
        self._tfidf.norm = value

    @property
    def use_idf(self):
        return self._tfidf.use_idf

    @use_idf.setter
    def use_idf(self, value):
        self._tfidf.use_idf = value

    @property
    def smooth_idf(self):
        return self._tfidf.smooth_idf

    @smooth_idf.setter
    def smooth_idf(self, value):
        self._tfidf.smooth_idf = value

    @property
    def sublinear_tf(self):
        return self._tfidf.sublinear_tf

    @sublinear_tf.setter
    def sublinear_tf(self, value):
        self._tfidf.sublinear_tf = value

    @property
    def idf_(self):
        return self._tfidf.idf_

    def fit(self, raw_documents, y=None):
        """Learn vocabulary and idf from training set.

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects

        Returns
        -------
        self : TfidfVectorizer
        """
        counts = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(counts)
        return self

    def fit_transform(self, raw_documents, y=None):
        """Learn vocabulary and idf, return term-document matrix.

        This is equivalent to fit followed by transform, but more
        efficiently implemented.

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects

        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Tf-idf-weighted document-term matrix.
        """
        counts = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(counts)
        # counts is already a transformed view of raw_documents, so no
        # copy is needed when weighting it.
        return self._tfidf.transform(counts, copy=False)

    def transform(self, raw_documents, copy=True):
        """Transform documents to document-term matrix.

        Uses the vocabulary and document frequencies (df) learned by fit
        (or fit_transform).

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects

        copy : boolean, default True
            Whether to copy X and operate on the copy or perform
            in-place operations.

        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Tf-idf-weighted document-term matrix.
        """
        check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')

        counts = super(TfidfVectorizer, self).transform(raw_documents)
        return self._tfidf.transform(counts, copy=False)
| bsd-3-clause |
jpbarraca/dRonin | python/calibration/mag_calibration.py | 1 | 4547 | #!/usr/bin/env python
from numpy import *
from matplotlib.pylab import *
def mag_calibration(mag, gyros=None, LH=200, LV=500):
    """Calibrate the magnetometer data by fitting it to a sphere.

    Works best when the sensor was constantly turning so the samples are
    spread around that sphere somewhat evenly (or at least in a
    horizontal plane).

    Parameters
    ----------
    mag : record array with 'time', 'x', 'y' and 'z' magnetometer fields
    gyros : optional record array with 'time' and 'z' gyro fields; when
        given, only samples taken while spinning about the z axis are used
    LH, LV : expected horizontal and vertical field strengths

    Returns
    -------
    (res, cor_x, cor_y, cor_z) : the scipy optimization result followed
        by the corrected magnetometer components.
    """
    import numpy
    from scipy.optimize import minimize
    from numpy.core.multiarray import arange

    def find_spinning(mag, gyros):
        """Return the indices in the magnetometer data when the gyro
        indicates it is spinning on the z axis."""
        import scipy.signal

        threshold = 40
        spinning = scipy.signal.medfilt(abs(gyros['z'][:, 0]),
                                        kernel_size=5) > threshold

        # make sure to always find end elements
        spinning = numpy.concatenate((numpy.array([False]), spinning,
                                      numpy.array([False])))

        # numpy.flatnonzero replaces matplotlib.mlab.find, which was
        # removed from matplotlib in version 3.1.
        start = numpy.flatnonzero(spinning[1:] & ~spinning[0:-1])
        stop = numpy.flatnonzero(~spinning[1:] & spinning[0:-1]) - 1

        tstart = gyros['time'][start]
        tstop = gyros['time'][stop]

        # Map each spinning gyro interval onto the nearest magnetometer
        # sample indices.
        idx = numpy.zeros((0), dtype=int)
        for i in arange(tstart.size):
            i1 = abs(mag['time'] - tstart[i]).argmin()
            i2 = abs(mag['time'] - tstop[i]).argmin()
            idx = numpy.concatenate((idx, arange(i1, i2, dtype=int)))

        return idx

    if gyros is not None:
        idx = find_spinning(mag, gyros)
    else:
        idx = arange(mag['time'].size)

    mag_x = mag['x'][idx, 0]
    mag_y = mag['y'][idx, 0]
    mag_z = mag['z'][idx, 0]

    def distortion(x, mag_x=mag_x, mag_y=mag_y, mag_z=mag_z, LH=LH, LV=LV):
        """Loss function for distortion from spherical data."""
        from numpy import sqrt

        # x = [scale_x, scale_y, scale_z, offset_x, offset_y, offset_z]
        cor_x = mag_x * x[0] - x[3]
        cor_y = mag_y * x[1] - x[4]
        cor_z = mag_z * x[2] - x[5]

        l = sqrt(cor_x**2 + cor_y**2 + cor_z**2)
        L0 = sqrt(LH**2 + LV**2)
        spherical_error = numpy.mean((l - L0)**2)

        # note that ideally the horizontal error would be calculated
        # after correcting for attitude but that requires high temporal
        # accuracy from attitude which we don't want to require. this
        # works well in practice.
        lh = sqrt(cor_x**2 + cor_y**2)
        horizontal_error = numpy.mean((lh - LH)**2)

        # weight both the spherical error and the horizontal error
        # components equally
        return spherical_error + horizontal_error

    # Scale factors must stay above 0.5 so the fit cannot collapse.
    cons = ({'type': 'ineq', 'fun': lambda x: numpy.array([x[0] - 0.5])},
            {'type': 'ineq', 'fun': lambda x: numpy.array([x[1] - 0.5])},
            {'type': 'ineq', 'fun': lambda x: numpy.array([x[2] - 0.5])})

    opts = {'xtol': 1e-8, 'disp': False, 'maxiter': 10000}

    # method of COBYLA also works well
    x0 = numpy.array([1, 1, 1, numpy.mean(mag_x), numpy.mean(mag_y),
                      numpy.mean(mag_z)])
    res = minimize(distortion, x0, method='COBYLA', options=opts,
                   constraints=cons)

    x = res.x
    cor_x = mag_x * x[0] - x[3]
    cor_y = mag_y * x[1] - x[4]
    cor_z = mag_z * x[2] - x[5]

    # Diagnostic plots: corrected samples (left) and corrected field
    # magnitude (right).
    import matplotlib
    from numpy import sqrt
    matplotlib.pyplot.subplot(1, 2, 1)
    matplotlib.pyplot.plot(cor_x, cor_y, '.', cor_x, cor_z, '.',
                           cor_z, cor_y, '.')
    matplotlib.pyplot.subplot(1, 2, 2)
    matplotlib.pyplot.plot(sqrt(cor_x**2 + cor_y**2 + cor_z**2))

    return res, cor_x, cor_y, cor_z
def main():
    """Load telemetry from the command-line arguments and calibrate."""
    import sys, os
    sys.path.insert(1, os.path.dirname(sys.path[0]))

    from dronin import telemetry
    uavo_list = telemetry.get_telemetry_by_args()

    from dronin.uavo import UAVO_Magnetometer, UAVO_Gyros

    # print() works on both Python 2 and 3; the original bare print
    # statement was Python 2 only.
    print(mag_calibration(uavo_list.as_numpy_array(UAVO_Magnetometer),
                          uavo_list.as_numpy_array(UAVO_Gyros)))

    # Wait for user to close window.  pyplot must be imported here: the
    # `import matplotlib` inside mag_calibration is local to that
    # function and is not visible in this scope.
    import matplotlib.pyplot
    matplotlib.pyplot.show()

if __name__ == "__main__":
    main()
| gpl-3.0 |
macks22/gensim | gensim/sklearn_api/w2vmodel.py | 1 | 3341 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
import six
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
class W2VTransformer(TransformerMixin, BaseEstimator):
    """Scikit-learn compatible wrapper around gensim's Word2Vec model.

    Follows the scikit-learn fit/transform API conventions.
    """

    def __init__(self, size=100, alpha=0.025, window=5, min_count=5, max_vocab_size=None, sample=1e-3, seed=1,
                 workers=3, min_alpha=0.0001, sg=0, hs=0, negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0,
                 trim_rule=None, sorted_vocab=1, batch_words=10000):
        """Sklearn wrapper for Word2Vec model. See gensim.models.Word2Vec for parameter details."""
        # No trained model exists until fit() is called.
        self.gensim_model = None
        self.size = size
        self.alpha = alpha
        self.window = window
        self.min_count = min_count
        self.max_vocab_size = max_vocab_size
        self.sample = sample
        self.seed = seed
        self.workers = workers
        self.min_alpha = min_alpha
        self.sg = sg
        self.hs = hs
        self.negative = negative
        self.cbow_mean = int(cbow_mean)
        self.hashfxn = hashfxn
        self.iter = iter
        self.null_word = null_word
        self.trim_rule = trim_rule
        self.sorted_vocab = sorted_vocab
        self.batch_words = batch_words

    def fit(self, X, y=None):
        """Fit the model according to the given training data.

        Calls gensim.models.Word2Vec
        """
        self.gensim_model = models.Word2Vec(
            sentences=X, size=self.size, alpha=self.alpha,
            window=self.window, min_count=self.min_count, max_vocab_size=self.max_vocab_size,
            sample=self.sample, seed=self.seed, workers=self.workers, min_alpha=self.min_alpha,
            sg=self.sg, hs=self.hs, negative=self.negative, cbow_mean=self.cbow_mean,
            hashfxn=self.hashfxn, iter=self.iter, null_word=self.null_word, trim_rule=self.trim_rule,
            sorted_vocab=self.sorted_vocab, batch_words=self.batch_words
        )
        return self

    def transform(self, words):
        """Return the word-vectors for the input list of words."""
        if self.gensim_model is None:
            raise NotFittedError(
                "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
            )

        # Accept a single word as well as a list of words.
        if isinstance(words, six.string_types):
            words = [words]

        # Look up each word's vector and stack them into one 2-d array.
        vectors = [self.gensim_model[word] for word in words]
        return np.reshape(np.array(vectors), (len(words), self.size))

    def partial_fit(self, X):
        raise NotImplementedError(
            "'partial_fit' has not been implemented for W2VTransformer. "
            "However, the model can be updated with a fixed vocabulary using Gensim API call."
        )
Haleyo/spark-tk | regression-tests/sparktkregtests/testcases/frames/frame_matrix_datatype_test.py | 11 | 8964 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests matrix datatype on frames """
import unittest
import numpy
from itertools import ifilter, imap
from sparktkregtests.lib import sparktk_test
from sparktk.dtypes import matrix, vector
class FrameMatrixDataTypeTest(sparktk_test.SparkTKTestCase):
    """Exercises the matrix column datatype on sparktk frames."""

    def setUp(self):
        """Build frames to be exercised and establish known baselines"""
        super(FrameMatrixDataTypeTest, self).setUp()
        self.dataset = [["A", [[1, 2], [3, 4]]],
                        ["B", [[5, 6], [7, 8]]],
                        ["C", [[9, 10], [11, 12], [13, 14]]]]
        self.schema = [("C0", str), ("C1", matrix)]

    def test_frame_create_row_count(self):
        """ Trivial Frame creation. """
        frame = self.context.frame.create(self.dataset, schema=self.schema)
        self.assertEqual(frame.count(), len(self.dataset))
        self.assertEqual(len(frame.take(3)), 3)
        # taking more rows than exist must still return only the rows
        # that are actually present
        self.assertEqual(len(frame.take(10)), len(self.dataset))

    @unittest.skip("sparktk: schema inference between matrix and vector is ambiguous")
    def test_without_schema(self):
        """Test without a specified schema"""
        frame = self.context.frame.create(self.dataset)
        self.assertEqual(frame.schema, self.schema)

    @unittest.skip("sparktk: schema inference between matrix and vector is ambiguous")
    def test_with_validate_schema_no_schema_provided(self):
        """Test without a specified schema validating the schema"""
        frame = self.context.frame.create(self.dataset, validate_schema=True)
        self.assertEqual(frame.schema, self.schema)

    def test_with_validate_schema_with_valid_schema(self):
        """Test with validate_schema true and also a valid schema"""
        # should default to using the defined schema
        frame = self.context.frame.create(self.dataset,
                                          validate_schema=True,
                                          schema=self.schema)
        self.assertEqual(frame.schema, self.schema)

    def test_validate_schema_with_invalid_schema_all_columns_same_datatype(self):
        """Test with validate_schema=True and invalid schema, columns same type"""
        invalid_schema = [("col1", int), ("col2", int)]
        validated_frame = self.context.frame.create(self.dataset,
                                                    validate_schema=True,
                                                    schema=invalid_schema)
        # anything that could not be cast to int should come back None
        for row in validated_frame.take(validated_frame.count()):
            for item in row:
                if type(item) is not int:
                    self.assertEqual(item, None)

    def test_validate_schema_of_strs(self):
        """Test validate schema true with schema of strs"""
        schema = [("C0", str), ("C1", str)]
        # validate_schema casts values that can be converted to the
        # schema-specified type instead of raising; ints and floats can
        # be cast to string, so every cell should come back as a str
        frame = self.context.frame.create(self.dataset, schema=schema,
                                          validate_schema=True)
        for row in frame.take(frame.count()):
            for item in row:
                self.assertEqual(type(item), str)

    def test_add_columns(self):
        """Test add columns on matrix column data"""
        frame = self.context.frame.create(self.dataset, self.schema)
        # Add the number of rows of each matrix as a column named shape
        frame.add_columns(lambda row: row["C1"].shape[0], ('shape', int))
        obtained_result = frame.take(10, columns='shape')
        expected_result = [[numpy.array(item[1]).shape[0]]
                           for item in self.dataset]
        self.assertEqual(obtained_result, expected_result)

    def test_filter(self):
        """Test filter on matrix column data"""
        frame = self.context.frame.create(self.dataset, self.schema)
        # Keep only the rows whose matrix has exactly two rows
        frame.filter(lambda row: row["C1"].shape[0] == 2)
        obtained_result = frame.count()
        obtained_result_matrix = frame.take(10, columns='C1')
        # Expected values: apply the same condition to ndarray versions
        # of the raw dataset
        filtered_result_matrix = list(ifilter(
            lambda i: numpy.array(i[1]).shape[0] == 2, self.dataset))
        expected_result_matrix = list(imap(
            lambda row: [numpy.array(row[1])], filtered_result_matrix))
        expected_result = len(expected_result_matrix)
        self.assertEqual(obtained_result, expected_result)
        numpy.testing.assert_array_equal(obtained_result_matrix,
                                         expected_result_matrix)

    def test_convert_matrix_col_to_vector(self):
        """ Convert a matrix column to vector using add_columns"""
        frame = self.context.frame.create(self.dataset, self.schema)
        # vector(2) requires a fixed length, so drop matrices with more
        # than two rows first
        frame.filter(lambda row: row["C1"].shape[0] == 2)
        # Add the first column of each matrix as a vector-typed column
        frame.add_columns(lambda row: row["C1"][:, 0],
                          ('first_column', vector(2)))
        obtained_result = frame.take(10, columns='first_column')
        # Only the first two dataset entries survive the filter above
        expected_result = [[numpy.array(item[1])[:, 0]]
                           for item in self.dataset[:2]]
        numpy.testing.assert_array_equal(obtained_result, expected_result)

    def test_covariance_matrix(self):
        """Test the output of dicom_covariance_matrix"""
        frame = self.context.frame.create(self.dataset, self.schema)
        frame.matrix_covariance_matrix("C1")
        results = frame.to_pandas(frame.count())
        # compare each row against numpy's covariance
        for i, row in results.iterrows():
            actual_cov = row['CovarianceMatrix_C1']
            expected_cov = numpy.cov(row['C1'])
            numpy.testing.assert_almost_equal(
                actual_cov, expected_cov,
                decimal=4, err_msg="cov incorrect")

    def test_matrix_svd(self):
        """ Test matrix svd operation on the frame"""
        frame = self.context.frame.create(self.dataset, self.schema)
        frame.matrix_svd("C1")
        # compare matrix_svd output with numpy's svd, row by row
        results = frame.to_pandas(frame.count())
        for i, row in results.iterrows():
            actual_U = row['U_C1']
            actual_V = row['Vt_C1']
            actual_s = row['SingularVectors_C1']
            U, s, V = numpy.linalg.svd(row['C1'])
            numpy.testing.assert_almost_equal(
                actual_U, U, decimal=4,
                err_msg="U incorrect")
            numpy.testing.assert_almost_equal(
                actual_V, V, decimal=4,
                err_msg="V incorrect")
            numpy.testing.assert_almost_equal(
                actual_s[0], s, decimal=4,
                err_msg="Singual vectors incorrect")

    def test_matrix_pcs(self):
        """ Test matrix pca operation on frame"""
        dataset = [["A", [[1, 2, 3], [3, 4, 5], [2, 6, 7]]],
                   ["B", [[5, 6, 7], [7, 8, 9], [4, 3, 5]]],
                   ["C", [[9, 10, 11], [11, 12, 13], [13, 14, 15]]]]
        frame = self.context.frame.create(dataset, self.schema)
        frame.matrix_svd("C1")
        frame.matrix_pca("C1", "Vt_C1")
        # compare matrix_pca output with numpy's
        results = frame.to_pandas(frame.count())
        for i, row in results.iterrows():
            actual_pcs = row['PrincipalComponents_C1']
            U, s, V = numpy.linalg.svd(row['C1'])
            expected_pcs = row['C1'].dot(V.T)
            numpy.testing.assert_almost_equal(
                actual_pcs, expected_pcs, decimal=4,
                err_msg="pcs incorrect")

if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
moutai/scikit-learn | sklearn/model_selection/_validation.py | 2 | 36962 | """
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import KFold
from ._split import LabelKFold
from ._split import LeaveOneLabelOut
from ._split import LeaveOneOut
from ._split import LeavePLabelOut
from ._split import LeavePOut
from ._split import ShuffleSplit
from ._split import LabelShuffleSplit
from ._split import StratifiedKFold
from ._split import StratifiedShuffleSplit
from ._split import PredefinedSplit
from ._split import check_cv, _safe_split
# Public API of this module.
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
           'learning_curve', 'validation_curve']
# Registry of all available cross-validation splitter classes, keyed by name.
ALL_CVS = {'KFold': KFold,
           'LabelKFold': LabelKFold,
           'LeaveOneLabelOut': LeaveOneLabelOut,
           'LeaveOneOut': LeaveOneOut,
           'LeavePLabelOut': LeavePLabelOut,
           'LeavePOut': LeavePOut,
           'ShuffleSplit': ShuffleSplit,
           'LabelShuffleSplit': LabelShuffleSplit,
           'StratifiedKFold': StratifiedKFold,
           'StratifiedShuffleSplit': StratifiedShuffleSplit,
           'PredefinedSplit': PredefinedSplit}
# Subset of ALL_CVS: splitters that make use of the ``labels`` argument.
LABEL_CVS = {'LabelKFold': LabelKFold,
             'LeaveOneLabelOut': LeaveOneLabelOut,
             'LeavePLabelOut': LeavePLabelOut,
             'LabelShuffleSplit': LabelShuffleSplit}
def cross_val_score(estimator, X, y=None, labels=None, scoring=None, cv=None,
                    n_jobs=1, verbose=0, fit_params=None,
                    pre_dispatch='2*n_jobs'):
    """Evaluate a score by cross-validation

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - An object to be used as a cross-validation generator.
        - An iterable yielding train, test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
        - None, in which case all the jobs are immediately
          created and spawned. Use this for lightweight and
          fast-running jobs, to avoid delays due to on-demand
          spawning of the jobs
        - An int, giving the exact number of total jobs that are
          spawned
        - A string, giving an expression as a function of n_jobs,
          as in '2*n_jobs'

    Returns
    -------
    scores : array of float, shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.

    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.model_selection import cross_val_score
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> print(cross_val_score(lasso, X, y))  # doctest: +ELLIPSIS
    [ 0.33150734  0.08022311  0.03531764]

    See Also
    ---------
    :func:`sklearn.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.
    """
    X, y, labels = indexable(X, y, labels)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
                                              train, test, verbose, None,
                                              fit_params)
                      for train, test in cv.split(X, y, labels))
    # _fit_and_score returns [test_score, n_test_samples, scoring_time];
    # keep only the test score for each fold.
    return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
                   parameters, fit_params, return_train_score=False,
                   return_parameters=False, error_score='raise'):
    """Fit estimator and compute scores for a given dataset split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    scorer : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    train : array-like, shape (n_train_samples,)
        Indices of training samples.

    test : array-like, shape (n_test_samples,)
        Indices of test samples.

    verbose : integer
        The verbosity level.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    parameters : dict or None
        Parameters to be set on the estimator.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    return_train_score : boolean, optional, default: False
        Compute and return score on training set.

    return_parameters : boolean, optional, default: False
        Return parameters that has been used for the estimator.

    Returns
    -------
    train_score : float, optional
        Score on training set, returned only if `return_train_score` is `True`.

    test_score : float
        Score on test set.

    n_test_samples : int
        Number of test samples.

    scoring_time : float
        Time spent for fitting and scoring in seconds.

    parameters : dict or None, optional
        The parameters that have been evaluated.
    """
    if verbose > 1:
        if parameters is None:
            msg = ''
        else:
            msg = '%s' % (', '.join('%s=%s' % (k, v)
                          for k, v in parameters.items()))
        print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
    # Adjust length of sample weights
    fit_params = fit_params if fit_params is not None else {}
    fit_params = dict([(k, _index_param_value(X, v, train))
                      for k, v in fit_params.items()])
    if parameters is not None:
        estimator.set_params(**parameters)
    start_time = time.time()
    # The test split may need the train indices too (e.g. for precomputed
    # kernels, _safe_split slices a (n_test, n_train) sub-matrix).
    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)
    try:
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)
    except Exception as e:
        # A failed fit is fatal unless the caller supplied a numeric
        # error_score, in which case that value is used and a warning raised.
        if error_score == 'raise':
            raise
        elif isinstance(error_score, numbers.Number):
            test_score = error_score
            if return_train_score:
                train_score = error_score
            warnings.warn("Classifier fit failed. The score on this train-test"
                          " partition for these parameters will be set to %f. "
                          "Details: \n%r" % (error_score, e), FitFailedWarning)
        else:
            raise ValueError("error_score must be the string 'raise' or a"
                             " numeric value. (Hint: if using 'raise', please"
                             " make sure that it has been spelled correctly.)")
    else:
        test_score = _score(estimator, X_test, y_test, scorer)
        if return_train_score:
            train_score = _score(estimator, X_train, y_train, scorer)
    scoring_time = time.time() - start_time
    # ``msg`` is only defined when verbose > 1, which is implied by > 2.
    if verbose > 2:
        msg += ", score=%f" % test_score
    if verbose > 1:
        end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
        print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
    # Assemble the return list in the documented order; optional entries are
    # included only when requested.
    ret = [train_score] if return_train_score else []
    ret.extend([test_score, _num_samples(X_test), scoring_time])
    if return_parameters:
        ret.append(parameters)
    return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, labels=None, cv=None, n_jobs=1,
                      verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
                      method='predict'):
    """Generate cross-validated estimates for each input data point

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - An object to be used as a cross-validation generator.
        - An iterable yielding train, test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
        - None, in which case all the jobs are immediately
          created and spawned. Use this for lightweight and
          fast-running jobs, to avoid delays due to on-demand
          spawning of the jobs
        - An int, giving the exact number of total jobs that are
          spawned
        - A string, giving an expression as a function of n_jobs,
          as in '2*n_jobs'

    method : string, optional, default: 'predict'
        Invokes the passed method name of the passed estimator.

    Returns
    -------
    predictions : ndarray
        This is the result of calling ``method``

    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.model_selection import cross_val_predict
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> y_pred = cross_val_predict(lasso, X, y)
    """
    X, y, labels = indexable(X, y, labels)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    # Ensure the estimator has implemented the passed decision function
    if not callable(getattr(estimator, method)):
        raise AttributeError('{} not implemented in estimator'
                             .format(method))
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    prediction_blocks = parallel(delayed(_fit_and_predict)(
        clone(estimator), X, y, train, test, verbose, fit_params, method)
        for train, test in cv.split(X, y, labels))
    # Concatenate the predictions
    predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
    test_indices = np.concatenate([indices_i
                                   for _, indices_i in prediction_blocks])
    # Every sample must appear in exactly one test fold, otherwise there is
    # no single prediction per sample to return.
    if not _check_is_permutation(test_indices, _num_samples(X)):
        raise ValueError('cross_val_predict only works for partitions')
    # Invert the permutation so predictions are returned in the original
    # sample order rather than fold order.
    inv_test_indices = np.empty(len(test_indices), dtype=int)
    inv_test_indices[test_indices] = np.arange(len(test_indices))
    # Check for sparse predictions
    if sp.issparse(predictions[0]):
        predictions = sp.vstack(predictions, format=predictions[0].format)
    else:
        predictions = np.concatenate(predictions)
    return predictions[inv_test_indices]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
                     method):
    """Fit estimator and predict values for a given dataset split.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    train : array-like, shape (n_train_samples,)
        Indices of training samples.

    test : array-like, shape (n_test_samples,)
        Indices of test samples.

    verbose : integer
        The verbosity level.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    method : string
        Invokes the passed method name of the passed estimator.

    Returns
    -------
    predictions : sequence
        Result of calling 'estimator.method'

    test : array-like
        This is the value of the test parameter
    """
    # Slice sample-aligned fit parameters (e.g. sample_weight) down to the
    # training subset so their length matches X_train.
    fit_params = {} if fit_params is None else fit_params
    fit_params = dict((key, _index_param_value(X, value, train))
                      for key, value in fit_params.items())
    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, _ = _safe_split(estimator, X, y, test, train)
    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    prediction_func = getattr(estimator, method)
    return prediction_func(X_test), test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, bool)
hit[indices] = True
if not np.all(hit):
return False
return True
def _index_param_value(X, v, indices):
    """Private helper function for parameter value indexing."""
    # Only index values that are array-like and aligned with the samples of
    # X (e.g. sample_weight); everything else is passed through untouched.
    sample_aligned = _is_arraylike(v) and _num_samples(v) == _num_samples(X)
    if not sample_aligned:
        return v
    # CSR supports efficient row indexing, which safe_indexing relies on.
    value = v.tocsr() if sp.issparse(v) else v
    return safe_indexing(value, indices)
def permutation_test_score(estimator, X, y, labels=None, cv=None,
                           n_permutations=100, n_jobs=1, random_state=0,
                           verbose=0, scoring=None):
    """Evaluate the significance of a cross-validated score with permutations

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like
        The target variable to try to predict in the case of
        supervised learning.

    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - An object to be used as a cross-validation generator.
        - An iterable yielding train, test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    n_permutations : integer, optional
        Number of times to permute ``y``.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance to define the state of the
        random permutations generator.

    verbose : integer, optional
        The verbosity level.

    Returns
    -------
    score : float
        The true score without permuting targets.

    permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutations.

    pvalue : float
        The returned value equals p-value if `scoring` returns bigger
        numbers for better scores (e.g., accuracy_score). If `scoring` is
        rather a loss function (i.e. when lower is better such as with
        `mean_squared_error`) then this is actually the complement of the
        p-value:  1 - p-value.

    Notes
    -----
    This function implements Test 1 in:
        Ojala and Garriga. Permutation Tests for Studying Classifier
        Performance.  The Journal of Machine Learning Research (2010)
        vol. 11
    """
    X, y, labels = indexable(X, y, labels)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    random_state = check_random_state(random_state)
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    score = _permutation_test_score(clone(estimator), X, y, labels, cv, scorer)
    permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_permutation_test_score)(
            clone(estimator), X, _shuffle(y, labels, random_state),
            labels, cv, scorer)
        for _ in range(n_permutations))
    permutation_scores = np.array(permutation_scores)
    # "+ 1" in numerator and denominator: the unpermuted score itself counts
    # as one permutation, which also keeps the p-value strictly positive.
    pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
    return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a pb with nosetests
def _permutation_test_score(estimator, X, y, labels, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, labels):
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
indices[this_mask] = random_state.permutation(indices[this_mask])
return y[indices]
def learning_curve(estimator, X, y, labels=None,
                   train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
                   exploit_incremental_learning=False, n_jobs=1,
                   pre_dispatch="all", verbose=0):
    """Learning curve.

    Determines cross-validated training and test scores for different training
    set sizes.

    A cross-validation generator splits the whole dataset k times in training
    and test data. Subsets of the training set with varying sizes will be used
    to train the estimator and a score for each training subset size and the
    test set will be computed. Afterwards, the scores will be averaged over
    all k runs for each training subset size.

    Read more in the :ref:`User Guide <learning_curve>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.

    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - An object to be used as a cross-validation generator.
        - An iterable yielding train, test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    exploit_incremental_learning : boolean, optional, default: False
        If the estimator supports incremental learning, this will be
        used to speed up fitting for different training set sizes.

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).

    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.

    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.

    Returns
    -------
    train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that has been used to generate the
        learning curve. Note that the number of ticks might be less
        than n_ticks because duplicate entries will be removed.

    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.

    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.

    Notes
    -----
    See :ref:`examples/model_selection/plot_learning_curve.py
    <example_model_selection_plot_learning_curve.py>`
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y, labels = indexable(X, y, labels)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    cv_iter = cv.split(X, y, labels)
    # Make a list since we will be iterating multiple times over the folds.
    # BUGFIX: this materialized list must be reused for *both* branches
    # below; the incremental branch previously called cv.split() a second
    # time, so stochastic splitters (e.g. ShuffleSplit without a fixed
    # random_state) could fit on different folds than the ones used to
    # compute train_sizes_abs.
    cv_iter = list(cv_iter)
    scorer = check_scoring(estimator, scoring=scoring)
    n_max_training_samples = len(cv_iter[0][0])
    # Because the lengths of folds can be significantly different, it is
    # not guaranteed that we use all of the available training data when we
    # use the first 'n_max_training_samples' samples.
    train_sizes_abs = _translate_train_sizes(train_sizes,
                                             n_max_training_samples)
    n_unique_ticks = train_sizes_abs.shape[0]
    if verbose > 0:
        print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    if exploit_incremental_learning:
        # Classifiers need the full class list up front for partial_fit.
        classes = np.unique(y) if is_classifier(estimator) else None
        out = parallel(delayed(_incremental_fit_estimator)(
            clone(estimator), X, y, classes, train, test, train_sizes_abs,
            scorer, verbose) for train, test in cv_iter)
    else:
        out = parallel(delayed(_fit_and_score)(
            clone(estimator), X, y, scorer, train[:n_train_samples], test,
            verbose, parameters=None, fit_params=None, return_train_score=True)
            for train, test in cv_iter
            for n_train_samples in train_sizes_abs)
        # Keep only (train_score, test_score); drop n_samples/time columns.
        out = np.array(out)[:, :2]
        n_cv_folds = out.shape[0] // n_unique_ticks
        out = out.reshape(n_cv_folds, n_unique_ticks, 2)
    # -> (2, n_ticks, n_folds): out[0] train scores, out[1] test scores.
    out = np.asarray(out).transpose((2, 1, 0))
    return train_sizes_abs, out[0], out[1]
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
                               train_sizes, scorer, verbose):
    """Train estimator on training subsets incrementally and compute scores."""
    train_scores, test_scores = [], []
    # Split the training indices into consecutive chunks so that after
    # feeding chunk i to partial_fit the estimator has seen exactly
    # train_sizes[i] samples in total.
    partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
    for n_train_samples, partial_train in partitions:
        train_subset = train[:n_train_samples]
        X_train, y_train = _safe_split(estimator, X, y, train_subset)
        X_partial_train, y_partial_train = _safe_split(estimator, X, y,
                                                       partial_train)
        X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
        # ``classes`` must be passed on every partial_fit call so that
        # classifiers know the full label set from the first chunk onwards.
        if y_partial_train is None:
            estimator.partial_fit(X_partial_train, classes=classes)
        else:
            estimator.partial_fit(X_partial_train, y_partial_train,
                                  classes=classes)
        # Score on everything seen so far (train_subset) and on the test set.
        train_scores.append(_score(estimator, X_train, y_train, scorer))
        test_scores.append(_score(estimator, X_test, y_test, scorer))
    # Shape (n_ticks, 2): columns are (train_score, test_score) per tick.
    return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, labels=None,
                     cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
                     verbose=0):
    """Validation curve.

    Determine training and test scores for varying parameter values.

    Compute scores for an estimator with different values of a specified
    parameter. This is similar to grid search with one parameter. However, this
    will also compute training scores and is merely a utility for plotting the
    results.

    Read more in the :ref:`User Guide <learning_curve>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    param_name : string
        Name of the parameter that will be varied.

    param_range : array-like, shape (n_values,)
        The values of the parameter that will be evaluated.

    labels : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset into
        train/test set.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - An object to be used as a cross-validation generator.
        - An iterable yielding train, test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).

    pre_dispatch : integer or string, optional
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The string can
        be an expression like '2*n_jobs'.

    verbose : integer, optional
        Controls the verbosity: the higher, the more messages.

    Returns
    -------
    train_scores : array, shape (n_ticks, n_cv_folds)
        Scores on training sets.

    test_scores : array, shape (n_ticks, n_cv_folds)
        Scores on test set.

    Notes
    -----
    See
    :ref:`examples/model_selection/plot_validation_curve.py
    <example_model_selection_plot_validation_curve.py>`
    """
    X, y, labels = indexable(X, y, labels)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    # BUGFIX: clone the estimator for every (fold, parameter value) job.
    # Previously the *same* estimator instance was handed to all jobs, so
    # concurrent/sequential set_params calls inside _fit_and_score mutated
    # shared state and also modified the caller's estimator in place.
    out = parallel(delayed(_fit_and_score)(
        clone(estimator), X, y, scorer, train, test, verbose,
        parameters={param_name: v}, fit_params=None, return_train_score=True)
        for train, test in cv.split(X, y, labels) for v in param_range)
    # Keep (train_score, test_score); drop n_samples/time columns, then
    # reshape to (2, n_params, n_folds): out[0] train, out[1] test.
    out = np.asarray(out)[:, :2]
    n_params = len(param_range)
    n_cv_folds = out.shape[0] // n_params
    out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
    return out[0], out[1]
| bsd-3-clause |
SyntaxVoid/PyFusionGUI | pyfusion/acquisition/MDSPlus/h1ds.py | 1 | 5339 | """Python module for the H1 data system.
This code works with python2 and python3.
It is assumed most users will be using python2, so the general
design pattern is
try:
python2 code
except (ImportError, etc):
python3 code
Dependencies: numpy
Optional: matplotlib (for plotting)
"""
try: # python2
from urlparse import urlsplit, urlunsplit
except ImportError: # python3
from urllib.parse import urlsplit, urlunsplit
try: # python2
from urllib2 import urlopen
except ImportError: #python3
from urllib.request import urlopen
from xml.dom import minidom
from datetime import datetime
from struct import unpack_from, calcsize
try:
import numpy as np
except ImportError:
print("Can't find Numpy. Please install it.")
# Map from friendly metadata names to the custom HTTP header fields in which
# the H1DS server transmits signal/dimension scaling information.
header_dict = {'signal_min':'X-H1DS-signal-min',
               'signal_delta':'X-H1DS-signal-delta',
               'signal_units':'X-H1DS-signal-units',
               'dim_t0':'X-H1DS-dim-t0',
               'dim_delta':'X-H1DS-dim-delta',
               'dim_length':'X-H1DS-dim-length',
               'dim_units':'X-H1DS-dim-units'}
class H1MDSData:
    """Container for a single H1 MDSplus data node.

    Attributes
    ----------
    shot : int -- shot number the data belongs to
    shottime : datetime -- timestamp of the shot
    tree : str -- name of the MDSplus tree
    path : str -- node path within the tree
    data : object -- payload (Signal instance, float or text)
    """
    def __init__(self, shot, shottime, tree, path, data):
        self.shot = shot
        self.shottime = shottime
        self.tree = tree
        self.path = path
        self.data = data
class Signal:
    """A sampled signal: value array plus its dimension (time) array.

    ``signal`` and ``dim`` are equal-length numpy arrays; the ``*_units``
    strings label the respective axes.
    """
    def __init__(self, signal, dim, signal_units, dim_units):
        self.signal = signal
        self.signal_units = signal_units
        self.dim = dim
        self.dim_units = dim_units
    def plot(self):
        """Plot signal vs. dim with labelled axes (requires matplotlib)."""
        # Imported lazily so matplotlib is only needed when plotting.
        import pylab as pl
        pl.plot(self.dim, self.signal)
        pl.xlabel(self.dim_units)
        pl.ylabel(self.signal_units)
        pl.grid(True)
        pl.show()
def add_query_to_url(url, query):
    """Return *url* with *query* appended to its GET query string."""
    parts = urlsplit(url)
    # Join with '&' only when the URL already carries a query string.
    combined = query if parts.query == '' else parts.query + '&' + query
    return urlunsplit((parts.scheme, parts.netloc, parts.path,
                       combined, parts.fragment))
def signal_from_binary_url(url):
    """Fetch *url* and decode the response body into a ``Signal``.

    The H1DS server returns the raw samples as little-endian signed 16-bit
    integers and describes the scaling and time base in custom HTTP headers
    (see ``header_dict``).
    """
    u = urlopen(url)
    headers = u.info()
    bin_data = u.read()
    # assume little endian short (signed) integer (ref: http://docs.python.org/library/struct.html)
    # binary format details should be passed in HTTP headers....
    # Integer division so the struct repeat count is an int on Python 3.
    d = unpack_from('<%dh' % (len(bin_data) // calcsize('<h')), bin_data)
    h = {}
    try:
        for h_name, h_str in header_dict.items():
            h[h_name] = headers[h_str]
    except Exception:
        # Some header containers normalise names to lower case; retry with
        # lower-cased names.  (The original called the nonexistent builtin
        # ``lower(h_str)``, which raised NameError and masked the fallback.)
        for h_name, h_str in header_dict.items():
            h[h_name] = headers[h_str.lower()]
    # Undo the integer packing: value = min + delta * raw_sample.
    s_arr = float(h['signal_min']) + float(h['signal_delta'])*np.array(d)
    # Reconstruct the uniformly sampled time base: t = t0 + delta * index.
    dim_arr = float(h['dim_t0']) + float(h['dim_delta'])*np.arange(int(h['dim_length']), dtype=np.float32)
    signal = Signal(s_arr, dim_arr, h['signal_units'], h['dim_units'])
    return signal
def simple_xml_value(doc, tag):
    """Return the text content of the first *tag* element in *doc*.

    Named function instead of the original lambda assignment (PEP 8 E731);
    behaviour is unchanged.
    """
    return doc.getElementsByTagName(tag)[0].firstChild.nodeValue
def data_from_url(url):
    """Retrieve data object from H1DS URL."""
    # We use the XML view here, so make sure the URL has view=xml GET query.
    url = add_query_to_url(url, 'view=xml')
    xml_doc = minidom.parse(urlopen(url))
    shot_number = int(simple_xml_value(xml_doc, 'shot_number'))
    shot_time_str = simple_xml_value(xml_doc, 'shot_time')
    mds_tree = simple_xml_value(xml_doc, 'mds_tree')
    mds_path = simple_xml_value(xml_doc, 'mds_path')
    # Server timestamp format, e.g. '21-Feb-2012 12:34:56.789'.
    shot_time = datetime.strptime(shot_time_str, "%d-%b-%Y %H:%M:%S.%f")
    data_node = xml_doc.getElementsByTagName('data')[0]
    data_node_type = data_node.getAttribute('type')
    if data_node_type == 'signal':
        # Signals are served separately as packed binary data; the XML only
        # carries the URL of that binary view.
        data_url = data_node.firstChild.nodeValue
        data = signal_from_binary_url(data_url)
    elif data_node_type == 'scalar':
        # TODO: use proper data types
        data = float(data_node.firstChild.nodeValue)
    elif data_node_type == 'text':
        data = data_node.firstChild.nodeValue
    # NOTE(review): if the server returns an unrecognised data type, `data`
    # is unbound here and the next line raises NameError -- confirm whether
    # an explicit error message is wanted instead.
    data_obj = H1MDSData(shot_number, shot_time, mds_tree, mds_path, data)
    return data_obj
def data_from_mds(mds_tree, mds_path, shot_number):
    """Fetch data for an MDSplus node.

    Asks the H1DS server to resolve (tree, path, shot) into a data URL,
    then downloads and decodes that URL via ``data_from_url``.
    """
    query = '?shot=%(shot)d&mds-tree=%(mds_tree)s&mds-path=%(mds_path)s' %{'shot':int(shot_number),
                                                                           'mds_tree':mds_tree,
                                                                           'mds_path':mds_path}
    url = 'http://h1svr.anu.edu.au/mdsplus/request_url'+query
    # The server replies with a small XML document whose <mds_url> element
    # holds the path of the data view for this node.
    xml_doc = minidom.parse(urlopen(url))
    mds_url_path = simple_xml_value(xml_doc, 'mds_url')
    mds_url = 'http://h1svr.anu.edu.au'+mds_url_path
    return data_from_url(mds_url)
#############
# Test code #
#############
def do_test():
    """Fetch a known mirnov-coil signal as a smoke test (requires network)."""
    #test_url = 'http://h1svr/mdsplus/h1data/58623/OPERATIONS/MIRNOV/A14_14/INPUT_2/'
    #return data_from_url(test_url)
    return data_from_mds('h1data', '.operations.mirnov:a14_14:input_2', 58623)
#######################################
# Uncomment these lines to run tests: #
# From here... #
#######################################
# Run the network smoke test when executed as a script.
if __name__ == "__main__":
    test_data = do_test()
#######################################
# To here. #
#######################################
| gpl-3.0 |
mrshu/scikit-learn | examples/document_clustering.py | 1 | 3298 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two algorithms are demoed: ordinary k-means and its faster cousin minibatch
k-means.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: Simplified BSD
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# parse commandline arguments
op = OptionParser()
op.add_option("--no-minibatch",
              action="store_false", dest="minibatch", default=True,
              help="Use ordinary k-means algorithm.")

# Show the module docstring and CLI help (Python 2 print statements).
print __doc__
op.print_help()

(opts, args) = op.parse_args()
# This example is configured entirely through options; positional
# arguments indicate a usage error.
if len(args) > 0:
    op.error("this script takes no arguments.")
    sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
    'alt.atheism',
    'talk.religion.misc',
    'comp.graphics',
    'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None

print "Loading 20 newsgroups dataset for categories:"
print categories

# Download (or load the cached copy of) the 20 newsgroups corpus.
dataset = fetch_20newsgroups(subset='all', categories=categories,
                             shuffle=True, random_state=42)

print "%d documents" % len(dataset.data)
print "%d categories" % len(dataset.target_names)
print

labels = dataset.target
# Number of distinct ground-truth classes; reused as the cluster count.
true_k = np.unique(labels).shape[0]

print "Extracting features from the training dataset using a sparse vectorizer"
t0 = time()
# TF-IDF features; overly common terms are pruned with max_df and the
# vocabulary is capped at 10000 terms.
vectorizer = TfidfVectorizer(max_df=0.5, max_features=10000,
                             stop_words='english')
X = vectorizer.fit_transform(dataset.data)
print "done in %fs" % (time() - t0)
print "n_samples: %d, n_features: %d" % X.shape
print
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000,
batch_size=1000, verbose=1)
else:
km = KMeans(n_clusters=true_k, init='random', max_iter=100, n_init=1,
verbose=1)
print "Clustering sparse data with %s" % km
t0 = time()
km.fit(X)
print "done in %0.3fs" % (time() - t0)
print
print "Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_)
print "Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_)
print "V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_)
print "Adjusted Rand-Index: %.3f" % \
metrics.adjusted_rand_score(labels, km.labels_)
print "Silhouette Coefficient: %0.3f" % metrics.silhouette_score(
X, labels, sample_size=1000)
print
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/tests/test_kernel_approximation.py | 78 | 7586 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
# Normalise rows to sum to one: the chi^2-style kernels below expect
# non-negative, histogram-like features.
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
    # test that AdditiveChi2Sampler approximates kernel on random data

    # compute exact kernel
    # abbreviations for easier formula
    X_ = X[:, np.newaxis, :]
    Y_ = Y[np.newaxis, :, :]

    large_kernel = 2 * X_ * Y_ / (X_ + Y_)

    # reduce to n_samples_x x n_samples_y by summing over features
    kernel = (large_kernel.sum(axis=2))

    # approximate kernel mapping
    transform = AdditiveChi2Sampler(sample_steps=3)
    X_trans = transform.fit_transform(X)
    Y_trans = transform.transform(Y)
    kernel_approx = np.dot(X_trans, Y_trans.T)

    # The approximation only needs to match to one decimal place.
    assert_array_almost_equal(kernel, kernel_approx, 1)

    # Sparse input must produce exactly the same mapping as dense input.
    X_sp_trans = transform.fit_transform(csr_matrix(X))
    Y_sp_trans = transform.transform(csr_matrix(Y))
    assert_array_equal(X_trans, X_sp_trans.A)
    assert_array_equal(Y_trans, Y_sp_trans.A)

    # test error is raised on negative input
    Y_neg = Y.copy()
    Y_neg[0, 0] = -1
    assert_raises(ValueError, transform.transform, Y_neg)

    # test error on invalid sample_steps (only 1, 2 and 3 are supported)
    transform = AdditiveChi2Sampler(sample_steps=4)
    assert_raises(ValueError, transform.fit, X)

    # test that the sample interval is set correctly
    sample_steps_available = [1, 2, 3]
    for sample_steps in sample_steps_available:

        # test that the sample_interval is initialized correctly
        transform = AdditiveChi2Sampler(sample_steps=sample_steps)
        assert_equal(transform.sample_interval, None)

        # test that the sample_interval is changed in the fit method
        transform.fit(X)
        assert_not_equal(transform.sample_interval_, None)

    # test that an explicitly given sample_interval is kept verbatim
    sample_interval = 0.3
    transform = AdditiveChi2Sampler(sample_steps=4,
                                    sample_interval=sample_interval)
    assert_equal(transform.sample_interval, sample_interval)
    transform.fit(X)
    assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
    # test that SkewedChi2Sampler approximates the skewed chi^2 kernel on
    # random data (the previous comment said RBFSampler: copy-paste slip)

    # compute exact kernel
    c = 0.03

    # abbreviations for easier formula
    X_c = (X + c)[:, np.newaxis, :]
    Y_c = (Y + c)[np.newaxis, :, :]

    # we do it in log-space in the hope that it's more stable
    # this array is n_samples_x x n_samples_y big x n_features
    log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
                  np.log(X_c + Y_c))
    # reduce to n_samples_x x n_samples_y by summing over features in log-space
    kernel = np.exp(log_kernel.sum(axis=2))

    # approximate kernel mapping
    transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
                                  random_state=42)
    X_trans = transform.fit_transform(X)
    Y_trans = transform.transform(Y)
    kernel_approx = np.dot(X_trans, Y_trans.T)

    # One decimal place of agreement suffices for the Monte-Carlo mapping.
    assert_array_almost_equal(kernel, kernel_approx, 1)

    # test error is raised on negative input
    Y_neg = Y.copy()
    Y_neg[0, 0] = -1
    assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
    # test that RBFSampler approximates kernel on random data
    # compute exact kernel
    gamma = 10.
    kernel = rbf_kernel(X, Y, gamma=gamma)

    # approximate kernel mapping (random Fourier features)
    rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
    X_trans = rbf_transform.fit_transform(X)
    Y_trans = rbf_transform.transform(Y)
    kernel_approx = np.dot(X_trans, Y_trans.T)

    error = kernel - kernel_approx
    assert_less_equal(np.abs(np.mean(error)), 0.01)  # close to unbiased
    np.abs(error, out=error)  # in-place absolute value
    assert_less_equal(np.max(error), 0.1)  # nothing too far off
    assert_less_equal(np.mean(error), 0.05)  # mean is fairly close
def test_input_validation():
    """Kernel approximation transformers accept plain Python lists.

    Regression test: old versions crashed on list input.  No assertions --
    we only check that fit/transform run without raising.
    """
    X_list = [[1, 2], [3, 4], [5, 6]]
    for transformer in (AdditiveChi2Sampler(), SkewedChi2Sampler(),
                        RBFSampler()):
        transformer.fit(X_list).transform(X_list)

    # Sparse input must also be accepted by RBFSampler.
    X_sparse = csr_matrix(X_list)
    RBFSampler().fit(X_sparse).transform(X_sparse)
def test_nystroem_approximation():
    # some basic tests
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 4))

    # With n_components = n_samples this is exact
    X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
    K = rbf_kernel(X)
    assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)

    # With fewer components we only check the output shape.
    trans = Nystroem(n_components=2, random_state=rnd)
    X_transformed = trans.fit(X).transform(X)
    assert_equal(X_transformed.shape, (X.shape[0], 2))

    # test callable kernel
    linear_kernel = lambda X, Y: np.dot(X, Y.T)
    trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
    X_transformed = trans.fit(X).transform(X)
    assert_equal(X_transformed.shape, (X.shape[0], 2))

    # test that all named kernels fit and transform
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
        X_transformed = trans.fit(X).transform(X)
        assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
    # test that nystroem works with singular kernel matrix
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    X = np.vstack([X] * 2)  # duplicate samples -> rank-deficient kernel

    gamma = 100
    N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
    X_transformed = N.transform(X)

    K = rbf_kernel(X, gamma=gamma)

    assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    # The feature map itself must be finite despite the singular kernel.
    # (The original asserted on the unrelated module-level `Y`, which is
    # trivially finite and therefore tested nothing.)
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
    # Non-regression: Nystroem should pass other parameters beside gamma.
    rnd = np.random.RandomState(37)
    X = rnd.uniform(size=(10, 4))
    # Exact polynomial kernel with non-default degree/coef0 for comparison.
    K = polynomial_kernel(X, degree=3.1, coef0=.1)
    nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
                        degree=3.1, coef0=.1)
    X_transformed = nystroem.fit_transform(X)
    # Full-rank approximation must reproduce the kernel exactly, which only
    # happens when degree and coef0 were forwarded correctly.
    assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
    # Test Nystroem on a callable.
    rnd = np.random.RandomState(42)
    n_samples = 10
    X = rnd.uniform(size=(n_samples, 4))

    def logging_histogram_kernel(x, y, log):
        """Histogram kernel that writes to a log."""
        log.append(1)
        return np.minimum(x, y).sum()

    kernel_log = []
    X = list(X)  # test input validation
    Nystroem(kernel=logging_histogram_kernel,
             n_components=(n_samples - 1),
             kernel_params={'log': kernel_log}).fit(X)
    # Fitting evaluates the kernel once per unordered pair of samples.
    assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
zutshi/S3CAMX | examples/dc_motor/dc_motor.py | 1 | 3624 | # Must satisfy the signature
# [t,X,D,P] = sim_function(T,X0,D0,P0,I0);
import numpy as np
from scipy.integrate import ode
#import matplotlib.pyplot as plt
# DC motor parameters -- presumably SI units; confirm against the model
# source.
J = 0.01   # rotor moment of inertia
K = 0.01   # motor torque / back-EMF constant
L = 0.5    # armature inductance
R = 1.0    # armature resistance
b = 0.1    # viscous friction coefficient

# Linear state-space model dx/dt = A*x + B*u, y = C*x + D*u.
# NOTE(review): state ordering appears to be [speed, current] and the
# second input a load torque (the -1/J entry) -- TODO confirm.
A = np.matrix([[-b/J, K/J], [-K/L, -R/L]])
B = np.matrix([[0.0, -1/J], [1/L, 0.0]])
C = np.matrix([1.0, 0.0])
D = np.matrix([0.0, 0.0])
# TODO: (1) function signatures!
# (2) all arrays should be matrices!
class SIM(object):
    """Simulator wrapper exposing the required sim_function interface.

    ``self.sim`` is the entry point used by callers; it integrates the
    module-level dynamics ``dyn`` over one time interval.
    """

    def __init__(self, plt, pvt_init_data):
        # Default entry point.  The dense/sparse variants are behaviourally
        # identical (see _integrate); `plt` and `pvt_init_data` are unused.
        self.sim = self.sim_sparse

    def sim_dense(self, TT, X0, D, P, U, I, property_checker, property_violated_flag):
        """Integrate over TT = [Ti, Tf]; see ``_integrate`` for details."""
        return self._integrate(TT, X0, D, P, U, I,
                               property_checker, property_violated_flag)

    def sim_sparse(self, TT, X0, D, P, U, I, property_checker, property_violated_flag):
        """Identical to ``sim_dense``; kept for interface compatibility."""
        return self._integrate(TT, X0, D, P, U, I,
                               property_checker, property_violated_flag)

    def _integrate(self, TT, X0, D, P, U, I, property_checker, property_violated_flag):
        """Shared implementation of sim_dense/sim_sparse.

        The two public methods were verbatim duplicates; this helper holds
        the single copy.  Returns ``(t_final, X_final, dummy_D, dummy_P)``
        where D and P are zero arrays of the input shapes (no discrete or
        parameter dynamics are modelled).
        """
        # atol = 1e-10
        rtol = 1e-5
        num_dim_x = len(X0)
        # Trajectory storage filled by the solout callback: [times, states].
        plot_data = [np.empty(0, dtype=float),
                     np.empty((0, num_dim_x), dtype=float)]
        solver = ode(dyn).set_integrator('dopri5', rtol=rtol)
        Ti = TT[0]
        Tf = TT[1]
        T = Tf - Ti
        if property_checker:
            violating_state = [()]
            # Record every accepted solver step (and, in principle, check
            # the property along the trajectory).
            solver.set_solout(solout_fun(property_checker, violating_state,
                                         plot_data))
        # Integrate over a duration T starting from t=0 (relative time).
        solver.set_initial_value(X0, t=0.0)
        solver.set_f_params(U)
        X_ = solver.integrate(T)
        # Final-state property check.
        if property_checker is not None:
            if property_checker(Tf, X_):
                property_violated_flag[0] = True
        dummy_D = np.zeros(D.shape)
        dummy_P = np.zeros(P.shape)
        return (Tf, X_, dummy_D, dummy_P)
# State Space Modeling Template
# dx/dt = Ax + Bu
# y = Cx + Du
def dyn(t, x, u):
    """ODE right-hand side for the solver: dx/dt = A*x + B*u."""
    # TODO: temp hack as noise is still not implemented. Force noise = 0
    u_col = np.matrix([u[0], 0.0]).T
    x_col = np.matrix(x).T
    x_dot = A * x_col + B * u_col
    # Return a row-vector ndarray, as scipy's ode expects.
    return np.array(x_dot.T)
def solout_fun(property_checker, violating_state, plot_data):
    """Build a dopri5 'solout' callback recording each accepted step.

    The callback appends (t, Y) to ``plot_data`` in place and returns 0 so
    the integration always continues.  Property checking along the
    trajectory is currently disabled; ``property_checker`` and
    ``violating_state`` are accepted but unused.
    """
    def solout(t, Y):
        # Grow the stored time and state trajectories.
        plot_data[0] = np.concatenate((plot_data[0], np.array([t])))
        plot_data[1] = np.concatenate((plot_data[1], np.array([Y])))
        return 0
    return solout
| bsd-2-clause |
ECP-CANDLE/Benchmarks | common/darts/meters/epoch.py | 1 | 1196 | import os
import pandas as pd
from darts.meters.average import AverageMeter
from darts.meters.accuracy import MultitaskAccuracyMeter
class EpochMeter:
    """ Track epoch loss and accuracy """

    def __init__(self, tasks, name='train'):
        self.name = name
        self.loss_meter = AverageMeter(name)
        self.acc_meter = MultitaskAccuracyMeter(tasks)
        self.reset()

    def reset(self):
        """Clear the per-epoch loss/accuracy history."""
        self.loss = []
        self.acc = {task: [] for task, _ in self.acc_meter.meters.items()}

    def update_batch_loss(self, loss, batch_size):
        """Fold one batch's loss into the running epoch average."""
        self.loss_meter.update(loss, batch_size)

    def update_batch_accuracy(self, acc, batch_size):
        """Fold one batch's per-task accuracy into the running averages."""
        self.acc_meter.update(acc, batch_size)

    def update_epoch(self):
        """Snapshot the running averages as this epoch's values."""
        self.loss.append(self.loss_meter.avg)
        for task, acc in self.acc_meter.meters.items():
            self.acc[task].append(acc.avg)

    def dataframe(self):
        """Return the history as a DataFrame with one row per epoch."""
        # Copy before adding the loss column: the original aliased
        # `self.acc` and injected a 'loss' key into it on every call.
        results = dict(self.acc)
        results['loss'] = self.loss
        return pd.DataFrame(results)

    def save(self, path):
        """Write the epoch history as CSV under *path* (created if needed)."""
        os.makedirs(path, exist_ok=True)
        path = os.path.join(path, f'{self.name}_epoch_results')
        self.dataframe().to_csv(path, index=False)
| mit |
yasirkhan380/Tutorials | notebooks/fig_code/helpers.py | 74 | 2301 | """
Small helpers for code that is not shown in the notebooks
"""
from sklearn import neighbors, datasets, linear_model
import pylab as pl
import numpy as np
from matplotlib.colors import ListedColormap
# Create color maps for 3-class classification problem, as with iris
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])  # decision regions
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])   # training points
def plot_iris_knn():
    """Plot the decision regions of a 3-NN classifier trained on the first
    two iris features (sepal length and width)."""
    iris = datasets.load_iris()
    X = iris.data[:, :2]  # we only take the first two features. We could
                          # avoid this ugly slicing by using a two-dim dataset
    y = iris.target

    knn = neighbors.KNeighborsClassifier(n_neighbors=3)
    knn.fit(X, y)

    # Evaluation grid slightly larger than the data range.
    x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
    y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                         np.linspace(y_min, y_max, 100))
    # Predict the class at every grid point to paint the regions.
    Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    pl.figure()
    pl.pcolormesh(xx, yy, Z, cmap=cmap_light)

    # Plot also the training points
    pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    pl.xlabel('sepal length (cm)')
    pl.ylabel('sepal width (cm)')
    pl.axis('tight')
def plot_polynomial_regression():
    """Fit 4th- and 9th-order polynomials to noisy samples of a 9th-order
    polynomial and plot them, followed by the ground-truth curve."""
    rng = np.random.RandomState(0)
    x = 2*rng.rand(100) - 1

    # Ground-truth 9th-order polynomial plus Gaussian noise.
    f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9
    y = f(x) + .4 * rng.normal(size=100)

    x_test = np.linspace(-1, 1, 100)

    pl.figure()
    pl.scatter(x, y, s=4)

    # Degree-4 fit via an explicit Vandermonde design matrix.
    X = np.array([x**i for i in range(5)]).T
    X_test = np.array([x_test**i for i in range(5)]).T
    regr = linear_model.LinearRegression()
    regr.fit(X, y)
    pl.plot(x_test, regr.predict(X_test), label='4th order')

    # Degree-9 fit (matches the true model order).
    X = np.array([x**i for i in range(10)]).T
    X_test = np.array([x_test**i for i in range(10)]).T
    regr = linear_model.LinearRegression()
    regr.fit(X, y)
    pl.plot(x_test, regr.predict(X_test), label='9th order')

    pl.legend(loc='best')
    pl.axis('tight')
    pl.title('Fitting a 4th and a 9th order polynomial')

    # Second figure: samples against the noise-free ground truth.
    pl.figure()
    pl.scatter(x, y, s=4)
    pl.plot(x_test, f(x_test), label="truth")
    pl.axis('tight')
    pl.title('Ground truth (9th order polynomial)')
| bsd-3-clause |
niliafsari/KSP-SN | compileSN.py | 1 | 5339 | import urllib
import os
import glob
import subprocess
import commands
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from astropy.time import Time
from moon import *
import csv
import json
from pprint import pprint
import os.path
import sys
sys.path.insert(0, '/home/afsari/')
from SNAP2.Analysis import *
# Read the list of supernova names to process (one name per line).
with open("logs/sn_names_color.txt") as f:
    file_names = f.readlines()
file_names = [line.rstrip('\n') for line in file_names]
files_count=len(file_names)
Band='B'
# Extinction coefficients per band; the second assignment deliberately
# overrides the first, so only the 4.315/3.315/... set is used.
coef = {'B': 3.626, 'V': 2.742, 'I': 1.505, 'i': 1.698}
coef = {'B': 4.315, 'V': 3.315, 'I': 1.940, 'i': 2.086}
add=[]
for i,sn_name in enumerate(file_names):
    print "name",sn_name
    # Accumulator for rows of [time, mag, err, abs_mag, abs_err].
    mag = np.zeros(shape=(0, 5))
    location='/home/afsari/PycharmProjects/kspSN/SN_json/'
    print os.path.isfile(location+sn_name+'.json')
    # Download the Open Supernova Catalog JSON record if not cached.
    if os.path.isfile(location+sn_name+'.json')==False:
        url='https://sne.space/astrocats/astrocats/supernovae/output/json/'+sn_name+'.json'
        urllib.urlretrieve(url,location+sn_name+'.json')
    with open(location+sn_name+'.json') as data_file:
        print data_file
        data = json.load(data_file)
    redshift=data[sn_name]["redshift"][0]["value"]
    ebv=data[sn_name]["ebv"][0]["value"]
    if sn_name == 'SN2014cx':
        print ebv
    ds=np.zeros(shape=(0,5))
    for dat in data[sn_name]["photometry"]:
        try:
            # Per-object E(B-V) overrides for objects with known better values.
            if sn_name=='SN2016esw':
                ebv=0.246
            if sn_name=='SN2013fs':
                ebv=0.05
            if sn_name=='SN2014cx':
                ebv=0.1
            if sn_name=='SN1987A':
                ebv=0.19
            if "e_magnitude" in dat:
                error=float(dat["e_magnitude"])
            else:
                error=0
            if dat["band"]==Band and Band != 'I':
                # Objects with known distance moduli get them applied
                # directly; everything else goes through absMag(redshift).
                if sn_name=='SN2013fs':
                    add = np.concatenate(([dat["time"], dat["magnitude"], error], [deredMag(float(dat["magnitude"]), float(ebv), coef[Band])-31.27,error]))
                if sn_name=='SN2014cx':
                    if dat["source"]=='1':
                        add = np.concatenate(([dat["time"], dat["magnitude"], error], [deredMag(float(dat["magnitude"]), float(ebv), coef[Band])-31.27,error]))
                if sn_name == 'SN1987A':
                    add = np.concatenate(([dat["time"], dat["magnitude"], error],
                                          [deredMag(float(dat["magnitude"]), float(ebv), coef[Band]) - 18.56, error]))
                elif sn_name=='SN1999em':
                    if dat["source"] == "20,31":
                        add = np.concatenate(([dat["time"], dat["magnitude"], error],
                                              [ deredMag(float(dat["magnitude"]), float(ebv), coef[Band])-29.46,error]))
                else:
                    add = np.concatenate(([dat["time"], dat["magnitude"], error]
                                          , absMag(deredMag(float(dat["magnitude"]), float(ebv), coef[Band]),
                                                   float(redshift), error, 0)))
                add=np.reshape(add,(1,5))
                if sn_name=='SN2004er':
                    print add
                mag=np.concatenate((mag,add),axis=0)
            elif Band == 'I':
                # Most catalog entries use SDSS-like 'i' instead of Johnson
                # 'I'; temporarily switch bands for objects known to use 'i'.
                if sn_name!='SN1987A' and sn_name!='SN1999em' and sn_name!='SN2004ek' and sn_name!="SN1991al" and sn_name!="SN1992af" and sn_name!='SN1999em' and sn_name!='SN1999cr' and sn_name!='SN2009ib' and sn_name!='SN2013ej' and sn_name!='SN2005cs' and sn_name!='SN20009bw' and sn_name!='SN2013fs' and sn_name!='SN2016esw':
                    Band = 'i'
                if dat["band"]==Band :
                    if sn_name == 'SN2014cx':
                        if dat["source"] == '1':
                            add = np.concatenate(([dat["time"], dat["magnitude"], error],
                                                  [deredMag(float(dat["magnitude"]), float(ebv), coef[Band]) - 31.27,
                                                   error]))
                    if sn_name == 'SN1987A':
                        add = np.concatenate(([dat["time"], dat["magnitude"], error],
                                              [deredMag(float(dat["magnitude"]), float(ebv), coef[Band]) - 18.56,
                                               error]))
                    elif sn_name == 'SN1999em':
                        if dat["source"]== "20,31":
                            add = np.concatenate(([dat["time"], dat["magnitude"], error],
                                                  [deredMag(float(dat["magnitude"]), float(ebv), coef[Band]) - 29.46, error]))
                            print add
                    else:
                        add = np.concatenate(([dat["time"], dat["magnitude"], error]
                                              , absMag(deredMag(float(dat["magnitude"]), float(ebv), coef[Band]),
                                                       float(redshift), error, 0)))
                    add = np.reshape(add, (1, 5))
                    mag = np.concatenate((mag, add), axis=0)
                # Restore the band label so the output filename stays 'I'.
                Band='I'
        except:
            # NOTE(review): bare except silently skips malformed photometry
            # entries (and would also swallow real bugs) -- confirm intended.
            #print sn_name+" error in "
            #print dat
            continue
    #print mag.shape
    if Band == 'i':
        Band = 'I'
    # One .npy file of photometry rows per supernova per band.
    np.save("phot_csv/compiledSN_"+Band+"_"+sn_name+".npy", mag)
SingTel-DataCo/incubator-superset | superset/sql_lab.py | 2 | 8487 | from time import sleep
from datetime import datetime
import json
import logging
import pandas as pd
import sqlalchemy
import uuid
from celery.exceptions import SoftTimeLimitExceeded
from sqlalchemy.pool import NullPool
from sqlalchemy.orm import sessionmaker
from superset import (
app, db, utils, dataframe, results_backend)
from superset.models.sql_lab import Query
from superset.sql_parse import SupersetQuery
from superset.db_engine_specs import LimitMethod
from superset.jinja_context import get_template_processor
from superset.utils import QueryStatus, get_celery_app
# Module-level handles resolved once at import time.
config = app.config
celery_app = get_celery_app(config)
stats_logger = app.config.get('STATS_LOGGER')
# Hard kill threshold (seconds) for asynchronous SQL Lab queries.
SQLLAB_TIMEOUT = config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC', 600)
class SqlLabException(Exception):
    """Raised when SQL Lab query execution or lookup fails."""
    pass
def dedup(l, suffix='__'):
    """De-duplicates a list of string by suffixing a counter

    Always returns the same number of entries as provided, and always returns
    unique values.

    >>> dedup(['foo', 'bar', 'bar', 'bar'])
    ['foo', 'bar', 'bar__1', 'bar__2']
    """
    new_l = []
    seen = {}
    for s in l:
        if s in seen:
            # Bump the counter until the suffixed name is itself unused.
            # The original never registered renamed entries, so inputs such
            # as ['a', 'a', 'a__1'] produced duplicate output -- violating
            # the documented contract.
            seen[s] += 1
            candidate = s + suffix + str(seen[s])
            while candidate in seen:
                seen[s] += 1
                candidate = s + suffix + str(seen[s])
            s = candidate
        # Register the emitted name (original or renamed) with counter 0.
        seen[s] = 0
        new_l.append(s)
    return new_l
def get_query(query_id, session, retry_count=5):
    """Fetch the Query ORM object, retrying up to *retry_count* times.

    The row may not be visible yet if the enqueueing transaction has not
    committed, so each failed lookup sleeps one second before retrying.
    Raises ``SqlLabException`` when every attempt fails.
    """
    query = None
    attempt = 0
    while not query and attempt < retry_count:
        try:
            query = session.query(Query).filter_by(id=query_id).one()
        except Exception:
            attempt += 1
            logging.error(
                "Query with id `{}` could not be retrieved".format(query_id))
            stats_logger.incr('error_attempting_orm_query_' + str(attempt))
            logging.error("Sleeping for a sec before retrying...")
            # Give the enqueueing transaction a chance to commit.
            sleep(1)
    if not query:
        stats_logger.incr('error_failed_at_getting_orm_query')
        raise SqlLabException("Failed at getting query")
    return query
def get_session(nullpool):
    """Return a SQLAlchemy session.

    With ``nullpool=True`` (the async/celery-worker case) a fresh engine
    without connection pooling is created so workers do not share pooled
    connections; otherwise the application's scoped session is reused.
    """
    if nullpool:
        engine = sqlalchemy.create_engine(
            app.config.get('SQLALCHEMY_DATABASE_URI'), poolclass=NullPool)
        session_class = sessionmaker()
        session_class.configure(bind=engine)
        return session_class()
    else:
        session = db.session()
        session.commit()  # HACK
        return session
@celery_app.task(bind=True, soft_time_limit=SQLLAB_TIMEOUT)
def get_sql_results(
        ctask, query_id, return_results=True, store_results=False):
    """Executes the sql query returns the results."""
    try:
        return execute_sql(
            ctask, query_id, return_results, store_results)
    except Exception as e:
        # Last-resort handler: anything unhandled marks the query FAILED so
        # the UI does not show it as running forever.
        logging.exception(e)
        stats_logger.incr('error_sqllab_unhandled')
        # called_directly is True when running synchronously (no celery),
        # in which case the pooled application session is used.
        sesh = get_session(not ctask.request.called_directly)
        query = get_query(query_id, sesh)
        query.error_message = str(e)
        query.status = QueryStatus.FAILED
        query.tmp_table_name = None
        sesh.commit()
def execute_sql(ctask, query_id, return_results=True, store_results=False):
    """Executes the sql query returns the results.

    Runs the query identified by *query_id*, updating its ORM state as it
    progresses, and returns a JSON-serialisable payload containing status,
    data and columns.  When *store_results* is set, the payload is also
    pushed to the results backend under a fresh UUID key.
    """
    session = get_session(not ctask.request.called_directly)
    query = get_query(query_id, session)
    payload = dict(query_id=query_id)
    database = query.database
    db_engine_spec = database.db_engine_spec
    db_engine_spec.patch()

    def handle_error(msg):
        """Local method handling error while processing the SQL"""
        query.error_message = msg
        query.status = QueryStatus.FAILED
        query.tmp_table_name = None
        session.commit()
        payload.update({
            'status': query.status,
            # Fixed key: was misspelled 'error_essage', so consumers could
            # never read the error text from the payload.
            'error_message': msg,
        })
        return payload

    if store_results and not results_backend:
        return handle_error("Results backend isn't configured.")

    # Limit enforced only for retrieving the data, not for the CTA queries.
    superset_query = SupersetQuery(query.sql)
    executed_sql = superset_query.stripped()
    if not superset_query.is_select() and not database.allow_dml:
        return handle_error(
            "Only `SELECT` statements are allowed against this database")
    if query.select_as_cta:
        if not superset_query.is_select():
            # (A dead `return` that followed this statement was removed.)
            return handle_error(
                "Only `SELECT` statements can be used with the CREATE TABLE "
                "feature.")
        if not query.tmp_table_name:
            # Generate a per-user, timestamped temp table name.
            start_dttm = datetime.fromtimestamp(query.start_time)
            query.tmp_table_name = 'tmp_{}_table_{}'.format(
                query.user_id,
                start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
        executed_sql = superset_query.as_create_table(query.tmp_table_name)
        query.select_as_cta_used = True
    elif (
            query.limit and superset_query.is_select() and
            db_engine_spec.limit_method == LimitMethod.WRAP_SQL):
        executed_sql = database.wrap_sql_limit(executed_sql, query.limit)
        query.limit_used = True

    # Render Jinja templates and apply engine-specific preprocessing.
    try:
        template_processor = get_template_processor(
            database=database, query=query)
        executed_sql = template_processor.process_template(executed_sql)
        executed_sql = db_engine_spec.sql_preprocessor(executed_sql)
    except Exception as e:
        logging.exception(e)
        msg = "Template rendering failed: " + utils.error_msg_from_exception(e)
        return handle_error(msg)

    query.executed_sql = executed_sql
    query.status = QueryStatus.RUNNING
    query.start_running_time = utils.now_as_float()
    session.merge(query)
    session.commit()
    logging.info("Set query to 'running'")

    conn = None  # so the except blocks can close it safely
    try:
        # Create the engine once (the original built it twice).
        engine = database.get_sqla_engine(
            schema=query.schema, nullpool=not ctask.request.called_directly)
        conn = engine.raw_connection()
        cursor = conn.cursor()
        logging.info("Running query: \n{}".format(executed_sql))
        logging.info(query.executed_sql)
        cursor.execute(
            query.executed_sql, **db_engine_spec.cursor_execute_kwargs)
        logging.info("Handling cursor")
        db_engine_spec.handle_cursor(cursor, query, session)
        logging.info("Fetching data: {}".format(query.to_dict()))
        data = db_engine_spec.fetch_data(cursor, query.limit)
    except SoftTimeLimitExceeded as e:
        logging.exception(e)
        if conn is not None:
            conn.close()
        return handle_error(
            "SQL Lab timeout. This environment's policy is to kill queries "
            "after {} seconds.".format(SQLLAB_TIMEOUT))
    except Exception as e:
        logging.exception(e)
        if conn is not None:
            conn.close()
        return handle_error(db_engine_spec.extract_error_message(e))

    conn.commit()
    conn.close()

    if query.status == utils.QueryStatus.STOPPED:
        return json.dumps({
            'query_id': query.id,
            'status': query.status,
            'query': query.to_dict(),
        }, default=utils.json_iso_dttm_ser)

    # De-duplicate column names so the DataFrame keeps every column.
    column_names = (
        [col[0] for col in cursor.description] if cursor.description else [])
    column_names = dedup(column_names)
    cdf = dataframe.SupersetDataFrame(pd.DataFrame(
        list(data), columns=column_names))

    query.rows = cdf.size
    query.progress = 100
    query.status = QueryStatus.SUCCESS
    if query.select_as_cta:
        # Point follow-up browsing at the freshly created table.
        query.select_sql = '{}'.format(database.select_star(
            query.tmp_table_name,
            limit=query.limit,
            schema=database.force_ctas_schema,
            show_cols=False,
            latest_partition=False,
        ))
    query.end_time = utils.now_as_float()
    session.merge(query)
    session.flush()

    payload.update({
        'status': query.status,
        'data': cdf.data if cdf.data else [],
        'columns': cdf.columns if cdf.columns else [],
        'query': query.to_dict(),
    })
    if store_results:
        key = '{}'.format(uuid.uuid4())
        logging.info("Storing results in results backend, key: {}".format(key))
        json_payload = json.dumps(payload, default=utils.json_iso_dttm_ser)
        results_backend.set(key, utils.zlib_compress(json_payload))
        query.results_key = key
        query.end_result_backend_time = utils.now_as_float()
    session.merge(query)
    session.commit()

    if return_results:
        return payload
| apache-2.0 |
tonysyu/mpltools | mpltools/style/core.py | 2 | 3069 | import os
import glob
import copy
import numpy as np
import matplotlib.pyplot as plt
from .. import _config
__all__ = ['use', 'available', 'lib', 'baselib']
def use(name=None, use_baselib=False):
    """Use matplotlib rc parameters from a pre-defined name or from a file.

    Parameters
    ----------
    name : str or list of str
        Name of style. For list of available styles see `style.available`.
        If given a list, each style is applied from first to last in the list.
    use_baselib : bool
        If True, only use styles defined in `mpltools/style` (without user's
        customization).
    """
    # Accept a single style name or a list of names.
    names = [name] if np.isscalar(name) else name
    library = baselib if use_baselib else lib
    for style_name in names:
        plt.rcParams.update(library[style_name])
def load_base_library():
    """Load style library from package"""
    library = dict()
    # Styles shipped with mpltools live next to this module as *.rc files.
    style_dir = os.path.abspath(os.path.dirname(__file__))
    library.update(read_style_directory(style_dir))
    return library
def update_user_library(base_library):
    """Update style library with user-defined rc files"""
    # Deep copy so user customisation never mutates the shipped base styles.
    library = copy.deepcopy(base_library)

    # Layer 1: per-style *.rc files in the user's ~/.mplstylelib directory.
    stylelib_path = os.path.expanduser('~/.mplstylelib')
    if os.path.exists(stylelib_path) and os.path.isdir(stylelib_path):
        styles = read_style_directory(stylelib_path)
        update_nested_dict(library, styles)

    # Layer 2: sectioned config files (~/.mplstyle, then ./mplstyle), with
    # the more local file applied last so it wins.
    for cfg in _config.iter_paths(['~/.mplstyle', './mplstyle']):
        styles = read_style_dict(cfg)
        update_nested_dict(library, styles)

    return library
def read_style_directory(style_dir):
    """Return dict of styles parsed from every ``*.rc`` file in *style_dir*."""
    styles = dict()
    library_glob = os.path.join(style_dir, '*.rc')
    style_files = glob.glob(library_glob)

    for style_path in style_files:
        filename = os.path.basename(style_path)
        cfg = _config.read(style_path)
        # remove last three letters, which are '.rc'
        styles[filename[:-3]] = cfg.dict()
    return styles
def read_style_dict(cfg):
    """Return dict of styles read from config dict.

    Sections in the style file become top-level keys of the returned
    dict. Settings in a ``global`` section are applied to every other
    section before merging.
    """
    style = {}
    # Apply any 'global' section to all named style sections.
    # BUG FIX: the previous code iterated over `style.values()` while
    # `style` was still empty, so global settings were silently dropped.
    # Iterate the incoming config's sections instead.
    if 'global' in cfg:
        cfg_global = cfg.pop('global')
        for rc_dict in cfg.values():
            rc_dict.update(cfg_global)
    return update_nested_dict(style, cfg)
def update_nested_dict(main_dict, new_dict):
    """Merge *new_dict* into *main_dict*, one level of nesting deep.

    Unlike ``dict.update``, this assumes the values of the parent dict
    are themselves dicts: an existing sub-dict is updated in place
    rather than being replaced wholesale by the new sub-dict.

    Returns the (mutated) *main_dict* for convenience.
    """
    for key, sub_dict in new_dict.items():
        if key in main_dict:
            # Merge into the existing nested dict instead of replacing it.
            main_dict[key].update(sub_dict)
        else:
            main_dict[key] = sub_dict
    return main_dict
# Load style libraries
# ====================
# Module-level initialization, run once at import time:
#   * `baselib` holds the styles shipped with mpltools,
#   * `lib` layers any user-defined styles on top of `baselib`,
#   * `available` lists the names of all usable styles.
baselib = load_base_library()
lib = update_user_library(baselib)

available = list(lib.keys())
| bsd-3-clause |
flightgong/scikit-learn | examples/feature_stacker.py | 14 | 1941 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause

from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest

iris = load_iris()
X, y = iris.data, iris.target

# The iris data is rather high-dimensional for this toy problem;
# reduce it with PCA ...
pca = PCA(n_components=2)

# ... while also keeping the single best-scoring original feature.
kbest = SelectKBest(k=1)

# Stack both feature-extraction methods side by side.
union = FeatureUnion([("pca", pca), ("univ_select", kbest)])

# Transform the dataset with the combined extractor.
X_union = union.fit(X, y).transform(X)

# Fit a linear SVM on the stacked features.
clf = SVC(kernel="linear")
clf.fit(X_union, y)

# Grid search jointly over k, n_components and C through one pipeline.
pipeline = Pipeline([("features", union), ("svm", clf)])

param_grid = dict(features__pca__n_components=[1, 2, 3],
                  features__univ_select__k=[1, 2],
                  svm__C=[0.1, 1, 10])

grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
alimuldal/numpy | numpy/lib/function_base.py | 6 | 164887 | from __future__ import division, absolute_import, print_function
import collections
import operator
import re
import sys
import warnings
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d, transpose
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar, absolute
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import (
_insert, add_docstring, digitize, bincount,
interp as compiled_interp, interp_complex as compiled_interp_complex
)
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
from numpy.compat.py3k import basestring
if sys.version_info[0] < 3:
# Force range to be a generator, for np.delete's usage.
range = xrange
import __builtin__ as builtins
else:
import builtins
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip',
'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def rot90(m, k=1, axes=(0,1)):
    """
    Rotate an array by 90 degrees in the plane specified by axes.

    Rotation direction is from the first towards the second axis.

    .. versionadded:: 1.12.0

    Parameters
    ----------
    m : array_like
        Array of two or more dimensions.
    k : integer
        Number of times the array is rotated by 90 degrees.
    axes : (2,) array_like
        The array is rotated in the plane defined by the axes.
        Axes must be different.

    Returns
    -------
    y : ndarray
        A rotated view of `m`.

    See Also
    --------
    flip : Reverse the order of elements in an array along the given axis.
    fliplr : Flip an array horizontally.
    flipud : Flip an array vertically.

    Notes
    -----
    rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
    rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
    """
    axes = tuple(axes)
    if len(axes) != 2:
        raise ValueError("len(axes) must be 2.")

    m = asanyarray(m)
    ndim = m.ndim

    # Reject degenerate planes (same axis twice, possibly spelled with a
    # mix of positive and negative indices).
    if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == ndim:
        raise ValueError("Axes must be different.")
    if (axes[0] >= ndim or axes[0] < -ndim
            or axes[1] >= ndim or axes[1] < -ndim):
        raise ValueError("Axes={} out of range for array of ndim={}."
                         .format(axes, ndim))

    # Only the rotation count mod 4 matters.
    k %= 4
    if k == 0:
        # No rotation: hand back a full view of the input.
        return m[:]
    if k == 2:
        # A half turn is a flip along both axes of the plane.
        return flip(flip(m, axes[0]), axes[1])

    # Quarter turns: swap the two plane axes in the transposition order,
    # combined with a flip along the second axis.
    perm = arange(0, ndim)
    perm[axes[0]], perm[axes[1]] = perm[axes[1]], perm[axes[0]]

    if k == 1:
        return transpose(flip(m, axes[1]), perm)
    # k == 3
    return flip(transpose(m, perm), axes[1])
def flip(m, axis):
    """
    Reverse the order of elements in an array along the given axis.

    The shape of the array is preserved, but the elements are reordered.

    .. versionadded:: 1.12.0

    Parameters
    ----------
    m : array_like
        Input array.
    axis : integer
        Axis in array, which entries are reversed.

    Returns
    -------
    out : array_like
        A view of `m` with the entries of axis reversed. Since a view is
        returned, this operation is done in constant time.

    See Also
    --------
    flipud : Flip an array vertically (axis=0).
    fliplr : Flip an array horizontally (axis=1).

    Notes
    -----
    flip(m, 0) is equivalent to flipud(m).
    flip(m, 1) is equivalent to fliplr(m).
    flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
    """
    if not hasattr(m, 'ndim'):
        m = asarray(m)

    # Take every axis in full, except the requested one which is reversed.
    index = [slice(None)] * m.ndim
    try:
        index[axis] = slice(None, None, -1)
    except IndexError:
        raise ValueError("axis=%i is invalid for the %i-dimensional input array"
                         % (axis, m.ndim))
    return m[tuple(index)]
def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
        Input object.

    Returns
    -------
    b : bool
        Return ``True`` if the object has an iterator method or is a
        sequence and ``False`` otherwise.

    Examples
    --------
    >>> np.iterable([1, 2, 3])
    True
    >>> np.iterable(2)
    False
    """
    # EAFP: only objects accepted by iter() are iterable.
    try:
        iter(y)
        return True
    except TypeError:
        return False
def _hist_bin_sqrt(x):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / np.sqrt(x.size)
def _hist_bin_sturges(x):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
def _hist_bin_doane(x):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
http://stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return x.ptp() / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(x):
    """
    Histogram bin estimator that uses the minimum width of the
    Freedman-Diaconis and Sturges estimators.

    The FD estimator is usually the most robust method, but its width
    estimate tends to be too large for small `x`. The Sturges estimator
    is quite good for small (<1000) datasets and is the default in the R
    language. This method gives good off the shelf behaviour.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.

    See Also
    --------
    _hist_bin_fd, _hist_bin_sturges
    """
    # No zero-width special case needed: if ptp is zero, so is the IQR,
    # so both candidate widths are zero together.
    fd_bw = _hist_bin_fd(x)
    sturges_bw = _hist_bin_sturges(x)
    return min(fd_bw, sturges_bw)
# Private dict initialized at module load time.
# Maps each `bins` string accepted by `histogram` to the estimator
# function used to compute an optimal bin width for the data.
_hist_bin_selectors = {'auto': _hist_bin_auto,
                       'doane': _hist_bin_doane,
                       'fd': _hist_bin_fd,
                       'rice': _hist_bin_rice,
                       'scott': _hist_bin_scott,
                       'sqrt': _hist_bin_sqrt,
                       'sturges': _hist_bin_sturges}
def histogram(a, bins=10, range=None, normed=False, weights=None,
              density=None):
    r"""
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines the bin edges, including the rightmost
        edge, allowing for non-uniform bin widths.

        .. versionadded:: 1.11.0

        If `bins` is a string from the list below, `histogram` will use
        the method chosen to calculate the optimal bin width and
        consequently the number of bins (see `Notes` for more detail on
        the estimators) from the data that falls within the requested
        range. While the bin width will be optimal for the actual data
        in the range, the number of bins will be computed to fill the
        entire range, including the empty portions. For visualisation,
        using the 'auto' option is suggested. Weighted data is not
        supported for automated bin size selection.

        'auto'
            Maximum of the 'sturges' and 'fd' estimators. Provides good
            all around performance.
        'fd' (Freedman Diaconis Estimator)
            Robust (resilient to outliers) estimator that takes into
            account data variability and data size.
        'doane'
            An improved version of Sturges' estimator that works better
            with non-normal datasets.
        'scott'
            Less robust estimator that takes into account data
            variability and data size.
        'rice'
            Estimator does not take variability into account, only data
            size. Commonly overestimates number of bins required.
        'sturges'
            R's default method, only accounts for data size. Only
            optimal for gaussian data and underestimates number of bins
            for large non-gaussian datasets.
        'sqrt'
            Square root (of data size) estimator, used by Excel and
            other programs for its speed and simplicity.
    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    normed : bool, optional
        This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy
        behavior. It will be removed in NumPy 2.0.0. Use the ``density``
        keyword instead. If ``False``, the result will contain the
        number of samples in each bin. If ``True``, the result is the
        value of the probability *density* function at the bin,
        normalized such that the *integral* over the range is 1. Note
        that this latter behavior is known to be buggy with unequal bin
        widths; use ``density`` instead.
    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). If `density` is True, the weights are
        normalized, so that the integral of the density over the range
        remains 1.
    density : bool, optional
        If ``False``, the result will contain the number of samples in
        each bin. If ``True``, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.
        Overrides the ``normed`` keyword if given.

    Returns
    -------
    hist : array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    See Also
    --------
    histogramdd, bincount, searchsorted, digitize

    Notes
    -----
    All but the last (righthand-most) bin is half-open. In other words,
    if `bins` is::

      [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
    the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
    *includes* 4.

    .. versionadded:: 1.11.0

    The methods to estimate the optimal number of bins are well founded
    in literature, and are inspired by the choices R provides for
    histogram visualisation. Note that having the number of bins
    proportional to :math:`n^{1/3}` is asymptotically optimal, which is
    why it appears in most estimators. These are simply plug-in methods
    that give good starting points for number of bins. In the equations
    below, :math:`h` is the binwidth and :math:`n_h` is the number of
    bins. All estimators that compute bin counts are recast to bin width
    using the `ptp` of the data. The final bin count is obtained from
    ``np.round(np.ceil(range / h))``.

    'Auto' (maximum of the 'Sturges' and 'FD' estimators)
        A compromise to get a good value. For small datasets the Sturges
        value will usually be chosen, while larger datasets will usually
        default to FD. Avoids the overly conservative behaviour of FD
        and Sturges for small and large datasets respectively.
        Switchover point is usually :math:`a.size \approx 1000`.

    'FD' (Freedman Diaconis Estimator)
        .. math:: h = 2 \frac{IQR}{n^{1/3}}

        The binwidth is proportional to the interquartile range (IQR)
        and inversely proportional to cube root of a.size. Can be too
        conservative for small datasets, but is quite good for large
        datasets. The IQR is very robust to outliers.

    'Scott'
        .. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}

        The binwidth is proportional to the standard deviation of the
        data and inversely proportional to cube root of ``x.size``. Can
        be too conservative for small datasets, but is quite good for
        large datasets. The standard deviation is not very robust to
        outliers. Values are very similar to the Freedman-Diaconis
        estimator in the absence of outliers.

    'Rice'
        .. math:: n_h = 2n^{1/3}

        The number of bins is only proportional to cube root of
        ``a.size``. It tends to overestimate the number of bins and it
        does not take into account data variability.

    'Sturges'
        .. math:: n_h = \log _{2}n+1

        The number of bins is the base 2 log of ``a.size``. This
        estimator assumes normality of data and is too conservative for
        larger, non-normal datasets. This is the default method in R's
        ``hist`` method.

    'Doane'
        .. math:: n_h = 1 + \log_{2}(n) +
                        \log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})

            g_1 = mean[(\frac{x - \mu}{\sigma})^3]

            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}

        An improved version of Sturges' formula that produces better
        estimates for non-normal datasets. This estimator attempts to
        account for the skew of the data.

    'Sqrt'
        .. math:: n_h = \sqrt n

        The simplest and fastest estimator. Only takes into account the
        data size.

    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([ 0.25,  0.25,  0.25,  0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([ 0.5,  0. ,  0.5,  0. ,  0. ,  0.5,  0. ,  0.5,  0. ,  0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist*np.diff(bin_edges))
    1.0

    .. versionadded:: 1.11.0

    Automated Bin Selection Methods example, using 2 peak random data
    with 2000 points:

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.RandomState(10)  # deterministic random data
    >>> a = np.hstack((rng.normal(size=1000),
    ...                rng.normal(loc=5, scale=2, size=1000)))
    >>> plt.hist(a, bins='auto')  # plt.hist passes it's arguments to np.histogram
    >>> plt.title("Histogram with 'auto' bins")
    >>> plt.show()

    """
    a = asarray(a)
    if weights is not None:
        weights = asarray(weights)
        if np.any(weights.shape != a.shape):
            raise ValueError(
                'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()

    # Do not modify the original value of range so we can check for `None`
    if range is None:
        if a.size == 0:
            # handle empty arrays. Can't determine range, so use 0-1.
            mn, mx = 0.0, 1.0
        else:
            # The `+ 0.0` promotes integer mins/maxes to float scalars.
            mn, mx = a.min() + 0.0, a.max() + 0.0
    else:
        mn, mx = [mi + 0.0 for mi in range]
    if mn > mx:
        raise ValueError(
            'max must be larger than min in range parameter.')
    if not np.all(np.isfinite([mn, mx])):
        raise ValueError(
            'range parameter must be finite.')
    if mn == mx:
        # Degenerate (zero-width) range: widen to a unit-width interval.
        mn -= 0.5
        mx += 0.5

    if isinstance(bins, basestring):
        # if `bins` is a string for an automatic method,
        # this will replace it with the number of bins calculated
        if bins not in _hist_bin_selectors:
            raise ValueError("{0} not a valid estimator for bins".format(bins))
        if weights is not None:
            raise TypeError("Automated estimation of the number of "
                            "bins is not supported for weighted data")
        # Make a reference to `a`
        b = a
        # Update the reference if the range needs truncation
        if range is not None:
            keep = (a >= mn)
            keep &= (a <= mx)
            if not np.logical_and.reduce(keep):
                b = a[keep]

        if b.size == 0:
            bins = 1
        else:
            # Do not call selectors on empty arrays
            width = _hist_bin_selectors[bins](b)
            if width:
                bins = int(np.ceil((mx - mn) / width))
            else:
                # Width can be zero for some estimators, e.g. FD when
                # the IQR of the data is zero.
                bins = 1

    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = np.dtype(np.intp)
    else:
        ntype = weights.dtype

    # We set a block size, as this allows us to iterate over chunks when
    # computing histograms, to minimize memory usage.
    BLOCK = 65536

    if not iterable(bins):
        if np.isscalar(bins) and bins < 1:
            raise ValueError(
                '`bins` should be a positive integer.')
        # At this point, if the weights are not integer, floating point, or
        # complex, we have to use the slow algorithm.
        # NOTE(review): `np.complex` is the builtin-complex alias removed in
        # NumPy 1.24+; acceptable for the NumPy vintage this file targets.
        if weights is not None and not (np.can_cast(weights.dtype, np.double) or
                                        np.can_cast(weights.dtype, np.complex)):
            bins = linspace(mn, mx, bins + 1, endpoint=True)

    if not iterable(bins):
        # Fast path: `bins` is an integer count and the bins are
        # equal-width, so values of `a` can be converted directly to bin
        # indices instead of using searchsorted.

        # Initialize empty histogram
        n = np.zeros(bins, ntype)
        # Pre-compute histogram scaling factor
        norm = bins / (mx - mn)

        # Compute the bin edges for potential correction.
        bin_edges = linspace(mn, mx, bins + 1, endpoint=True)

        # We iterate over blocks here for two reasons: the first is that for
        # large arrays, it is actually faster (for example for a 10^8 array it
        # is 2x as fast) and it results in a memory footprint 3x lower in the
        # limit of large arrays.
        for i in arange(0, len(a), BLOCK):
            tmp_a = a[i:i+BLOCK]
            if weights is None:
                tmp_w = None
            else:
                tmp_w = weights[i:i + BLOCK]

            # Only include values in the right range
            keep = (tmp_a >= mn)
            keep &= (tmp_a <= mx)
            if not np.logical_and.reduce(keep):
                tmp_a = tmp_a[keep]
                if tmp_w is not None:
                    tmp_w = tmp_w[keep]
            tmp_a_data = tmp_a.astype(float)
            tmp_a = tmp_a_data - mn
            tmp_a *= norm

            # Compute the bin indices, and for values that lie exactly on mx we
            # need to subtract one
            indices = tmp_a.astype(np.intp)
            indices[indices == bins] -= 1

            # The index computation is not guaranteed to give exactly
            # consistent results within ~1 ULP of the bin edges.
            decrement = tmp_a_data < bin_edges[indices]
            indices[decrement] -= 1
            # The last bin includes the right edge. The other bins do not.
            increment = (tmp_a_data >= bin_edges[indices + 1]) & (indices != bins - 1)
            indices[increment] += 1

            # We now compute the histogram using bincount
            if ntype.kind == 'c':
                # Complex weights: accumulate real/imag parts separately,
                # since bincount only handles real-valued weights.
                n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
                n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
            else:
                n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)

        # Rename the bin edges for return.
        bins = bin_edges
    else:
        # Slow path: `bins` is an explicit edge sequence (possibly
        # non-uniform), so use sort + searchsorted per block.
        bins = asarray(bins)
        if (np.diff(bins) < 0).any():
            raise ValueError(
                'bins must increase monotonically.')

        # Initialize empty histogram
        n = np.zeros(bins.shape, ntype)

        if weights is None:
            for i in arange(0, len(a), BLOCK):
                sa = sort(a[i:i+BLOCK])
                # Cumulative counts at each edge; the last edge is
                # searched from the right so the final bin is closed.
                n += np.r_[sa.searchsorted(bins[:-1], 'left'),
                           sa.searchsorted(bins[-1], 'right')]
        else:
            zero = array(0, dtype=ntype)
            for i in arange(0, len(a), BLOCK):
                tmp_a = a[i:i+BLOCK]
                tmp_w = weights[i:i+BLOCK]
                sorting_index = np.argsort(tmp_a)
                sa = tmp_a[sorting_index]
                sw = tmp_w[sorting_index]
                # Cumulative weights, so differences of cw give per-bin sums.
                cw = np.concatenate(([zero, ], sw.cumsum()))
                bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
                                  sa.searchsorted(bins[-1], 'right')]
                n += cw[bin_index]

        # Convert cumulative counts at the edges into per-bin counts.
        n = np.diff(n)

    if density is not None:
        if density:
            db = array(np.diff(bins), float)
            return n/db/n.sum(), bins
        else:
            return n, bins
    else:
        # deprecated, buggy behavior. Remove for NumPy 2.0.0
        if normed:
            db = array(np.diff(bins), float)
            return n/(n*db).sum(), bins
        else:
            return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : array_like
        The data to be histogrammed. It must be an (N,D) array or data
        that can be converted to such. The rows of the resulting array
        are the coordinates of points in a D dimensional polytope.
    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the bin edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).

    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    normed : bool, optional
        If False, returns the number of samples in each bin. If True,
        returns the bin density ``bin_count / sample_count / bin_volume``.
    weights : (N,) array_like, optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if normed is True. If normed is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See normed and weights
        for the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.

    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram

    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)

    """
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = atleast_2d(sample).T
        N, D = sample.shape

    # Per-dimension bin counts, edges and edge widths (filled in below).
    nbin = empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = asarray(weights)

    try:
        M = len(bins)
        if M != D:
            raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                ' sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        # Handle empty input. Range can't be determined in that case, use 0-1.
        if N == 0:
            smin = zeros(D)
            smax = ones(D)
        else:
            smin = atleast_1d(array(sample.min(0), float))
            smax = atleast_1d(array(sample.max(0), float))
    else:
        if not np.all(np.isfinite(range)):
            raise ValueError(
                'range parameter must be finite.')
        smin = zeros(D)
        smax = zeros(D)
        for i in arange(D):
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in arange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # avoid rounding issues for comparisons when dealing with inexact types
    if np.issubdtype(sample.dtype, np.inexact):
        edge_dt = sample.dtype
    else:
        edge_dt = float

    # Create edge arrays
    for i in arange(D):
        if isscalar(bins[i]):
            if bins[i] < 1:
                raise ValueError(
                    "Element at index %s in `bins` should be a positive "
                    "integer." % i)
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
        else:
            edges[i] = asarray(bins[i], edge_dt)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = diff(edges[i])
        if np.any(np.asarray(dedges[i]) <= 0):
            raise ValueError(
                "Found bin edge of size <= 0. Did you specify `bins` with"
                "non-monotonic sequence?")

    nbin = asarray(nbin)

    # Handle empty input.
    if N == 0:
        return np.zeros(nbin-2), edges

    # Compute the bin number each sample falls into.
    Ncount = {}
    for i in arange(D):
        Ncount[i] = digitize(sample[:, i], edges[i])

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in arange(D):
        # Rounding precision
        mindiff = dedges[i].min()
        if not np.isinf(mindiff):
            # Compare at ~6 digits past the smallest bin width to decide
            # whether a sample sits exactly on the rightmost edge.
            decimal = int(-log10(mindiff)) + 6
            # Find which points are on the rightmost edge.
            not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
            on_edge = (around(sample[:, i], decimal) ==
                       around(edges[i][-1], decimal))
            # Shift these points one bin to the left.
            Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1

    # Flattened histogram matrix (1D)
    # Reshape is used so that overlarge arrays
    # will raise an error.
    hist = zeros(nbin, float).reshape(-1)

    # Compute the sample indices in the flattened histogram matrix.
    # Dimensions are processed in nbin-sorted order; the matching
    # unsort happens in the swapaxes loop below.
    ni = nbin.argsort()
    xy = zeros(N, int)
    for i in arange(0, D-1):
        xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
    xy += Ncount[ni[-1]]

    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    if len(xy) == 0:
        return zeros(nbin-2, int), edges

    flatcount = bincount(xy, weights)
    a = arange(len(flatcount))
    hist[a] = flatcount

    # Shape into a proper matrix
    hist = hist.reshape(sort(nbin))
    for i in arange(nbin.size):
        j = ni.argsort()[i]
        hist = hist.swapaxes(i, j)
        ni[i], ni[j] = ni[j], ni[i]

    # Remove outliers (indices 0 and -1 for each dimension).
    # NOTE(review): indexing with a list of slices is deprecated in later
    # NumPy releases; a tuple(core) would be the forward-compatible form.
    core = D*[slice(1, -1)]
    hist = hist[core]

    # Normalize if normed is True
    if normed:
        s = hist.sum()
        for i in arange(D):
            shape = ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s

    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
def average(a, axis=None, weights=None, returned=False):
    """
    Compute the weighted average along the specified axis.

    Parameters
    ----------
    a : array_like
        Data to be averaged.  Converted to an array if necessary.
    axis : int, optional
        Axis along which to average `a`.  ``None`` (the default)
        averages over the flattened array.
    weights : array_like, optional
        Weights associated with the values in `a`.  Must either have
        the same shape as `a`, or be 1-D with length equal to the size
        of `a` along `axis`.  When omitted, every element of `a` has
        weight one.
    returned : bool, optional
        When True, return the tuple ``(average, sum_of_weights)``
        instead of the average alone.  Default is False.

    Returns
    -------
    average, [sum_of_weights] : array_type or double
        The average along the specified axis; when `returned` is True,
        also the sum of the weights (broadcast to the average's shape).
        Integer input is averaged in floating point.

    Raises
    ------
    ZeroDivisionError
        When all weights along an axis sum to zero.
    TypeError
        When 1-D `weights` cannot be matched to `a` without `axis`, or
        when `weights` is neither `a`-shaped nor 1-D.

    See Also
    --------
    mean
    ma.average : average for masked arrays -- useful if your data contains
                 "missing" values

    Examples
    --------
    >>> np.average(range(1, 5))
    2.5
    >>> np.average(range(1, 11), weights=range(10, 0, -1))
    4.0
    """
    # 3/19/2016 1.12.0:
    # replace the next few lines with "a = np.asanyarray(a)"
    # Warn about ndarray subclasses (other than matrix), whose type is
    # currently discarded by the asarray conversion below.
    if (type(a) not in (np.ndarray, np.matrix) and
            issubclass(type(a), np.ndarray)):
        warnings.warn("np.average currently does not preserve subclasses, but "
                      "will do so in the future to match the behavior of most "
                      "other numpy functions such as np.mean. In particular, "
                      "this means calls which returned a scalar may return a "
                      "0-d subclass object instead.",
                      FutureWarning, stacklevel=2)
    if not isinstance(a, np.matrix):
        a = np.asarray(a)

    if weights is None:
        # Unweighted: plain mean; the weight sum is simply the count.
        result = a.mean(axis)
        weight_sum = result.dtype.type(a.size / result.size)
    else:
        wgt = np.asanyarray(weights)

        # Integer/boolean data is averaged in (at least) double precision.
        if issubclass(a.dtype.type, (np.integer, np.bool_)):
            out_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
        else:
            out_dtype = np.result_type(a.dtype, wgt.dtype)

        # Sanity checks: 1-D weights must be matched to the reduction axis.
        if a.shape != wgt.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights "
                    "differ.")
            if wgt.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ.")
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis.")

            # Set up wgt to broadcast along `axis`.
            wgt = np.broadcast_to(wgt, (a.ndim - 1) * (1,) + wgt.shape)
            wgt = wgt.swapaxes(-1, axis)

        weight_sum = wgt.sum(axis=axis, dtype=out_dtype)
        if (weight_sum == 0.0).any():
            raise ZeroDivisionError(
                "Weights sum to zero, can't be normalized")

        result = np.multiply(a, wgt, dtype=out_dtype).sum(axis) / weight_sum

    if returned:
        if weight_sum.shape != result.shape:
            weight_sum = np.broadcast_to(weight_sum, result.shape).copy()
        return result, weight_sum
    else:
        return result
def asarray_chkfinite(a, dtype=None, order=None):
    """Convert the input to an array, rejecting NaNs and Infs.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array
        (lists, tuples, nested sequences, ndarrays).  Success requires
        no NaNs or Infs.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) memory
        representation.  Defaults to 'C'.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`.  No copy is performed if the input
        is already an ndarray; subclasses are converted to base-class
        ndarrays.

    Raises
    ------
    ValueError
        If `a` contains NaN (Not a Number) or Inf (Infinity).

    See Also
    --------
    asarray : Create an array.
    asanyarray : Similar function which passes through subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfarray : Convert input to a floating point ndarray.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.

    Examples
    --------
    >>> np.asarray_chkfinite([1, 2], dtype=float)
    array([1., 2.])
    """
    arr = asarray(a, dtype=dtype, order=order)
    # Only floating-point kinds can hold non-finite values; everything
    # else (ints, bools, strings, ...) passes through untouched.
    if arr.dtype.char in typecodes['AllFloat']:
        if not np.isfinite(arr).all():
            raise ValueError(
                "array must not contain infs or NaNs")
    return arr
def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluate a piecewise-defined function.

    Given a set of conditions and corresponding functions, evaluate each
    function on the input data wherever its condition is true.

    Parameters
    ----------
    x : ndarray
        The input domain.
    condlist : list of bool arrays
        Each boolean array corresponds to a function in `funclist`.
        Wherever ``condlist[i]`` is True, ``funclist[i](x)`` is used as
        the output value.  Each boolean array should therefore have the
        same shape as `x`.  If ``len(funclist) - len(condlist) == 1``,
        the extra function is the default value, used wherever all
        conditions are false.
    funclist : list of callables, f(x,*args,**kw), or scalars
        Each function is evaluated over `x` wherever its corresponding
        condition is True.  A scalar entry acts as a constant function.
    args : tuple, optional
        Extra positional arguments passed to the functions on execution.
    kw : dict, optional
        Extra keyword arguments passed to the functions on execution.

    Returns
    -------
    out : ndarray
        Same shape and type as `x`; portions not covered by any
        condition default to 0.

    See Also
    --------
    choose, select, where

    Examples
    --------
    >>> x = np.linspace(-2.5, 2.5, 6)
    >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
    array([-1., -1., -1.,  1.,  1.,  1.])
    """
    x = asanyarray(x)
    n2 = len(funclist)
    if (isscalar(condlist) or not (isinstance(condlist[0], list) or
                                   isinstance(condlist[0], ndarray))):
        condlist = [condlist]
    condlist = array(condlist, dtype=bool)
    n = len(condlist)
    # This is a hack to work around problems with NumPy's
    #  handling of 0-d arrays and boolean indexing with
    #  numpy.bool_ scalars
    zerod = False
    if x.ndim == 0:
        x = x[None]
        zerod = True
        if condlist.shape[-1] != 1:
            condlist = condlist.T
    if n == n2 - 1:  # compute the "otherwise" condition.
        totlist = np.logical_or.reduce(condlist, axis=0)
        # Only able to stack vertically if the array is 1d or less
        if x.ndim <= 1:
            condlist = np.vstack([condlist, ~totlist])
        else:
            condlist = [asarray(c, dtype=bool) for c in condlist]
            totlist = condlist[0]
            for k in range(1, n):
                totlist |= condlist[k]
            condlist.append(~totlist)
        n += 1
    y = zeros(x.shape, x.dtype)
    for k in range(n):
        item = funclist[k]
        # BUG FIX: this used ``isinstance(item, collections.Callable)``.
        # ``collections.Callable`` moved to ``collections.abc`` in
        # Python 3.3 and the alias was removed in Python 3.10, so the
        # old check raises AttributeError.  The builtin ``callable`` is
        # the exact equivalent.
        if not callable(item):
            y[condlist[k]] = item
        else:
            vals = x[condlist[k]]
            if vals.size > 0:
                y[condlist[k]] = item(vals, *args, **kw)
    if zerod:
        y = y.squeeze()
    return y
def select(condlist, choicelist, default=0):
    """
    Return an array drawn from elements in choicelist, depending on conditions.

    Parameters
    ----------
    condlist : list of bool ndarrays
        The list of conditions which determine from which array in
        `choicelist` the output elements are taken.  When multiple
        conditions are satisfied, the first one encountered in
        `condlist` is used.
    choicelist : list of ndarrays
        The list of arrays from which the output elements are taken.
        It has to be of the same length as `condlist`.
    default : scalar, optional
        The element inserted in `output` when all conditions evaluate
        to False.

    Returns
    -------
    output : ndarray
        The output at position m is the m-th element of the array in
        `choicelist` where the m-th element of the corresponding array
        in `condlist` is True.

    See Also
    --------
    where : Return elements from one of two arrays depending on condition.
    take, choose, compress, diag, diagonal

    Examples
    --------
    >>> x = np.arange(10)
    >>> np.select([x < 3, x > 5], [x, x**2])
    array([ 0,  1,  2,  0,  0,  0, 36, 49, 64, 81])
    """
    # Check the size of condlist and choicelist are the same, or abort.
    if len(condlist) != len(choicelist):
        raise ValueError(
            'list of cases must be same length as list of conditions')

    # Now that the dtype is known, handle the deprecated select([], []) case
    if len(condlist) == 0:
        # 2014-02-24, 1.9
        # BUG FIX: the two string fragments previously concatenated
        # without a separating space ("...possibleand will...").
        warnings.warn("select with an empty condition list is not possible "
                      "and will be deprecated",
                      DeprecationWarning, stacklevel=2)
        return np.asarray(default)[()]

    choicelist = [np.asarray(choice) for choice in choicelist]
    choicelist.append(np.asarray(default))

    # need to get the result type before broadcasting for correct scalar
    # behaviour
    dtype = np.result_type(*choicelist)

    # Convert conditions to arrays and broadcast conditions and choices
    # as the shape is needed for the result. Doing it separately optimizes
    # for example when all choices are scalars.
    condlist = np.broadcast_arrays(*condlist)
    choicelist = np.broadcast_arrays(*choicelist)

    # If cond array is not an ndarray in boolean format or scalar bool, abort.
    deprecated_ints = False
    for i in range(len(condlist)):
        cond = condlist[i]
        if cond.dtype.type is not np.bool_:
            if np.issubdtype(cond.dtype, np.integer):
                # A previous implementation accepted int ndarrays accidentally.
                # Supported here deliberately, but deprecated.
                condlist[i] = condlist[i].astype(bool)
                deprecated_ints = True
            else:
                raise ValueError(
                    'invalid entry in choicelist: should be boolean ndarray')

    if deprecated_ints:
        # 2014-02-24, 1.9
        msg = "select condlists containing integer ndarrays is deprecated " \
            "and will be removed in the future. Use `.astype(bool)` to " \
            "convert to bools."
        warnings.warn(msg, DeprecationWarning, stacklevel=2)

    if choicelist[0].ndim == 0:
        # This may be common, so avoid the call.
        result_shape = condlist[0].shape
    else:
        result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape

    result = np.full(result_shape, choicelist[-1], dtype)

    # Use np.copyto to burn each choicelist array onto result, using the
    # corresponding condlist as a boolean mask. This is done in reverse
    # order since the first choice should take precedence.
    choicelist = choicelist[-2::-1]
    condlist = condlist[::-1]
    for choice, cond in zip(choicelist, condlist):
        np.copyto(result, choice, where=cond)

    return result
def copy(a, order='K'):
    """
    Return an array copy of the given object.

    Parameters
    ----------
    a : array_like
        Input data.
    order : {'C', 'F', 'A', 'K'}, optional
        Controls the memory layout of the copy.  'C' means C-order,
        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
        'C' otherwise.  'K' (the default) matches the layout of `a` as
        closely as possible.  (Note that :meth:`ndarray.copy` is very
        similar but defaults to order='C'.)

    Returns
    -------
    arr : ndarray
        Array interpretation of `a`.

    Notes
    -----
    Equivalent to ``np.array(a, copy=True)``.

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> z = np.copy(x)
    >>> x[0] = 10
    >>> x[0] == z[0]
    False
    """
    # Delegate to array(); forcing copy=True guarantees an independent buffer.
    return array(a, copy=True, order=order)
# Basic operations
def gradient(f, *varargs, **kwargs):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using second order accurate central differences
    in the interior and either first differences or second order accurate
    one-sided (forward or backwards) differences at the boundaries. The
    returned gradient hence has the same shape as the input array.

    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.
    varargs : scalar or list of scalar, optional
        N scalars specifying the sample distances for each dimension,
        i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
        A single scalar specifies the sample distance for all dimensions.
        If `axis` is given, the number of varargs must equal the number
        of axes.
    edge_order : {1, 2}, optional
        Gradient is calculated using N-th order accurate differences
        at the boundaries. Default: 1.
    axis : None or int or tuple of ints, optional
        Gradient is calculated only along the given axis or axes.
        The default (axis = None) is to calculate the gradient for all
        the axes of the input array. axis may be negative, in which
        case it counts from the last to the first axis.

    Returns
    -------
    gradient : ndarray or list of ndarray
        A set of ndarrays (or a single ndarray if there is only one
        dimension) corresponding to the derivatives of f with respect
        to each dimension.  Each derivative has the same shape as f.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=float)
    >>> np.gradient(x)
    array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])
    >>> np.gradient(x, 2)
    array([ 0.5 ,  0.75,  1.25,  1.75,  2.25,  2.5 ])
    """
    f = np.asanyarray(f)
    N = len(f.shape)  # number of dimensions
    axes = kwargs.pop('axis', None)
    if axes is None:
        axes = tuple(range(N))
    # check axes to have correct type and no duplicate entries
    if isinstance(axes, int):
        axes = (axes,)
    if not isinstance(axes, tuple):
        raise TypeError("A tuple of integers or a single integer is required")
    # normalize axis values:
    axes = tuple(x + N if x < 0 else x for x in axes)
    if max(axes) >= N or min(axes) < 0:
        raise ValueError("'axis' entry is out of bounds")
    if len(set(axes)) != len(axes):
        raise ValueError("duplicate value in 'axis'")
    n = len(varargs)
    if n == 0:
        dx = [1.0]*N
    elif n == 1:
        # single scalar gives the same spacing on every axis
        dx = [varargs[0]]*N
    elif n == len(axes):
        dx = list(varargs)
    else:
        # NOTE: kept as SyntaxError for backward compatibility even
        # though TypeError would be more conventional.
        raise SyntaxError(
            "invalid number of arguments")
    if any([not np.isscalar(dxi) for dxi in dx]):
        raise ValueError("distances must be scalars")
    edge_order = kwargs.pop('edge_order', 1)
    if kwargs:
        raise TypeError('"{}" are not valid keyword arguments.'.format(
                                                  '", "'.join(kwargs.keys())))
    if edge_order > 2:
        raise ValueError("'edge_order' greater than 2 not supported")
    # use central differences on interior and one-sided differences on the
    # endpoints. This preserves second order-accuracy over the full domain.
    outvals = []
    # create slice objects --- initially all are [:, :, ..., :]
    slice1 = [slice(None)]*N
    slice2 = [slice(None)]*N
    slice3 = [slice(None)]*N
    slice4 = [slice(None)]*N
    otype = f.dtype.char
    if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
        otype = 'd'
    # Difference of datetime64 elements results in timedelta64
    if otype == 'M':
        # Need to use the full dtype name because it contains unit information
        otype = f.dtype.name.replace('datetime', 'timedelta')
    elif otype == 'm':
        # Needs to keep the specific units, can't be a general unit
        otype = f.dtype
    # Convert datetime64 data into ints. Make dummy variable `y`
    # that is a view of ints if the data is datetime64, otherwise
    # just set y equal to the array `f`.
    if f.dtype.char in ["M", "m"]:
        y = f.view('int64')
    else:
        y = f
    for i, axis in enumerate(axes):
        if y.shape[axis] < 2:
            raise ValueError(
                "Shape of array too small to calculate a numerical gradient, "
                "at least two elements are required.")
        # BUG FIX (throughout this loop): the slice lists must be
        # converted to tuples before indexing; indexing an ndarray with
        # a *list* of slices was deprecated in numpy 1.15 and is an
        # error in modern numpy.
        # Numerical differentiation: 1st order edges, 2nd order interior
        if y.shape[axis] == 2 or edge_order == 1:
            # Use first order differences for time data
            out = np.empty_like(y, dtype=otype)
            slice1[axis] = slice(1, -1)
            slice2[axis] = slice(2, None)
            slice3[axis] = slice(None, -2)
            # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])/2.0
            slice1[axis] = 0
            slice2[axis] = 1
            slice3[axis] = 0
            # 1D equivalent -- out[0] = (y[1] - y[0])
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])
            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            # 1D equivalent -- out[-1] = (y[-1] - y[-2])
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])
        # Numerical differentiation: 2nd order edges, 2nd order interior
        else:
            # Use second order differences where possible
            out = np.empty_like(y, dtype=otype)
            slice1[axis] = slice(1, -1)
            slice2[axis] = slice(2, None)
            slice3[axis] = slice(None, -2)
            # 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
            out[tuple(slice1)] = (y[tuple(slice2)] - y[tuple(slice3)])/2.0
            slice1[axis] = 0
            slice2[axis] = 0
            slice3[axis] = 1
            slice4[axis] = 2
            # 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
            out[tuple(slice1)] = -(3.0*y[tuple(slice2)] -
                                   4.0*y[tuple(slice3)] +
                                   y[tuple(slice4)])/2.0
            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            slice4[axis] = -3
            # 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3]) / 2.0
            out[tuple(slice1)] = (3.0*y[tuple(slice2)] -
                                  4.0*y[tuple(slice3)] +
                                  y[tuple(slice4)])/2.0
        # divide by step size
        out /= dx[i]
        outvals.append(out)
        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)
        slice4[axis] = slice(None)
    if len(axes) == 1:
        return outvals[0]
    else:
        return outvals
def diff(a, n=1, axis=-1):
    """
    Calculate the n-th discrete difference along the given axis.

    The first difference is given by ``out[n] = a[n+1] - a[n]`` along
    the given axis; higher differences are obtained by repeating the
    operation.

    Parameters
    ----------
    a : array_like
        Input array
    n : int, optional
        The number of times values are differenced.
    axis : int, optional
        The axis along which the difference is taken, default is the
        last axis.

    Returns
    -------
    diff : ndarray
        The n-th differences.  The shape of the output is the same as
        `a` except along `axis` where the dimension is smaller by `n`.

    See Also
    --------
    gradient, ediff1d, cumsum

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])
    """
    if n == 0:
        return a
    if n < 0:
        raise ValueError(
            "order must be non-negative but got " + repr(n))
    a = asanyarray(a)
    nd = a.ndim
    # Two index tuples shifted by one along `axis`; they stay valid as
    # the array shrinks, so the n-fold difference can be iterative
    # rather than recursive.
    upper = [slice(None)] * nd
    lower = [slice(None)] * nd
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    upper = tuple(upper)
    lower = tuple(lower)
    out = a
    for _ in range(n):
        out = out[upper] - out[lower]
    return out
def interp(x, xp, fp, left=None, right=None, period=None):
    """
    One-dimensional linear interpolation.

    Returns the one-dimensional piecewise linear interpolant to a
    function with given values at discrete data-points.

    Parameters
    ----------
    x : array_like
        The x-coordinates of the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points, must be increasing if
        argument `period` is not specified.  Otherwise, `xp` is
        internally sorted after normalizing the periodic boundaries
        with ``xp = xp % period``.
    fp : 1-D sequence of float or complex
        The y-coordinates of the data points, same length as `xp`.
    left : optional float or complex corresponding to fp
        Value to return for ``x < xp[0]``, default is ``fp[0]``.
    right : optional float or complex corresponding to fp
        Value to return for ``x > xp[-1]``, default is ``fp[-1]``.
    period : None or float, optional
        A period for the x-coordinates, enabling proper interpolation
        of angular x-coordinates.  `left` and `right` are ignored when
        `period` is specified.

    Returns
    -------
    y : float or complex (corresponding to fp) or ndarray
        The interpolated values, same shape as `x`.

    Raises
    ------
    ValueError
        If `xp` and `fp` have different length, if `xp` or `fp` are
        not 1-D sequences, or if ``period == 0``.

    Notes
    -----
    Does not check that `xp` is increasing; if it is not, the results
    are nonsense.  A simple check is ``np.all(np.diff(xp) > 0)``.
    """
    # Complex sample values use the complex compiled kernel.
    fp = np.asarray(fp)
    if np.iscomplexobj(fp):
        interp_func = compiled_interp_complex
        input_dtype = np.complex128
    else:
        interp_func = compiled_interp
        input_dtype = np.float64

    if period is None:
        # Scalar input (python number or 0-d array) yields a scalar out.
        is_scalar = (isinstance(x, (float, int, number)) or
                     (isinstance(x, np.ndarray) and x.ndim == 0))
        if is_scalar:
            return interp_func([x], xp, fp, left, right).item()
        return interp_func(x, xp, fp, left, right)

    if period == 0:
        raise ValueError("period must be a non-zero value")
    period = abs(period)
    # left/right are meaningless for periodic coordinates.
    left = None
    right = None
    return_array = True
    if isinstance(x, (float, int, number)):
        return_array = False
        x = [x]
    x = np.asarray(x, dtype=np.float64)
    xp = np.asarray(xp, dtype=np.float64)
    fp = np.asarray(fp, dtype=input_dtype)
    if xp.ndim != 1 or fp.ndim != 1:
        raise ValueError("Data points must be 1-D sequences")
    if xp.shape[0] != fp.shape[0]:
        raise ValueError("fp and xp are not of the same length")
    # Map all coordinates into one period and sort the data points.
    x = x % period
    xp = xp % period
    order = np.argsort(xp)
    xp = xp[order]
    fp = fp[order]
    # Wrap one point around each end so interpolation works across the seam.
    xp = np.concatenate((xp[-1:] - period, xp, xp[0:1] + period))
    fp = np.concatenate((fp[-1:], fp, fp[0:1]))
    result = interp_func(x, xp, fp, left, right)
    return result if return_array else result.item()
def angle(z, deg=0):
    """
    Return the angle of the complex argument.

    Parameters
    ----------
    z : array_like
        A complex number or sequence of complex numbers.
    deg : bool, optional
        Return angle in degrees if True, radians if False (default).

    Returns
    -------
    angle : ndarray or scalar
        The counterclockwise angle from the positive real axis on the
        complex plane, with dtype as numpy.float64.

    See Also
    --------
    arctan2
    absolute

    Examples
    --------
    >>> np.angle(1+1j, deg=True)
    45.0
    """
    scale = 180 / pi if deg else 1.0
    z = asarray(z)
    # Real input is treated as lying on the real axis (imaginary part 0).
    if issubclass(z.dtype.type, np.complexfloating):
        real_part = z.real
        imag_part = z.imag
    else:
        real_part = z
        imag_part = 0
    return arctan2(imag_part, real_part) * scale
def unwrap(p, discont=pi, axis=-1):
    """
    Unwrap by changing deltas between values to 2*pi complement.

    Unwrap radian phase `p` by changing absolute jumps greater than
    `discont` to their 2*pi complement along the given axis.

    Parameters
    ----------
    p : array_like
        Input array.
    discont : float, optional
        Maximum discontinuity between values, default is ``pi``.
    axis : int, optional
        Axis along which unwrap will operate, default is the last axis.

    Returns
    -------
    out : ndarray
        Output array.

    See Also
    --------
    rad2deg, deg2rad

    Notes
    -----
    If the discontinuity in `p` is smaller than ``pi``, but larger than
    `discont`, no unwrapping is done because taking the 2*pi complement
    would only make the discontinuity larger.
    """
    p = asarray(p)
    nd = len(p.shape)
    dd = np.diff(p, axis=axis)
    slice1 = [slice(None, None)]*nd     # full slices
    slice1[axis] = slice(1, None)
    # BUG FIX: indexing an ndarray with a *list* of slices was
    # deprecated in numpy 1.15 and is an error in modern numpy; a
    # tuple performs the intended multidimensional slicing.
    slice1 = tuple(slice1)
    ddmod = np.mod(dd + pi, 2*pi) - pi
    # mod maps a jump of exactly +pi to -pi; restore its sign.
    np.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
    ph_correct = ddmod - dd
    # Jumps smaller than `discont` need no correction.
    np.copyto(ph_correct, 0, where=abs(dd) < discont)
    up = array(p, copy=True, dtype='d')
    up[slice1] = p[slice1] + ph_correct.cumsum(axis)
    return up
def sort_complex(a):
    """
    Sort a complex array using the real part first, then the imaginary part.

    Parameters
    ----------
    a : array_like
        Input array

    Returns
    -------
    out : complex ndarray
        Always returns a sorted complex array.

    Examples
    --------
    >>> np.sort_complex([5, 3, 6, 2, 1])
    array([ 1.+0.j,  2.+0.j,  3.+0.j,  5.+0.j,  6.+0.j])
    """
    b = array(a, copy=True)
    b.sort()
    if issubclass(b.dtype.type, np.complexfloating):
        return b
    # Real input: promote to the complex type matching the input precision.
    kind = b.dtype.char
    if kind in 'bhBH':
        return b.astype('F')    # small ints -> single-precision complex
    if kind == 'g':
        return b.astype('G')    # long double -> long double complex
    return b.astype('D')        # everything else -> double complex
def trim_zeros(filt, trim='fb'):
    """
    Trim the leading and/or trailing zeros from a 1-D array or sequence.

    Parameters
    ----------
    filt : 1-D array or sequence
        Input array.
    trim : str, optional
        A string with 'f' representing trim from front and 'b' to trim
        from back.  Default is 'fb', trim zeros from both ends.

    Returns
    -------
    trimmed : 1-D array or sequence
        The result of trimming the input.  The input data type is
        preserved (list in, list out; ndarray in, ndarray out).

    Examples
    --------
    >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
    >>> np.trim_zeros(a)
    array([1, 2, 3, 0, 2, 1])
    >>> np.trim_zeros([0, 1, 2, 0])
    [1, 2]
    """
    mode = trim.upper()
    # Advance `start` past leading zeros, stop at the first nonzero.
    start = 0
    if 'F' in mode:
        for value in filt:
            if value != 0.:
                break
            start += 1
    # Walk `stop` back past trailing zeros.
    stop = len(filt)
    if 'B' in mode:
        for value in filt[::-1]:
            if value != 0.:
                break
            stop -= 1
    # Slicing preserves the input container type.
    return filt[start:stop]
@deprecate
def unique(x):
    """
    This function is deprecated. Use numpy.lib.arraysetops.unique()
    instead.
    """
    # Non-array inputs (no .flatten) fall back to a set-based path.
    try:
        flat = x.flatten()
    except AttributeError:
        return asarray(sorted(set(x)))
    if flat.size == 0:
        return flat
    flat.sort()
    # Keep the first element and every element differing from its
    # predecessor in the sorted order.
    keep = concatenate(([True], flat[1:] != flat[:-1]))
    return flat[keep]
def extract(condition, arr):
    """
    Return the elements of an array that satisfy some condition.

    This is equivalent to ``np.compress(ravel(condition), ravel(arr))``.
    If `condition` is boolean, ``np.extract`` is equivalent to
    ``arr[condition]``.  Note that `place` does the exact opposite of
    `extract`.

    Parameters
    ----------
    condition : array_like
        An array whose nonzero or True entries indicate the elements
        of `arr` to extract.
    arr : array_like
        Input array of the same size as `condition`.

    Returns
    -------
    extract : ndarray
        Rank 1 array of values from `arr` where `condition` is True.

    See Also
    --------
    take, put, copyto, compress, place

    Examples
    --------
    >>> arr = np.arange(12).reshape((3, 4))
    >>> condition = np.mod(arr, 3) == 0
    >>> np.extract(condition, arr)
    array([0, 3, 6, 9])
    """
    # Flatten both, then gather the positions where condition is nonzero.
    flat_cond = ravel(condition)
    flat_arr = ravel(arr)
    return np.take(flat_arr, nonzero(flat_cond)[0])
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.

    Similar to ``np.copyto(arr, vals, where=mask)``, except that `place`
    uses the first N elements of `vals`, where N is the number of True
    values in `mask` (repeating `vals` if it is shorter).  Note that
    `extract` does the exact opposite of `place`.

    Parameters
    ----------
    arr : ndarray
        Array to put data into.
    mask : array_like
        Boolean mask array.  Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`.  Must be non-empty if any element of
        `arr` is to be masked.

    See Also
    --------
    copyto, put, take, extract

    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr>2, [44, 55])
    >>> arr
    array([[ 0,  1,  2],
           [44, 55, 44]])
    """
    # The C helper requires a real ndarray; reject anything else up front.
    if isinstance(arr, np.ndarray):
        return _insert(arr, mask, vals)
    raise TypeError("argument 1 must be numpy.ndarray, "
                    "not {name}".format(name=type(arr).__name__))
def disp(mesg, device=None, linefeed=True):
    """
    Display a message on a device.

    Parameters
    ----------
    mesg : str
        Message to display.
    device : object
        Device to write message to.  If None, defaults to
        ``sys.stdout``.  `device` needs to have ``write()`` and
        ``flush()`` methods.
    linefeed : bool, optional
        Whether to append a line feed.  Defaults to True.

    Raises
    ------
    AttributeError
        If `device` does not have a ``write()`` or ``flush()`` method.

    Examples
    --------
    >>> import io
    >>> buf = io.StringIO()
    >>> np.disp('"Display" in a file', device=buf)
    >>> buf.getvalue()
    '"Display" in a file\\n'
    """
    target = sys.stdout if device is None else device
    text = '%s\n' % mesg if linefeed else '%s' % mesg
    target.write(text)
    # Flush so the message appears immediately even on buffered devices.
    target.flush()
    return
# See http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
_DIMENSION_NAME = r'\w+'
_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)
_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST)
_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT)
_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST)
def _parse_gufunc_signature(signature):
"""
Parse string signatures for a generalized universal function.
Arguments
---------
signature : string
Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)``
for ``np.matmul``.
Returns
-------
Tuple of input and output core dimensions parsed from the signature, each
of the form List[Tuple[str, ...]].
"""
if not re.match(_SIGNATURE, signature):
raise ValueError(
'not a valid gufunc signature: {}'.format(signature))
return tuple([tuple(re.findall(_DIMENSION_NAME, arg))
for arg in re.findall(_ARGUMENT, arg_list)]
for arg_list in signature.split('->'))
def _update_dim_sizes(dim_sizes, arg, core_dims):
"""
Incrementally check and update core dimension sizes for a single argument.
Arguments
---------
dim_sizes : Dict[str, int]
Sizes of existing core dimensions. Will be updated in-place.
arg : ndarray
Argument to examine.
core_dims : Tuple[str, ...]
Core dimensions for this argument.
"""
if not core_dims:
return
num_core_dims = len(core_dims)
if arg.ndim < num_core_dims:
raise ValueError(
'%d-dimensional argument does not have enough '
'dimensions for all core dimensions %r'
% (arg.ndim, core_dims))
core_shape = arg.shape[-num_core_dims:]
for dim, size in zip(core_dims, core_shape):
if dim in dim_sizes:
if size != dim_sizes[dim]:
raise ValueError(
'inconsistent size for core dimension %r: %r vs %r'
% (dim, size, dim_sizes[dim]))
else:
dim_sizes[dim] = size
def _parse_input_dimensions(args, input_core_dims):
    """
    Parse broadcast and core dimensions for vectorize with a signature.

    Arguments
    ---------
    args : Tuple[ndarray, ...]
        Tuple of input arguments to examine.
    input_core_dims : List[Tuple[str, ...]]
        List of core dimensions corresponding to each input.

    Returns
    -------
    broadcast_shape : Tuple[int, ...]
        Common shape to broadcast all non-core dimensions to.
    dim_sizes : Dict[str, int]
        Common sizes for named core dimensions.
    """
    dim_sizes = {}
    dummies = []
    for arg, core_dims in zip(args, input_core_dims):
        _update_dim_sizes(dim_sizes, arg, core_dims)
        # Only the leading (non-core) axes take part in broadcasting.
        n_broadcast = arg.ndim - len(core_dims)
        # Zero-strided stand-in with the right shape; avoids copying data
        # just to compute the broadcast result shape.
        dummies.append(
            np.lib.stride_tricks.as_strided(0, arg.shape[:n_broadcast]))
    broadcast_shape = np.lib.stride_tricks._broadcast_shape(*dummies)
    return broadcast_shape, dim_sizes
def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims):
"""Helper for calculating broadcast shapes with core dimensions."""
return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims)
for core_dims in list_of_core_dims]
def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes):
    """Helper for creating output arrays in vectorize."""
    shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims)
    out = []
    for shape, dtype in zip(shapes, dtypes):
        # Uninitialized storage; every element is written by the caller.
        out.append(np.empty(shape, dtype=dtype))
    return tuple(out)
class vectorize(object):
    """
    vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,
              signature=None)

    Generalized function class.

    Define a vectorized function which takes a nested sequence of objects or
    numpy arrays as inputs and returns an single or tuple of numpy array as
    output. The vectorized function evaluates `pyfunc` over successive tuples
    of the input arrays like the python map function, except it uses the
    broadcasting rules of numpy.

    The data type of the output of `vectorized` is determined by calling
    the function with the first element of the input. This can be avoided
    by specifying the `otypes` argument.

    Parameters
    ----------
    pyfunc : callable
        A python function or method.
    otypes : str or list of dtypes, optional
        The output data type. It must be specified as either a string of
        typecode characters or a list of data type specifiers. There should
        be one data type specifier for each output.
    doc : str, optional
        The docstring for the function. If `None`, the docstring will be the
        ``pyfunc.__doc__``.
    excluded : set, optional
        Set of strings or integers representing the positional or keyword
        arguments for which the function will not be vectorized. These will be
        passed directly to `pyfunc` unmodified.

        .. versionadded:: 1.7.0

    cache : bool, optional
        If `True`, then cache the first function call that determines the number
        of outputs if `otypes` is not provided.

        .. versionadded:: 1.7.0

    signature : string, optional
        Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for
        vectorized matrix-vector multiplication. If provided, ``pyfunc`` will
        be called with (and expected to return) arrays with shapes given by the
        size of corresponding core dimensions. By default, ``pyfunc`` is
        assumed to take scalars as input and output.

        .. versionadded:: 1.12.0

    Returns
    -------
    vectorized : callable
        Vectorized function.

    Examples
    --------
    >>> def myfunc(a, b):
    ...     "Return a-b if a>b, otherwise return a+b"
    ...     if a > b:
    ...         return a - b
    ...     else:
    ...         return a + b
    >>> vfunc = np.vectorize(myfunc)
    >>> vfunc([1, 2, 3, 4], 2)
    array([3, 4, 1, 2])

    The docstring is taken from the input function to `vectorize` unless it
    is specified:

    >>> vfunc.__doc__
    'Return a-b if a>b, otherwise return a+b'
    >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
    >>> vfunc.__doc__
    'Vectorized `myfunc`'

    The output type is determined by evaluating the first element of the input,
    unless it is specified:

    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.int32'>
    >>> vfunc = np.vectorize(myfunc, otypes=[np.float])
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.float64'>

    The `excluded` argument can be used to prevent vectorizing over certain
    arguments. This can be useful for array-like arguments of a fixed length
    such as the coefficients for a polynomial as in `polyval`:

    >>> def mypolyval(p, x):
    ...     _p = list(p)
    ...     res = _p.pop(0)
    ...     while _p:
    ...         res = res*x + _p.pop(0)
    ...     return res
    >>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
    >>> vpolyval(p=[1, 2, 3], x=[0, 1])
    array([3, 6])

    Positional arguments may also be excluded by specifying their position:

    >>> vpolyval.excluded.add(0)
    >>> vpolyval([1, 2, 3], x=[0, 1])
    array([3, 6])

    The `signature` argument allows for vectorizing functions that act on
    non-scalar arrays of fixed length. For example, you can use it for a
    vectorized calculation of Pearson correlation coefficient and its p-value:

    >>> import scipy.stats
    >>> pearsonr = np.vectorize(scipy.stats.pearsonr,
    ...                         signature='(n),(n)->(),()')
    >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]])
    (array([ 1., -1.]), array([ 0.,  0.]))

    Or for a vectorized convolution:

    >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)')
    >>> convolve(np.eye(4), [1, 2, 1])
    array([[ 1.,  2.,  1.,  0.,  0.,  0.],
           [ 0.,  1.,  2.,  1.,  0.,  0.],
           [ 0.,  0.,  1.,  2.,  1.,  0.],
           [ 0.,  0.,  0.,  1.,  2.,  1.]])

    See Also
    --------
    frompyfunc : Takes an arbitrary Python function and returns a ufunc

    Notes
    -----
    The `vectorize` function is provided primarily for convenience, not for
    performance. The implementation is essentially a for loop.

    If `otypes` is not specified, then a call to the function with the
    first argument will be used to determine the number of outputs. The
    results of this call will be cached if `cache` is `True` to prevent
    calling the function twice. However, to implement the cache, the
    original function must be wrapped which will slow down subsequent
    calls, so only do this if your function is expensive.

    The new keyword argument interface and `excluded` argument support
    further degrades performance.

    References
    ----------
    .. [1] NumPy Reference, section `Generalized Universal Function API
           <http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_.
    """

    def __init__(self, pyfunc, otypes=None, doc=None, excluded=None,
                 cache=False, signature=None):
        # Store the wrapped callable and call-time configuration.
        self.pyfunc = pyfunc
        self.cache = cache
        self.signature = signature
        self._ufunc = None    # Caching to improve default performance
        if doc is None:
            self.__doc__ = pyfunc.__doc__
        else:
            self.__doc__ = doc
        if isinstance(otypes, str):
            # Typecode string: every character must be a known dtype code.
            for char in otypes:
                if char not in typecodes['All']:
                    raise ValueError("Invalid otype specified: %s" % (char,))
        elif iterable(otypes):
            # Sequence of dtype-like objects: normalize to a typecode string.
            otypes = ''.join([_nx.dtype(x).char for x in otypes])
        elif otypes is not None:
            raise ValueError("Invalid otype specification")
        self.otypes = otypes

        # Excluded variable support
        if excluded is None:
            excluded = set()
        self.excluded = set(excluded)

        # Pre-parse the gufunc signature once; None means scalar mode.
        if signature is not None:
            self._in_and_out_core_dims = _parse_gufunc_signature(signature)
        else:
            self._in_and_out_core_dims = None

    def __call__(self, *args, **kwargs):
        """
        Return arrays with the results of `pyfunc` broadcast (vectorized) over
        `args` and `kwargs` not in `excluded`.
        """
        excluded = self.excluded
        if not kwargs and not excluded:
            # Fast path: vectorize over every positional argument directly.
            func = self.pyfunc
            vargs = args
        else:
            # The wrapper accepts only positional arguments: we use `names` and
            # `inds` to mutate `the_args` and `kwargs` to pass to the original
            # function.
            nargs = len(args)

            names = [_n for _n in kwargs if _n not in excluded]
            inds = [_i for _i in range(nargs) if _i not in excluded]
            the_args = list(args)

            def func(*vargs):
                # Re-slot the vectorized values into their original positional
                # slots (first len(inds) values) and keyword slots (the rest).
                for _n, _i in enumerate(inds):
                    the_args[_i] = vargs[_n]
                kwargs.update(zip(names, vargs[len(inds):]))
                return self.pyfunc(*the_args, **kwargs)

            vargs = [args[_i] for _i in inds]
            vargs.extend([kwargs[_n] for _n in names])

        return self._vectorize_call(func=func, args=vargs)

    def _get_ufunc_and_otypes(self, func, args):
        """Return (ufunc, otypes)."""
        # frompyfunc will fail if args is empty
        if not args:
            raise ValueError('args can not be empty')

        if self.otypes is not None:
            otypes = self.otypes
            nout = len(otypes)

            # Note logic here: We only *use* self._ufunc if func is self.pyfunc
            # even though we set self._ufunc regardless.
            if func is self.pyfunc and self._ufunc is not None:
                ufunc = self._ufunc
            else:
                ufunc = self._ufunc = frompyfunc(func, len(args), nout)
        else:
            # Get number of outputs and output types by calling the function on
            # the first entries of args.  We also cache the result to prevent
            # the subsequent call when the ufunc is evaluated.
            # Assumes that ufunc first evaluates the 0th elements in the input
            # arrays (the input values are not checked to ensure this)
            args = [asarray(arg) for arg in args]
            if builtins.any(arg.size == 0 for arg in args):
                raise ValueError('cannot call `vectorize` on size 0 inputs '
                                 'unless `otypes` is set')

            inputs = [arg.flat[0] for arg in args]
            outputs = func(*inputs)

            # Performance note: profiling indicates that -- for simple
            # functions at least -- this wrapping can almost double the
            # execution time.
            # Hence we make it optional.
            if self.cache:
                _cache = [outputs]

                def _func(*vargs):
                    # Serve the trial-call result once, then defer to func.
                    if _cache:
                        return _cache.pop()
                    else:
                        return func(*vargs)
            else:
                _func = func

            if isinstance(outputs, tuple):
                nout = len(outputs)
            else:
                nout = 1
                outputs = (outputs,)

            otypes = ''.join([asarray(outputs[_k]).dtype.char
                              for _k in range(nout)])

            # Performance note: profiling indicates that creating the ufunc is
            # not a significant cost compared with wrapping so it seems not
            # worth trying to cache this.
            ufunc = frompyfunc(_func, len(args), nout)

        return ufunc, otypes

    def _vectorize_call(self, func, args):
        """Vectorized call to `func` over positional `args`."""
        if self.signature is not None:
            res = self._vectorize_call_with_signature(func, args)
        elif not args:
            # No inputs at all: just call the function once.
            res = func()
        else:
            ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)

            # Convert args to object arrays first
            inputs = [array(a, copy=False, subok=True, dtype=object)
                      for a in args]

            outputs = ufunc(*inputs)

            if ufunc.nout == 1:
                res = array(outputs, copy=False, subok=True, dtype=otypes[0])
            else:
                res = tuple([array(x, copy=False, subok=True, dtype=t)
                             for x, t in zip(outputs, otypes)])
        return res

    def _vectorize_call_with_signature(self, func, args):
        """Vectorized call over positional arguments with a signature."""
        input_core_dims, output_core_dims = self._in_and_out_core_dims

        if len(args) != len(input_core_dims):
            raise TypeError('wrong number of positional arguments: '
                            'expected %r, got %r'
                            % (len(input_core_dims), len(args)))
        args = tuple(asanyarray(arg) for arg in args)

        broadcast_shape, dim_sizes = _parse_input_dimensions(
            args, input_core_dims)
        input_shapes = _calculate_shapes(broadcast_shape, dim_sizes,
                                         input_core_dims)
        # Broadcast every input up to the common loop shape (views, no copy).
        args = [np.broadcast_to(arg, shape, subok=True)
                for arg, shape in zip(args, input_shapes)]

        outputs = None
        otypes = self.otypes
        nout = len(output_core_dims)

        # Loop over every broadcast index, calling func on the core slices.
        for index in np.ndindex(*broadcast_shape):
            results = func(*(arg[index] for arg in args))

            n_results = len(results) if isinstance(results, tuple) else 1

            if nout != n_results:
                raise ValueError(
                    'wrong number of outputs from pyfunc: expected %r, got %r'
                    % (nout, n_results))

            if nout == 1:
                results = (results,)

            if outputs is None:
                # First iteration: learn output dtypes/core sizes and allocate.
                for result, core_dims in zip(results, output_core_dims):
                    _update_dim_sizes(dim_sizes, result, core_dims)

                if otypes is None:
                    otypes = [asarray(result).dtype for result in results]

                outputs = _create_arrays(broadcast_shape, dim_sizes,
                                         output_core_dims, otypes)

            for output, result in zip(outputs, results):
                output[index] = result

        if outputs is None:
            # did not call the function even once
            if otypes is None:
                raise ValueError('cannot call `vectorize` on size 0 inputs '
                                 'unless `otypes` is set')
            if builtins.any(dim not in dim_sizes
                            for dims in output_core_dims
                            for dim in dims):
                raise ValueError('cannot call `vectorize` with a signature '
                                 'including new output dimensions on size 0 '
                                 'inputs')
            outputs = _create_arrays(broadcast_shape, dim_sizes,
                                     output_core_dims, otypes)

        return outputs[0] if nout == 1 else outputs
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N - 1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
1-D array of integer freguency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x, y))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print(np.cov(x))
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions")
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
if y.ndim > 2:
raise ValueError("y has more than 2 dimensions")
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=np.float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=np.float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = X.shape[1] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
c = dot(X, X_T.conj())
c *= 1. / np.float64(fact)
return c.squeeze()
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
    """
    Return Pearson product-moment correlation coefficients.

    Please refer to the documentation for `cov` for more detail. The
    relationship between the correlation coefficient matrix, `R`, and the
    covariance matrix, `C`, is

    .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }

    The values of `R` are between -1 and 1, inclusive.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `x`.
    rowvar : int, optional
        If `rowvar` is non-zero (default), then each row represents a
        variable, with observations in the columns. Otherwise, each column
        represents a variable, while the rows contain observations.
    bias : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0
    ddof : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0

    Returns
    -------
    R : ndarray
        The correlation coefficient matrix of the variables.

    See Also
    --------
    cov : Covariance matrix

    Notes
    -----
    Due to floating point rounding the resulting array may not be Hermitian,
    the diagonal elements may not be 1, and the elements may not satisfy the
    inequality abs(a) <= 1. The real and imaginary parts are clipped to the
    interval [-1, 1] in an attempt to improve on that situation but is not
    much help in the complex case.

    This function accepts but discards arguments `bias` and `ddof`. This is
    for backwards compatibility with previous versions of this function. These
    arguments had no effect on the return values of the function and can be
    safely ignored in this and previous versions of numpy.
    """
    if bias is not np._NoValue or ddof is not np._NoValue:
        # 2015-03-15, 1.10
        warnings.warn('bias and ddof have no effect and are deprecated',
                      DeprecationWarning, stacklevel=2)
    mat = cov(x, y, rowvar)
    try:
        variances = diag(mat)
    except ValueError:
        # scalar covariance
        # nan if incorrect value (nan, inf, 0), 1 otherwise
        return mat / mat
    sigma = sqrt(variances.real)
    # Divide rows and columns by the standard deviations in place.
    mat /= sigma[:, None]
    mat /= sigma[None, :]

    # Clip real and imaginary parts to [-1, 1]. This does not guarantee
    # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
    # excessive work.
    np.clip(mat.real, -1, 1, out=mat.real)
    if np.iscomplexobj(mat):
        np.clip(mat.imag, -1, 1, out=mat.imag)
    return mat
def blackman(M):
    """
    Return the Blackman window.

    The Blackman window is a taper formed by using the first three terms
    of a summation of cosines. It was designed to have close to the
    minimal leakage possible, and is only slightly worse than a Kaiser
    window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an empty
        array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, hamming, hanning, kaiser

    Notes
    -----
    The Blackman window is defined as

    .. math::  w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)

    It is also known as an apodization or tapering function, and is a
    "near optimal" tapering function, almost as good (by some measures)
    as the Kaiser window.

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
    Dover Publications, New York.

    Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
    Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.

    Examples
    --------
    >>> np.blackman(12)
    array([ -1.38777878e-17,   3.26064346e-02,   1.59903635e-01,
             4.14397981e-01,   7.36045180e-01,   9.67046769e-01,
             9.67046769e-01,   7.36045180e-01,   4.14397981e-01,
             1.59903635e-01,   3.26064346e-02,  -1.38777878e-17])
    """
    if M < 1:
        # No samples requested: empty window.
        return array([])
    if M == 1:
        # A single-point window is just the peak value.
        return ones(1, float)
    k = arange(0, M)
    span = M - 1
    return 0.42 - 0.5*cos(2.0*pi*k/span) + 0.08*cos(4.0*pi*k/span)
def bartlett(M):
    """
    Return the Bartlett window.

    The Bartlett window is very similar to a triangular window, except
    that the end points are at zero. It is often used in signal
    processing for tapering a signal, without generating too much
    ripple in the frequency domain.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : array
        The triangular window, with the maximum value normalized to one
        (the value one appears only if the number of samples is odd), with
        the first and last samples equal to zero.

    See Also
    --------
    blackman, hamming, hanning, kaiser

    Notes
    -----
    The Bartlett window is defined as

    .. math:: w(n) = \\frac{2}{M-1} \\left(
              \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
              \\right)

    Note that convolution with this window produces linear interpolation.
    The Fourier transform of the Bartlett is the product of two sinc
    functions.

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.bartlett(12)
    array([ 0.        ,  0.18181818,  0.36363636,  0.54545455,  0.72727273,
            0.90909091,  0.90909091,  0.72727273,  0.54545455,  0.36363636,
            0.18181818,  0.        ])
    """
    if M < 1:
        # No samples requested: empty window.
        return array([])
    if M == 1:
        # A single-point window is just the peak value.
        return ones(1, float)
    n = arange(0, M)
    half = (M-1)/2.0
    rising = 2.0*n/(M-1)
    # Ramp up over the first half, mirror back down over the second.
    return where(less_equal(n, half), rising, 2.0 - rising)
def hanning(M):
    """
    Return the Hanning window.

    The Hanning window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the value
        one appears only if `M` is odd).

    See Also
    --------
    bartlett, blackman, hamming, kaiser

    Notes
    -----
    The Hanning window is defined as

    .. math::  w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    The Hanning was named for Julius von Hann, an Austrian meteorologist.
    It is also known as the Cosine Bell. Some authors prefer that it be
    called a Hann window, to help avoid confusion with the very similar
    Hamming window.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 106-108.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.hanning(12)
    array([ 0.        ,  0.07937323,  0.29229249,  0.57115742,  0.82743037,
            0.97974649,  0.97974649,  0.82743037,  0.57115742,  0.29229249,
            0.07937323,  0.        ])
    """
    if M < 1:
        # No samples requested: empty window.
        return array([])
    if M == 1:
        # A single-point window is just the peak value.
        return ones(1, float)
    k = arange(0, M)
    return 0.5 - 0.5*cos(2.0*pi*k/(M-1))
def hamming(M):
    """
    Return the Hamming window.

    The Hamming window is a taper formed by using a weighted cosine,

    .. math::  w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    It was named for R. W. Hamming, an associate of J. W. Tukey, and is
    described in Blackman and Tukey.  It is commonly used in signal
    processing as an apodization (tapering) function.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hanning, kaiser

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    Examples
    --------
    >>> np.hamming(12)
    array([ 0.08      ,  0.15302337,  0.34890909,  0.60546483,  0.84123594,
            0.98136677,  0.98136677,  0.84123594,  0.60546483,  0.34890909,
            0.15302337,  0.08      ])
    """
    # Degenerate sizes: no samples at all, or a single unit sample.
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    # One full cosine period is stretched across the M-1 intervals.
    angular_step = 2.0 * pi / (M - 1)
    return 0.54 - 0.46 * cos(angular_step * samples)
## Code from cephes for i0
# Chebyshev series coefficients, taken from the Cephes math library, for
# approximating the modified Bessel function I0.  `_i0A` covers arguments
# in [0, 8] and `_i0B` covers (8, inf); the argument is rescaled onto the
# Chebyshev domain before evaluation with `_chbevl` (see the helpers that
# follow).  Do not edit these values — they are verbatim Cephes constants.
_i0A = [
    -4.41534164647933937950E-18,
    3.33079451882223809783E-17,
    -2.43127984654795469359E-16,
    1.71539128555513303061E-15,
    -1.16853328779934516808E-14,
    7.67618549860493561688E-14,
    -4.85644678311192946090E-13,
    2.95505266312963983461E-12,
    -1.72682629144155570723E-11,
    9.67580903537323691224E-11,
    -5.18979560163526290666E-10,
    2.65982372468238665035E-9,
    -1.30002500998624804212E-8,
    6.04699502254191894932E-8,
    -2.67079385394061173391E-7,
    1.11738753912010371815E-6,
    -4.41673835845875056359E-6,
    1.64484480707288970893E-5,
    -5.75419501008210370398E-5,
    1.88502885095841655729E-4,
    -5.76375574538582365885E-4,
    1.63947561694133579842E-3,
    -4.32430999505057594430E-3,
    1.05464603945949983183E-2,
    -2.37374148058994688156E-2,
    4.93052842396707084878E-2,
    -9.49010970480476444210E-2,
    1.71620901522208775349E-1,
    -3.04682672343198398683E-1,
    6.76795274409476084995E-1
    ]

_i0B = [
    -7.23318048787475395456E-18,
    -4.83050448594418207126E-18,
    4.46562142029675999901E-17,
    3.46122286769746109310E-17,
    -2.82762398051658348494E-16,
    -3.42548561967721913462E-16,
    1.77256013305652638360E-15,
    3.81168066935262242075E-15,
    -9.55484669882830764870E-15,
    -4.15056934728722208663E-14,
    1.54008621752140982691E-14,
    3.85277838274214270114E-13,
    7.18012445138366623367E-13,
    -1.79417853150680611778E-12,
    -1.32158118404477131188E-11,
    -3.14991652796324136454E-11,
    1.18891471078464383424E-11,
    4.94060238822496958910E-10,
    3.39623202570838634515E-9,
    2.26666899049817806459E-8,
    2.04891858946906374183E-7,
    2.89137052083475648297E-6,
    6.88975834691682398426E-5,
    3.36911647825569408990E-3,
    8.04490411014108831608E-1
    ]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
    # I0 on [0, 8]: the Cephes fit approximates exp(-x)*I0(x) with the
    # argument mapped by x/2 - 2, so multiply the series value by exp(x).
    return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
    # I0 on (8, inf): series in 32/x - 2, with the asymptotic
    # exp(x)/sqrt(x) prefactor restored afterwards.
    return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
    """
    Modified Bessel function of the first kind, order 0.

    Usually denoted :math:`I_0`.  This function broadcasts, but will *not*
    "up-cast" int dtype arguments unless accompanied by at least one float
    or complex dtype argument (see Raises below).

    Parameters
    ----------
    x : array_like, dtype float or complex
        Argument of the Bessel function.

    Returns
    -------
    out : ndarray, shape = x.shape, dtype = x.dtype
        The modified Bessel function evaluated at each of the elements
        of `x`.

    Raises
    ------
    TypeError: array cannot be safely cast to required type
        If argument consists exclusively of int dtypes.

    See Also
    --------
    scipy.special.iv, scipy.special.ive

    Notes
    -----
    Uses Clenshaw's published Chebyshev expansions, as referenced by
    Abramowitz and Stegun: the domain is split into [0, 8] and (8, inf)
    with a separate polynomial fit on each piece.  Relative error on
    [0, 30] is documented as peaking at 5.8e-16 (rms 1.4e-16).

    Examples
    --------
    >>> np.i0([0.])
    array(1.0)
    """
    x = atleast_1d(x).copy()
    y = empty_like(x)
    # I0 is an even function, so fold onto the non-negative half-line.
    neg = (x < 0)
    x[neg] = -x[neg]
    # Dispatch each element to the Chebyshev fit for its interval.
    small = (x <= 8.0)
    y[small] = _i0_1(x[small])
    y[~small] = _i0_2(x[~small])
    return y.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
    """
    Return the Kaiser window.

    The Kaiser window is a taper formed by using a Bessel function,

    .. math::  w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
               \\right)/I_0(\\beta)

    with :math:`-\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}`, where
    :math:`I_0` is the modified zeroth-order Bessel function.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    beta : float
        Shape parameter for window.  0 gives a rectangular window, ~5 is
        similar to a Hamming, ~6 to a Hanning, and 8.6 to a Blackman; 14
        is a reasonable starting point.

    Returns
    -------
    out : array
        The window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hamming, hanning

    Notes
    -----
    The Kaiser window approximates the Digital Prolate Spheroidal
    Sequence (Slepian) window, which maximizes the energy in the main
    lobe relative to total energy.  As `beta` grows the window narrows,
    so `M` must be large enough to sample the spike, otherwise NaNs will
    be returned.

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
           John Wiley and Sons, New York, (1966).
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 177-178.

    Examples
    --------
    >>> np.kaiser(12, 14)
    array([  7.72686684e-06,   3.46009194e-03,   4.65200189e-02,
             2.29737120e-01,   5.99885316e-01,   9.45674898e-01,
             9.45674898e-01,   5.99885316e-01,   2.29737120e-01,
             4.65200189e-02,   3.46009194e-03,   7.72686684e-06])
    """
    # ``numpy.dual`` was deprecated in NumPy 1.20 and removed in 1.26;
    # import i0 from the top-level numpy namespace instead (numerically
    # equivalent: numpy.dual merely re-exported it).
    from numpy import i0
    if M == 1:
        return np.array([1.])
    n = arange(0, M)
    alpha = (M-1)/2.0
    return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
    """
    Return the sinc function, :math:`\\sin(\\pi x)/(\\pi x)`.

    Parameters
    ----------
    x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.

    Returns
    -------
    out : ndarray
        ``sinc(x)``, which has the same shape as the input.

    Notes
    -----
    ``sinc(0)`` is the limit value 1.  The name is short for "sine
    cardinal" (sinus cardinalis).  The sinc function appears throughout
    signal processing: anti-aliasing, Lanczos resampling, interpolation —
    it is the ideal kernel for bandlimited interpolation of discrete-time
    signals.

    References
    ----------
    .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
           Resource. http://mathworld.wolfram.com/SincFunction.html
    .. [2] Wikipedia, "Sinc function",
           http://en.wikipedia.org/wiki/Sinc_function

    Examples
    --------
    >>> np.sinc(0.5)
    0.63661977236758138
    """
    vals = np.asanyarray(x)
    # Replace exact zeros with a tiny value so the division below is
    # well-defined; sin(pi*eps)/(pi*eps) evaluates to 1.0 within rounding,
    # which is the correct limit at the origin.
    arg = where(vals == 0, 1.0e-20, vals) * pi
    return sin(arg) / arg
def msort(a):
    """
    Return a copy of an array sorted along the first axis.

    ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.

    Parameters
    ----------
    a : array_like
        Array to be sorted.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    sort
    """
    # Copy first (preserving subclasses), then sort the copy in place
    # along axis 0 so the caller's array is untouched.
    result = array(a, subok=True, copy=True)
    result.sort(0)
    return result
def _ureduce(a, func, **kwargs):
    """
    Internal Function.
    Call `func` with `a` as first argument swapping the axes to use extended
    axis on functions that don't support it natively.
    Returns result and a.shape with axis dims set to 1.
    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    func : callable
        Reduction function capable of receiving a single axis argument.
        It is called with `a` as first argument followed by `kwargs`.
    kwargs : keyword arguments
        additional keyword arguments to pass to `func`.
    Returns
    -------
    result : tuple
        Result of func(a, **kwargs) and a.shape with axis dims set to 1
        which can be used to reshape the result to the same shape a ufunc with
        keepdims=True would produce.
    """
    a = np.asanyarray(a)
    axis = kwargs.get('axis', None)
    if axis is not None:
        # `keepdim` starts as the full shape; reduced dims are set to 1 below.
        keepdim = list(a.shape)
        nd = a.ndim
        try:
            # Single integer axis: validate range and mark it reduced.
            axis = operator.index(axis)
            if axis >= nd or axis < -nd:
                raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
            keepdim[axis] = 1
        except TypeError:
            # `axis` is a sequence of ints (operator.index raised TypeError):
            # validate each, rejecting out-of-range and duplicate values.
            sax = set()
            for x in axis:
                if x >= nd or x < -nd:
                    raise IndexError("axis %d out of bounds (%d)" % (x, nd))
                if x in sax:
                    raise ValueError("duplicate value in axis")
                sax.add(x % nd)
                keepdim[x] = 1
            # Axes NOT being reduced, in original order.
            keep = sax.symmetric_difference(frozenset(range(nd)))
            nkeep = len(keep)
            # swap axis that should not be reduced to front
            for i, s in enumerate(sorted(keep)):
                a = a.swapaxes(i, s)
            # merge reduced axis
            a = a.reshape(a.shape[:nkeep] + (-1,))
            # `func` now reduces over the single merged trailing axis.
            kwargs['axis'] = -1
    else:
        # Flattened reduction: every dim collapses to size 1 for keepdims.
        keepdim = [1] * a.ndim
    r = func(a, **kwargs)
    return r, keepdim
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axis.

    Returns the median of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the medians are computed. The default
        is to compute the median along a flattened version of the array.
        A sequence of axes is supported since version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow use of memory of input array `a` for
        calculations. The input array will be modified by the call to
        `median`; treat its contents as undefined afterwards. Default is
        False. If `overwrite_input` is ``True`` and `a` is not already an
        `ndarray`, an error will be raised.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one, so the result
        broadcasts correctly against the original `arr`.

        .. versionadded:: 1.9.0

    Returns
    -------
    median : ndarray
        A new array holding the result. If the input contains integers
        or floats smaller than ``float64``, then the output data-type is
        ``np.float64``. Otherwise, the data-type of the output is the
        same as that of the input. If `out` is specified, that array is
        returned instead.

    See Also
    --------
    mean, percentile

    Notes
    -----
    Given a vector ``V`` of length ``N``, the median of ``V`` is the
    middle value of a sorted copy of ``V`` (``V_sorted[(N-1)/2]``) when
    ``N`` is odd, and the average of the two middle values of
    ``V_sorted`` when ``N`` is even.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.median(a)
    3.5
    >>> np.median(a, axis=0)
    array([ 6.5,  4.5,  2.5])
    >>> np.median(a, axis=1)
    array([ 7.,  2.])
    """
    # _ureduce handles tuple-of-axes support and hands back the
    # keepdims-compatible shape alongside the reduction result.
    result, reduced_shape = _ureduce(a, func=_median, axis=axis, out=out,
                                     overwrite_input=overwrite_input)
    return result.reshape(reduced_shape) if keepdims else result
def _median(a, axis=None, out=None, overwrite_input=False):
# can't be reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact) and sz > 0:
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
if n == True:
warnings.warn("Invalid value encountered in median",
RuntimeWarning, stacklevel=3)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning, stacklevel=3)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None,
               overwrite_input=False, interpolation='linear', keepdims=False):
    """
    Compute the qth percentile of the data along the specified axis.

    Returns the qth percentile(s) of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : float in range of [0,100] (or sequence of floats)
        Percentile to compute, which must be between 0 and 100 inclusive.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the percentiles are computed. The
        default is to compute the percentile(s) along a flattened
        version of the array. A sequence of axes is supported since
        version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow use of memory of input array `a` for
        calculations. The input array will be modified by the call to
        `percentile`; do not rely on its contents afterwards. Default is
        False. If `a` is not already an array, this parameter has no
        effect as `a` will be converted to an array internally.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method to use when the desired quantile lies
        between two data points ``i < j``:
            * linear: ``i + (j - i) * fraction``, where ``fraction``
              is the fractional part of the index surrounded by ``i``
              and ``j``.
            * lower: ``i``.
            * higher: ``j``.
            * nearest: ``i`` or ``j``, whichever is nearest.
            * midpoint: ``(i + j) / 2``.

        .. versionadded:: 1.9.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one, so the result broadcasts
        correctly against the original array `a`.

        .. versionadded:: 1.9.0

    Returns
    -------
    percentile : scalar or ndarray
        If `q` is a single percentile and `axis=None`, then the result
        is a scalar. If multiple percentiles are given, first axis of
        the result corresponds to the percentiles, and the other axes
        are those that remain after the reduction of `a`. If the input
        contains integers or floats smaller than ``float64``, the output
        data-type is ``float64``; otherwise it matches the input. If
        `out` is specified, that array is returned instead.

    See Also
    --------
    mean, median, nanpercentile

    Notes
    -----
    Given a vector ``V`` of length ``N``, the ``q``-th percentile of
    ``V`` is the value ``q/100`` of the way from the minimum to the
    maximum in a sorted copy of ``V``. The values and distances of
    the two nearest neighbors as well as the `interpolation` parameter
    determine the percentile when the normalized ranking does not match
    the location of ``q`` exactly. This function is the same as the
    median if ``q=50``, the minimum if ``q=0``, and the maximum if
    ``q=100``.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=1)
    array([ 7.,  2.])
    """
    # Copy `q` so the worker can normalize it to [0, 1] in place without
    # mutating the caller's data.
    q = array(q, dtype=np.float64, copy=True)
    result, reduced_shape = _ureduce(a, func=_percentile, q=q, axis=axis,
                                     out=out,
                                     overwrite_input=overwrite_input,
                                     interpolation=interpolation)
    if not keepdims:
        return result
    # A scalar q keeps the reduced shape as-is; a sequence of q values
    # prepends the percentile axis.
    if q.ndim == 0:
        return result.reshape(reduced_shape)
    return result.reshape([len(q)] + reduced_shape)
def _percentile(a, q, axis=None, out=None,
                overwrite_input=False, interpolation='linear', keepdims=False):
    # Worker for `percentile`.  `q` must already be a float64 ndarray (the
    # wrapper copies it); it is normalized to [0, 1] in place here.
    # NOTE(review): `keepdims` is accepted but unused — the wrapper handles
    # keepdims via _ureduce's returned shape.
    a = asarray(a)
    if q.ndim == 0:
        # Do not allow 0-d arrays because following code fails for scalar
        zerod = True
        q = q[None]
    else:
        zerod = False
    # avoid expensive reductions, relevant for arrays with < O(1000) elements
    if q.size < 10:
        for i in range(q.size):
            if q[i] < 0. or q[i] > 100.:
                raise ValueError("Percentiles must be in the range [0,100]")
            q[i] /= 100.
    else:
        # faster than any()
        if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
            raise ValueError("Percentiles must be in the range [0,100]")
        q /= 100.
    # Prepare `ap` for partitioning: reuse `a`'s memory only when the
    # caller explicitly allowed it, otherwise work on a copy.
    if overwrite_input:
        if axis is None:
            ap = a.ravel()
        else:
            ap = a
    else:
        if axis is None:
            ap = a.flatten()
        else:
            ap = a.copy()
    if axis is None:
        axis = 0
    Nx = ap.shape[axis]
    # Fractional positions of each percentile within the sorted data.
    indices = q * (Nx - 1)
    # round fractional indices according to interpolation method
    if interpolation == 'lower':
        indices = floor(indices).astype(intp)
    elif interpolation == 'higher':
        indices = ceil(indices).astype(intp)
    elif interpolation == 'midpoint':
        indices = 0.5 * (floor(indices) + ceil(indices))
    elif interpolation == 'nearest':
        indices = around(indices).astype(intp)
    elif interpolation == 'linear':
        pass  # keep index as fraction and interpolate
    else:
        raise ValueError(
            "interpolation can only be 'linear', 'lower' 'higher', "
            "'midpoint', or 'nearest'")
    n = np.array(False, dtype=bool)  # check for nan's flag
    if indices.dtype == intp:  # take the points along axis
        # Integral indices ('lower'/'higher'/'nearest'): a single partition
        # plus `take` suffices; no weighting needed.
        if np.issubdtype(a.dtype, np.inexact):
            # Also partition -1 so any nan (which sorts last) is detectable.
            indices = concatenate((indices, [-1]))
        ap.partition(indices, axis=axis)
        # ensure axis with qth is first
        ap = np.rollaxis(ap, axis, 0)
        axis = 0
        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            indices = indices[:-1]
            n = np.isnan(ap[-1:, ...])
        if zerod:
            indices = indices[0]
        r = take(ap, indices, axis=axis, out=out)
    else:  # weight the points above and below the indices
        # Fractional indices ('linear'/'midpoint'): linearly blend the
        # two neighbouring order statistics.
        indices_below = floor(indices).astype(intp)
        indices_above = indices_below + 1
        indices_above[indices_above > Nx - 1] = Nx - 1
        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            indices_above = concatenate((indices_above, [-1]))
        weights_above = indices - indices_below
        weights_below = 1.0 - weights_above
        # Shape the weights so they broadcast along the reduction axis only.
        weights_shape = [1, ] * ap.ndim
        weights_shape[axis] = len(indices)
        weights_below.shape = weights_shape
        weights_above.shape = weights_shape
        ap.partition(concatenate((indices_below, indices_above)), axis=axis)
        # ensure axis with qth is first
        ap = np.rollaxis(ap, axis, 0)
        weights_below = np.rollaxis(weights_below, axis, 0)
        weights_above = np.rollaxis(weights_above, axis, 0)
        axis = 0
        # Check if the array contains any nan's
        if np.issubdtype(a.dtype, np.inexact):
            indices_above = indices_above[:-1]
            n = np.isnan(ap[-1:, ...])
        x1 = take(ap, indices_below, axis=axis) * weights_below
        x2 = take(ap, indices_above, axis=axis) * weights_above
        # ensure axis with qth is first
        x1 = np.rollaxis(x1, axis, 0)
        x2 = np.rollaxis(x2, axis, 0)
        if zerod:
            x1 = x1.squeeze(0)
            x2 = x2.squeeze(0)
        if out is not None:
            r = add(x1, x2, out=out)
        else:
            r = add(x1, x2)
    if np.any(n):
        # Propagate nan into every result slice that contained one,
        # warning like `mean` would.
        warnings.warn("Invalid value encountered in percentile",
                      RuntimeWarning, stacklevel=3)
        if zerod:
            if ap.ndim == 1:
                if out is not None:
                    out[...] = a.dtype.type(np.nan)
                    r = out
                else:
                    r = a.dtype.type(np.nan)
            else:
                r[..., n.squeeze(0)] = a.dtype.type(np.nan)
        else:
            if r.ndim == 1:
                r[:] = a.dtype.type(np.nan)
            else:
                r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
    return r
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrate along the given axis using the composite trapezoidal rule.

    Integrate `y` (`x`) along given axis.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        The sample points corresponding to the `y` values. If `x` is None,
        the sample points are assumed to be evenly spaced `dx` apart. The
        default is None.
    dx : scalar, optional
        The spacing between sample points when `x` is None. The default is 1.
    axis : int, optional
        The axis along which to integrate.

    Returns
    -------
    trapz : float
        Definite integral as approximated by trapezoidal rule.

    See Also
    --------
    sum, cumsum

    References
    ----------
    .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule

    Examples
    --------
    >>> np.trapz([1,2,3])
    4.0
    >>> np.trapz([1,2,3], x=[4,6,8])
    8.0
    >>> np.trapz([1,2,3], dx=2)
    8.0
    """
    y = asanyarray(y)
    if x is None:
        d = dx
    else:
        x = asanyarray(x)
        if x.ndim == 1:
            d = diff(x)
            # reshape to correct shape so spacing broadcasts along `axis`
            shape = [1]*y.ndim
            shape[axis] = d.shape[0]
            d = d.reshape(shape)
        else:
            d = diff(x, axis=axis)
    nd = len(y.shape)
    slice1 = [slice(None)]*nd
    slice2 = [slice(None)]*nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    # Indexing with a *list* of slices was deprecated in NumPy 1.15 and
    # later became an error; index with tuples instead.
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
    except ValueError:
        # Operations didn't work (e.g. object arrays), cast to ndarray
        d = np.asarray(d)
        y = np.asarray(y)
        ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
    return ret
#always succeed
def add_newdoc(place, obj, doc):
    """
    Adds documentation to obj which is in module place.

    If doc is a string add it to obj as a docstring.
    If doc is a tuple, the first element is interpreted as an attribute
    of obj and the second as the docstring: (method, docstring).
    If doc is a list, each element should be a sequence of length two:
    [(method1, docstring1), (method2, docstring2), ...].

    This routine never raises an error: it cannot modify read-only
    docstrings (as on new-style classes or built-in functions), and
    because it never raises, the caller must check manually that the
    docstrings were actually changed.
    """
    try:
        target = getattr(__import__(place, globals(), {}, [obj]), obj)
        if isinstance(doc, str):
            add_docstring(target, doc.strip())
        elif isinstance(doc, tuple):
            attr, text = doc
            add_docstring(getattr(target, attr), text.strip())
        elif isinstance(doc, list):
            for attr, text in doc:
                add_docstring(getattr(target, attr), text.strip())
    except:
        # Swallow everything by contract ("always succeed"): bad module,
        # missing attribute, or an unwritable docstring must not raise.
        pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
    """
    Return coordinate matrices from coordinate vectors.

    Make N-D coordinate arrays for vectorized evaluations of N-D
    scalar/vector fields over N-D grids, given one-dimensional coordinate
    arrays x1, x2,..., xn.

    .. versionchanged:: 1.9
       1-D and 0-D cases are allowed.

    Parameters
    ----------
    x1, x2,..., xn : array_like
        1-D arrays representing the coordinates of a grid.
    indexing : {'xy', 'ij'}, optional
        Cartesian ('xy', default) or matrix ('ij') indexing of output.
        With inputs of length M and N, 'xy' gives (N, M)-shaped outputs
        while 'ij' gives (M, N).

        .. versionadded:: 1.7.0
    sparse : bool, optional
        If True a sparse grid is returned in order to conserve memory.
        Default is False.

        .. versionadded:: 1.7.0
    copy : bool, optional
        If False, a view into the original arrays are returned in order
        to conserve memory. Default is True. Note ``sparse=False,
        copy=False`` will likely return non-contiguous arrays, and more
        than one element of a broadcast array may refer to a single
        memory location — make copies before writing.

        .. versionadded:: 1.7.0

    Returns
    -------
    X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)``,
        return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
        or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy',
        with the elements of `xi` repeated to fill the matrix along
        the first dimension for `x1`, the second for `x2` and so on.

    See Also
    --------
    index_tricks.mgrid, index_tricks.ogrid

    Examples
    --------
    >>> nx, ny = (3, 2)
    >>> x = np.linspace(0, 1, nx)
    >>> y = np.linspace(0, 1, ny)
    >>> xv, yv = meshgrid(x, y)
    >>> xv
    array([[ 0. ,  0.5,  1. ],
           [ 0. ,  0.5,  1. ]])
    >>> yv
    array([[ 0.,  0.,  0.],
           [ 1.,  1.,  1.]])
    """
    ndim = len(xi)

    # Pull the recognized keyword options; anything left over is an error.
    copy_ = kwargs.pop('copy', True)
    sparse = kwargs.pop('sparse', False)
    indexing = kwargs.pop('indexing', 'xy')
    if kwargs:
        raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
                        % (list(kwargs)[0],))
    if indexing not in ['xy', 'ij']:
        raise ValueError(
            "Valid values for `indexing` are 'xy' and 'ij'.")

    # Give input i the shape (1, ..., 1, -1, 1, ..., 1): its own length in
    # dimension i, singleton everywhere else, ready for broadcasting.
    unit = (1,) * ndim
    output = []
    for i, x in enumerate(xi):
        output.append(np.asanyarray(x).reshape(unit[:i] + (-1,) + unit[i + 1:]))
    shape = [arr.size for arr in output]

    if indexing == 'xy' and ndim > 1:
        # Cartesian indexing: swap the roles of the first two axes.
        output[0].shape = (1, -1) + (1,) * (ndim - 2)
        output[1].shape = (-1, 1) + (1,) * (ndim - 2)
        shape[0], shape[1] = shape[1], shape[0]

    if sparse:
        return [arr.copy() for arr in output] if copy_ else output

    # Return the full N-D matrices (not only the 1-D vectors).
    if copy_:
        mult_fact = np.ones(shape, dtype=int)
        return [arr * mult_fact for arr in output]
    return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a one
    dimensional array, this returns those entries not returned by
    `arr[obj]`.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int or array of ints
        Indicate which sub-arrays to remove.
    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.

    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed. Note
        that `delete` does not occur in-place. If `axis` is None, `out` is
        a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    append : Append elements at the end of an array.

    Notes
    -----
    Often it is preferable to use a boolean mask. For example:

    >>> mask = np.ones(len(arr), dtype=bool)
    >>> mask[[0,2,4]] = False
    >>> result = arr[mask,...]

    Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
    use of `mask`.

    Examples
    --------
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> arr
    array([[ 1,  2,  3,  4],
           [ 5,  6,  7,  8],
           [ 9, 10, 11, 12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1,  2,  3,  4],
           [ 9, 10, 11, 12]])

    >>> np.delete(arr, np.s_[::2], 1)
    array([[ 2,  4],
           [ 6,  8],
           [10, 12]])
    >>> np.delete(arr, [1,3,5], None)
    array([ 1,  3,  5,  7,  8,  9, 10, 11, 12])

    """
    # Remember the subclass' __array_wrap__ (if any) so the result can be
    # re-wrapped into the caller's ndarray subclass at every return point.
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass

    arr = asarray(arr)
    ndim = arr.ndim
    # Keep the output in the same memory order as the input.
    arrorder = 'F' if arr.flags.fnc else 'C'
    if axis is None:
        # axis=None means "operate on the flattened array".
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    if ndim == 0:
        # Deprecated scalar handling: returns the (copied) input untouched.
        # 2013-09-24, 1.9
        warnings.warn(
            "in the future the special handling of scalars will be removed "
            "from delete and raise an error", DeprecationWarning, stacklevel=2)
        if wrap:
            return wrap(arr)
        else:
            return arr.copy(order=arrorder)

    # slobj selects "all" on every axis; individual entries are overwritten
    # below to address ranges along `axis` only.
    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)

    if isinstance(obj, slice):
        # Fast path: a slice can be removed with three block copies
        # (head, tail, and strided middle) instead of fancy indexing.
        start, stop, step = obj.indices(N)
        xr = range(start, stop, step)
        numtodel = len(xr)

        if numtodel <= 0:
            # Empty slice: nothing to delete, return a plain copy.
            if wrap:
                return wrap(arr.copy(order=arrorder))
            else:
                return arr.copy(order=arrorder)

        # Invert if step is negative:
        if step < 0:
            step = -step
            start = xr[-1]
            stop = xr[0] + 1

        newshape[axis] -= numtodel
        new = empty(newshape, arr.dtype, arrorder)
        # copy initial chunk
        if start == 0:
            pass
        else:
            slobj[axis] = slice(None, start)
            new[slobj] = arr[slobj]
        # copy end chunck
        if stop == N:
            pass
        else:
            slobj[axis] = slice(stop-numtodel, None)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(stop, None)
            new[slobj] = arr[slobj2]
        # copy middle pieces
        if step == 1:
            pass
        else:  # use array indexing.
            # Boolean mask over [start, stop): keep everything the slice
            # did not select.
            keep = ones(stop-start, dtype=bool)
            keep[:stop-start:step] = False
            slobj[axis] = slice(start, stop-numtodel)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(start, stop)
            arr = arr[slobj2]
            slobj2[axis] = keep
            new[slobj] = arr[slobj2]
        if wrap:
            return wrap(new)
        else:
            return new

    # Keep the original obj around: the deprecation branches below need to
    # know its exact Python type, not the converted array's.
    _obj = obj
    obj = np.asarray(obj)
    # After removing the special handling of booleans and out of
    # bounds values, the conversion to the array can be removed.
    if obj.dtype == bool:
        warnings.warn(
            "in the future insert will treat boolean arrays and array-likes "
            "as boolean index instead of casting it to integer", FutureWarning, stacklevel=2)
        obj = obj.astype(intp)
    if isinstance(_obj, (int, long, integer)):
        # optimization for a single value
        obj = obj.item()
        if (obj < -N or obj >= N):
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (obj < 0):
            obj += N
        newshape[axis] -= 1
        new = empty(newshape, arr.dtype, arrorder)
        # Two block copies: everything before the index, everything after.
        slobj[axis] = slice(None, obj)
        new[slobj] = arr[slobj]
        slobj[axis] = slice(obj, None)
        slobj2 = [slice(None)]*ndim
        slobj2[axis] = slice(obj+1, None)
        new[slobj] = arr[slobj2]
    else:
        if obj.size == 0 and not isinstance(_obj, np.ndarray):
            # Empty list/tuple: safe to treat as "delete nothing".
            obj = obj.astype(intp)
        if not np.can_cast(obj, intp, 'same_kind'):
            # obj.size = 1 special case always failed and would just
            # give superfluous warnings.
            # 2013-09-24, 1.9
            warnings.warn(
                "using a non-integer array as obj in delete will result in an "
                "error in the future", DeprecationWarning, stacklevel=2)
            obj = obj.astype(intp)
        keep = ones(N, dtype=bool)

        # Test if there are out of bound indices, this is deprecated
        inside_bounds = (obj < N) & (obj >= -N)
        if not inside_bounds.all():
            # 2013-09-24, 1.9
            warnings.warn(
                "in the future out of bounds indices will raise an error "
                "instead of being ignored by `numpy.delete`.",
                DeprecationWarning, stacklevel=2)
            obj = obj[inside_bounds]
        positive_indices = obj >= 0
        if not positive_indices.all():
            warnings.warn(
                "in the future negative indices will not be ignored by "
                "`numpy.delete`.", FutureWarning, stacklevel=2)
            obj = obj[positive_indices]

        # Deletion via boolean mask along `axis`.
        keep[obj, ] = False
        slobj[axis] = keep
        new = arr[slobj]

    if wrap:
        return wrap(new)
    else:
        return new
def insert(arr, obj, values, axis=None):
    """
    Insert values along the given axis before the given indices.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : int, slice or sequence of ints
        Object that defines the index or indices before which `values` is
        inserted.

        .. versionadded:: 1.8.0

        Support for multiple insertions when `obj` is a single scalar or a
        sequence with one element (similar to calling insert multiple
        times).
    values : array_like
        Values to insert into `arr`. If the type of `values` is different
        from that of `arr`, `values` is converted to the type of `arr`.
        `values` should be shaped so that ``arr[...,obj,...] = values``
        is legal.
    axis : int, optional
        Axis along which to insert `values`. If `axis` is None then `arr`
        is flattened first.

    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted. Note that `insert`
        does not occur in-place: a new array is returned. If
        `axis` is None, `out` is a flattened array.

    See Also
    --------
    append : Append elements at the end of an array.
    concatenate : Join a sequence of arrays along an existing axis.
    delete : Delete elements from an array.

    Notes
    -----
    Note that for higher dimensional inserts `obj=0` behaves very different
    from `obj=[0]` just like `arr[:,0,:] = values` is different from
    `arr[:,[0],:] = values`.

    Examples
    --------
    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
    >>> a
    array([[1, 1],
           [2, 2],
           [3, 3]])
    >>> np.insert(a, 1, 5)
    array([1, 5, 1, 2, 2, 3, 3])
    >>> np.insert(a, 1, 5, axis=1)
    array([[1, 5, 1],
           [2, 5, 2],
           [3, 5, 3]])

    Difference between sequence and scalars:

    >>> np.insert(a, [1], [[1],[2],[3]], axis=1)
    array([[1, 1, 1],
           [2, 2, 2],
           [3, 3, 3]])
    >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
    ...                np.insert(a, [1], [[1],[2],[3]], axis=1))
    True

    >>> b = a.flatten()
    >>> b
    array([1, 1, 2, 2, 3, 3])
    >>> np.insert(b, [2, 2], [5, 6])
    array([1, 1, 5, 6, 2, 2, 3, 3])

    >>> np.insert(b, slice(2, 4), [5, 6])
    array([1, 1, 5, 2, 6, 2, 3, 3])

    >>> np.insert(b, [2, 2], [7.13, False]) # type casting
    array([1, 1, 7, 0, 2, 2, 3, 3])

    >>> x = np.arange(8).reshape(2, 4)
    >>> idx = (1, 3)
    >>> np.insert(x, idx, 999, axis=1)
    array([[  0, 999,   1,   2, 999,   3],
           [  4, 999,   5,   6, 999,   7]])

    """
    # Remember the subclass' __array_wrap__ (if any) so the result can be
    # re-wrapped into the caller's ndarray subclass at every return point.
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass

    arr = asarray(arr)
    ndim = arr.ndim
    # Keep the output in the same memory order as the input.
    arrorder = 'F' if arr.flags.fnc else 'C'
    if axis is None:
        # axis=None means "operate on the flattened array".
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    else:
        if ndim > 0 and (axis < -ndim or axis >= ndim):
            raise IndexError(
                "axis %i is out of bounds for an array of "
                "dimension %i" % (axis, ndim))
        if (axis < 0):
            axis += ndim
    if (ndim == 0):
        # Deprecated scalar handling: overwrite the scalar with `values`.
        # 2013-09-24, 1.9
        warnings.warn(
            "in the future the special handling of scalars will be removed "
            "from insert and raise an error", DeprecationWarning, stacklevel=2)
        arr = arr.copy(order=arrorder)
        arr[...] = values
        if wrap:
            return wrap(arr)
        else:
            return arr
    # slobj selects "all" on every axis; individual entries are overwritten
    # below to address ranges along `axis` only.
    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)

    if isinstance(obj, slice):
        # turn it into a range object
        indices = arange(*obj.indices(N), **{'dtype': intp})
    else:
        # need to copy obj, because indices will be changed in-place
        indices = np.array(obj)
        if indices.dtype == bool:
            # See also delete
            warnings.warn(
                "in the future insert will treat boolean arrays and "
                "array-likes as a boolean index instead of casting it to "
                "integer", FutureWarning, stacklevel=2)
            indices = indices.astype(intp)
            # Code after warning period:
            #if obj.ndim != 1:
            #    raise ValueError('boolean array argument obj to insert '
            #                     'must be one dimensional')
            #indices = np.flatnonzero(obj)
        elif indices.ndim > 1:
            raise ValueError(
                "index array argument obj to insert must be one dimensional "
                "or scalar")
    if indices.size == 1:
        # Single insertion point: three block copies (before, values, after).
        index = indices.item()
        if index < -N or index > N:
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (index < 0):
            index += N

        # There are some object array corner cases here, but we cannot avoid
        # that:
        values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
        if indices.ndim == 0:
            # broadcasting is very different here, since a[:,0,:] = ... behaves
            # very different from a[:,[0],:] = ...! This changes values so that
            # it works likes the second case. (here a[:,0:1,:])
            values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
        numnew = values.shape[axis]
        newshape[axis] += numnew
        new = empty(newshape, arr.dtype, arrorder)
        slobj[axis] = slice(None, index)
        new[slobj] = arr[slobj]
        slobj[axis] = slice(index, index+numnew)
        new[slobj] = values
        slobj[axis] = slice(index+numnew, None)
        slobj2 = [slice(None)] * ndim
        slobj2[axis] = slice(index, None)
        new[slobj] = arr[slobj2]
        if wrap:
            return wrap(new)
        return new
    elif indices.size == 0 and not isinstance(obj, np.ndarray):
        # Can safely cast the empty list to intp
        indices = indices.astype(intp)

    if not np.can_cast(indices, intp, 'same_kind'):
        # 2013-09-24, 1.9
        warnings.warn(
            "using a non-integer array as obj in insert will result in an "
            "error in the future", DeprecationWarning, stacklevel=2)
        indices = indices.astype(intp)

    indices[indices < 0] += N

    numnew = len(indices)
    # Stable sort so that equal indices keep their relative order; shifting
    # each index by its rank accounts for the elements inserted before it.
    order = indices.argsort(kind='mergesort')   # stable sort
    indices[order] += np.arange(numnew)

    newshape[axis] += numnew
    # old_mask marks the slots along `axis` that still hold original data.
    old_mask = ones(newshape[axis], dtype=bool)
    old_mask[indices] = False

    new = empty(newshape, arr.dtype, arrorder)
    slobj2 = [slice(None)]*ndim
    slobj[axis] = indices
    slobj2[axis] = old_mask
    new[slobj] = values
    new[slobj2] = arr

    if wrap:
        return wrap(new)
    return new
def append(arr, values, axis=None):
    """
    Append values to the end of an array.

    Parameters
    ----------
    arr : array_like
        Values are appended to a copy of this array.
    values : array_like
        These values are appended to a copy of `arr`. It must be of the
        correct shape (the same shape as `arr`, excluding `axis`). If
        `axis` is not specified, `values` can be any shape and will be
        flattened before use.
    axis : int, optional
        The axis along which `values` are appended. If `axis` is not
        given, both `arr` and `values` are flattened before use.

    Returns
    -------
    append : ndarray
        A copy of `arr` with `values` appended to `axis`. Note that
        `append` does not occur in-place: a new array is allocated and
        filled. If `axis` is None, `out` is a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    delete : Delete elements from an array.

    Examples
    --------
    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])

    When `axis` is specified, `values` must have the correct shape.

    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])
    >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
    Traceback (most recent call last):
    ...
    ValueError: arrays must have same number of dimensions

    """
    base = asanyarray(arr)
    if axis is None:
        # Without an axis, everything is flattened and appended at the end.
        if base.ndim != 1:
            base = base.ravel()
        values = ravel(values)
        axis = base.ndim - 1
    return concatenate((base, values), axis=axis)
| bsd-3-clause |
FrederichRiver/neutrino | applications/venus/venus/stock_flag.py | 1 | 2566 | #!/usr/bin/python3
from venus.stock_base import StockEventBase
class EventStockFlag(StockEventBase):
    """Maintain status flags on securities in the stock-manager table.

    Flags (single characters written to ``formStockManager.flag``):
    ``'i'`` index, ``'t'`` ordinary stock, ``'b'`` B-share, ``'h'`` HK stock.

    NOTE(review): assumes ``self.mysql`` (provided by ``StockEventBase``)
    exposes ``select_values`` returning a pandas DataFrame and a SQLAlchemy
    ``session`` — confirm against StockEventBase.
    """

    def flag_quit_stock(self, stock_code):
        """Return True if `stock_code` has no trade recorded in >150 days.

        Returns False when trade data exists and is recent, or when no
        trade data is available at all.
        """
        import datetime
        result = self.mysql.select_values(stock_code, 'trade_date')
        if not result.empty:
            trade_dates = result[0].tolist()
            idle = datetime.date.today() - trade_dates[-1]
            return idle.days > 150
        return False

    def _set_flag(self, stock_code, flag):
        """Write `flag` into the manager table for `stock_code`.

        Shared implementation behind the public ``flag_*`` methods, which
        were previously four identical copies differing only in the flag
        character. Returns 1 after a successful update.
        """
        from venus.form import formStockManager
        query = self.mysql.session.query(
            formStockManager.stock_code,
            formStockManager.flag
        ).filter_by(stock_code=stock_code)
        if query:
            query.update({"flag": flag})
            self.mysql.session.commit()
            return 1

    def flag_index(self, stock_code):
        """Mark `stock_code` as an index ('i')."""
        return self._set_flag(stock_code, 'i')

    def flag_stock(self, stock_code):
        """Mark `stock_code` as an ordinary stock ('t')."""
        return self._set_flag(stock_code, 't')

    def flag_b_stock(self, stock_code):
        """Mark `stock_code` as a B-share ('b')."""
        return self._set_flag(stock_code, 'b')

    def flag_hk_stock(self, stock_code):
        """Mark `stock_code` as a Hong Kong stock ('h')."""
        return self._set_flag(stock_code, 'h')
if __name__ == "__main__":
    import re
    from dev_global.env import GLOBAL_HEADER
    from venus.stock_flag import EventStockFlag

    # Flag every index security (Shanghai / Shenzhen index code prefixes).
    event = EventStockFlag(GLOBAL_HEADER)
    for stock_code in event.get_all_security_list():
        if re.match(r'^SH000|^SH950|^SZ399', stock_code):
            event.flag_index(stock_code)
| bsd-3-clause |
elijah513/scikit-learn | sklearn/linear_model/tests/test_base.py | 120 | 10082 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
    # Fit LinearRegression on a minimal two-point dataset and verify the
    # recovered slope, intercept and predictions.
    X, Y = [[1], [2]], [1, 2]
    model = LinearRegression().fit(X, Y)
    assert_array_almost_equal(model.coef_, [1])
    assert_array_almost_equal(model.intercept_, [0])
    assert_array_almost_equal(model.predict(X), [1, 2])

    # A degenerate single-sample input must still fit without error.
    X, Y = [[1]], [0]
    model = LinearRegression().fit(X, Y)
    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(model.intercept_, [0])
    assert_array_almost_equal(model.predict(X), [0])
def test_fit_intercept():
    # coef_.shape must be independent of whether an intercept is fitted.
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])

    fit2_no = LinearRegression(fit_intercept=False).fit(X2, y)
    fit2_yes = LinearRegression(fit_intercept=True).fit(X2, y)
    fit3_no = LinearRegression(fit_intercept=False).fit(X3, y)
    fit3_yes = LinearRegression(fit_intercept=True).fit(X3, y)

    assert_equal(fit2_yes.coef_.shape, fit2_no.coef_.shape)
    assert_equal(fit3_yes.coef_.shape, fit3_no.coef_.shape)
    assert_equal(fit2_no.coef_.ndim, fit3_no.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    "Test that linear regression also works with sparse data"
    rng = check_random_state(random_state)
    for _ in range(10):
        n = 100
        X = sparse.eye(n, n)
        beta = rng.rand(n)
        y = X * beta[:, np.newaxis]

        # With an identity design the fit must reproduce beta exactly.
        ols = LinearRegression()
        ols.fit(X, y.ravel())
        assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
        assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions"
    X, y = make_regression(random_state=random_state)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    # Fit on two identical targets stacked column-wise.
    model = LinearRegression(fit_intercept=True)
    model.fit(X, Y)
    assert_equal(model.coef_.shape, (2, n_features))
    Y_pred = model.predict(X)

    # A single-target fit must agree with each column of the 2-target fit.
    model.fit(X, y)
    y_pred = model.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions with sparse data"
    rng = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=rng)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    # Two identical targets: coef_ becomes (2, n_features).
    ols = LinearRegression()
    ols.fit(X, Y)
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_pred = ols.predict(X)

    # Single-target predictions must match each multi-target column.
    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
    # Verify center_data for the three supported fit_intercept/normalize
    # combinations: returned means, scales and transformed arrays.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)

    # fit_intercept=False: data passes through unchanged.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
                                                normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)

    # fit_intercept=True, normalize=False: means removed, no scaling.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)

    # fit_intercept=True, normalize=True: means removed and X scaled.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
    # Both centering helpers must handle multi-output targets identically.
    n_samples, n_features, n_outputs = 200, 3, 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)

    for center, data in [(center_data, X),
                         (sparse_center_data, sparse.csc_matrix(X))]:
        # Without an intercept, y is untouched.
        _, yt, _, y_mean, _ = center(data, y, fit_intercept=False,
                                     normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)

        # With an intercept, each output column is mean-centered.
        _, yt, _, y_mean, _ = center(data, y, fit_intercept=True,
                                     normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)

        # normalize only affects X, not y.
        _, yt, _, y_mean, _ = center(data, y, fit_intercept=True,
                                     normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
    # center_data with sample weights: means are weighted averages.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)

    # XXX: if normalize=True, should we expect a weighted standard deviation?
    #      Currently not weighted, but calculated with respect to weighted mean
    # XXX: currently scaled to variance=n_samples
    expected_X_std = (np.sqrt(X.shape[0]) *
                      np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)

    # Weighted centering, no scaling.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)

    # Weighted centering with scaling of X.
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
    # sparse_center_data: sparse X is never actually centered (that would
    # densify it); only the reported means/scales and y change.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])

    # fit_intercept=False: everything passes through unchanged.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=False,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)

    # fit_intercept=True: means reported and y centered; X left as-is.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))

    # normalize=True additionally scales X by the (uncentered) std.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt.A, XA / expected_X_std)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
    # sparse_center_data must preserve the CSR format of its input.
    X, y = make_regression()
    X[X < 2.5] = 0.0
    centered, y, _, _, _ = sparse_center_data(sparse.csr_matrix(X), y, True)
    assert_equal(centered.getformat(), 'csr')
| bsd-3-clause |
xzh86/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
    """Temporarily redirect stdout and stderr to os.devnull.

    Fixes two defects of the previous version: the devnull file objects
    were never closed (resource leak), and an exception raised inside the
    ``with`` body skipped the restoration, leaving the process silenced.
    """
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = open(os.devnull, 'w')
    sys.stderr = open(os.devnull, 'w')
    try:
        yield
    finally:
        # Flush and close the devnull handles, then always restore the
        # original streams, even if the body raised.
        sys.stdout.flush()
        sys.stderr.flush()
        sys.stdout.close()
        sys.stderr.close()
        sys.stdout = old_stdout
        sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
    """Build a 1-D regression problem with outliers; returns (X, y, w, c)."""
    rng = np.random.RandomState(0)
    # Linear model y = 3*x + N(2, 0.1**2)
    slope = 3.
    if intercept:
        offset, n_samples = 2., 50
    else:
        offset, n_samples = 0.1, 100
    x = rng.normal(size=n_samples)
    noise = 0.1 * rng.normal(size=n_samples)
    y = slope * x + offset + noise

    # Overwrite a handful of points with outliers.
    if intercept:
        outliers = ((42, -2, 4), (43, -2.5, 8), (33, 2.5, 1), (49, 2.1, 2))
    else:
        outliers = ((42, -2, 4), (43, -2.5, 8), (53, 2.5, 1), (60, 2.1, 2),
                    (72, 1.8, -7))
    for i, xi, yi in outliers:
        x[i], y[i] = xi, yi
    return x[:, np.newaxis], y, slope, offset
def gen_toy_problem_2d():
    """Build a 2-feature regression problem with 10% outliers."""
    rng = np.random.RandomState(0)
    n_samples = 100
    # Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
    X = rng.normal(size=(n_samples, 2))
    w = np.array([5., 10.])
    c = 1.
    y = X.dot(w) + c + 0.1 * rng.normal(size=n_samples)
    # Replace a tenth of the targets with large-noise outliers.
    n_outliers = n_samples // 10
    idx = rng.randint(0, n_samples, size=n_outliers)
    y[idx] = 50 * rng.normal(size=n_outliers)
    return X, y, w, c
def gen_toy_problem_4d():
    """Build a 4-feature regression problem with 10% outliers."""
    rng = np.random.RandomState(0)
    n_samples = 10000
    # Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
    X = rng.normal(size=(n_samples, 4))
    w = np.array([5., 10., 42., 7.])
    c = 1.
    y = X.dot(w) + c + 0.1 * rng.normal(size=n_samples)
    # Replace a tenth of the targets with large-noise outliers.
    n_outliers = n_samples // 10
    idx = rng.randint(0, n_samples, size=n_outliers)
    y[idx] = 50 * rng.normal(size=n_outliers)
    return X, y, w, c
def test_modweiszfeld_step_1d():
    X = np.array([1., 2., 3.]).reshape(3, 1)
    median = 2.
    # A start value that is both an element of X and the solution is a
    # fixed point of the step.
    assert_array_almost_equal(_modified_weiszfeld_step(X, median), median)

    # Non-solution start values (one outside X, one an element of X) must
    # move strictly towards the median.
    for start in (2.5, 3.):
        stepped = _modified_weiszfeld_step(X, start)
        assert_array_less(median, stepped)
        assert_array_less(stepped, start)

    # With a single sample the step is the identity.
    X = np.array([1., 2., 3.]).reshape(1, 3)
    y = X[0, ]
    assert_array_equal(_modified_weiszfeld_step(X, y), y)
def test_modweiszfeld_step_2d():
    X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
    start = np.array([0.5, 0.5])
    # Check the first two iterations against precomputed values.
    step1 = _modified_weiszfeld_step(X, start)
    assert_array_almost_equal(step1, np.array([1 / 3, 2 / 3]))
    step2 = _modified_weiszfeld_step(X, step1)
    assert_array_almost_equal(step2, np.array([0.2792408, 0.7207592]))
    # The spatial median is a fixed point of the iteration.
    fixpoint = np.array([0.21132505, 0.78867497])
    assert_array_almost_equal(_modified_weiszfeld_step(X, fixpoint), fixpoint)
def test_spatial_median_1d():
    X = np.array([1., 2., 3.]).reshape(3, 1)
    _, median = _spatial_median(X)
    assert_array_almost_equal(median, 2.)

    # In 1-D the spatial median coincides exactly with np.median.
    rng = np.random.RandomState(0)
    X = rng.randint(100, size=(1000, 1))
    _, median = _spatial_median(X)
    assert_array_equal(median, np.median(X.ravel()))
def test_spatial_median_2d():
    X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
    _, median = _spatial_median(X, max_iter=100, tol=1.e-6)

    def cost_func(y):
        # Fermat-Weber objective: sum of Euclidean distances to the samples.
        return np.sum(np.array([norm(x - y) for x in X]))

    # The spatial median minimizes the Fermat-Weber location problem.
    minimizer = fmin_bfgs(cost_func, median, disp=False)
    assert_array_almost_equal(median, minimizer)

    # Exceeding max_iter must emit a convergence warning.
    assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
    X, y, w, c = gen_toy_problem_1d()
    # Ordinary least squares is thrown off by the outliers...
    assert_greater(np.abs(LinearRegression().fit(X, y).coef_ - w), 0.9)
    # ...while Theil-Sen recovers slope and intercept.
    robust = TheilSenRegressor(random_state=0).fit(X, y)
    assert_array_almost_equal(robust.coef_, w, 1)
    assert_array_almost_equal(robust.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
    X, y, w, c = gen_toy_problem_1d(intercept=False)
    # Least squares without intercept is biased by the outliers.
    lstq = LinearRegression(fit_intercept=False).fit(X, y)
    assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
    # Theil-Sen without intercept recovers the combined slope.
    robust = TheilSenRegressor(fit_intercept=False,
                               random_state=0).fit(X, y)
    assert_array_almost_equal(robust.coef_, w + c, 1)
    assert_almost_equal(robust.intercept_, 0.)
def test_theil_sen_2d():
    X, y, w, c = gen_toy_problem_2d()
    # Least squares fails on the contaminated data...
    assert_greater(norm(LinearRegression().fit(X, y).coef_ - w), 1.0)
    # ...while Theil-Sen recovers the coefficients.
    robust = TheilSenRegressor(max_subpopulation=1e3,
                               random_state=0).fit(X, y)
    assert_array_almost_equal(robust.coef_, w, 1)
    assert_array_almost_equal(robust.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
    # A negative max_subpopulation must be rejected at fit time.
    X, y, _, _ = gen_toy_problem_1d()
    TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
    # n_subsamples below the number of features must be rejected.
    X, y, _, _ = gen_toy_problem_1d()
    TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
    # n_subsamples above the number of samples must be rejected.
    X, y, _, _ = gen_toy_problem_1d()
    TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
    # With fewer samples than features, n_subsamples < n_samples is invalid.
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 20
    X = rng.normal(size=(n_samples, n_features))
    y = rng.normal(size=n_samples)
    TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
    # Estimation from a random subpopulation still recovers the model.
    X, y, w, c = gen_toy_problem_4d()
    model = TheilSenRegressor(max_subpopulation=250,
                              random_state=0).fit(X, y)
    assert_array_almost_equal(model.coef_, w, 1)
    assert_array_almost_equal(model.intercept_, c, 1)
def test_subsamples():
    # With n_subsamples == n_samples Theil-Sen degenerates to least squares.
    X, y, w, c = gen_toy_problem_4d()
    model = TheilSenRegressor(n_subsamples=X.shape[0],
                              random_state=0).fit(X, y)
    lstq = LinearRegression().fit(X, y)
    assert_array_almost_equal(model.coef_, lstq.coef_, 9)
def test_verbosity():
    # Verbose mode must fit without error; output is silenced.
    X, y, _, _ = gen_toy_problem_1d()
    with no_stdout_stderr():
        TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
        TheilSenRegressor(verbose=True,
                          max_subpopulation=10,
                          random_state=0).fit(X, y)
def test_theil_sen_parallel():
    X, y, w, c = gen_toy_problem_2d()
    # Least squares fails on the contaminated data...
    assert_greater(norm(LinearRegression().fit(X, y).coef_ - w), 1.0)
    # ...while parallel Theil-Sen still recovers the model.
    model = TheilSenRegressor(n_jobs=-1,
                              random_state=0,
                              max_subpopulation=2e3).fit(X, y)
    assert_array_almost_equal(model.coef_, w, 1)
    assert_array_almost_equal(model.intercept_, c, 1)
def test_less_samples_than_features():
    """Behavior in the underdetermined regime (n_samples < n_features)."""
    random_state = np.random.RandomState(0)
    n_samples, n_features = 10, 20
    X = random_state.normal(size=(n_samples, n_features))
    y = random_state.normal(size=n_samples)
    # Check that Theil-Sen falls back to Least Squares if fit_intercept=False
    theil_sen = TheilSenRegressor(fit_intercept=False,
                                  random_state=0).fit(X, y)
    lstq = LinearRegression(fit_intercept=False).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
    # Check fit_intercept=True case. This will not be equal to the Least
    # Squares solution since the intercept is calculated differently.
    theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
    y_pred = theil_sen.predict(X)
    # Underdetermined system: predictions interpolate the training targets.
    assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
jereze/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 76 | 45197 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
# Shared RNG so test data is reproducible across the whole module.
rng = np.random.RandomState(0)

# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]

# All scipy sparse formats the estimators should accept, plus dense input.
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
                lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)

# Neighbor-search backends exercised by most tests below.
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
# Minkowski p-norms to test, including the Chebyshev limit (p=inf).
P = (1, 2, 3, 4, np.inf)

# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
    neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
                                 n_query_pts=2, n_neighbors=5):
    # Test unsupervised neighbors methods
    # All backends must agree on both distances and indices for every p-norm.
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for p in P:
        results_nodist = []
        results = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                               algorithm=algorithm,
                                               p=p)
            neigh.fit(X)
            results_nodist.append(neigh.kneighbors(test,
                                                   return_distance=False))
            results.append(neigh.kneighbors(test, return_distance=True))
        for i in range(len(results) - 1):
            # return_distance=False must give the same indices...
            assert_array_almost_equal(results_nodist[i], results[i][1])
            # ...and consecutive algorithms must agree pairwise.
            assert_array_almost_equal(results[i][0], results[i + 1][0])
            assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
    # test the types of valid input into NearestNeighbors
    X = rng.random_sample((10, 3))
    nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
    nbrs_fid.fit(X)
    dist1, ind1 = nbrs_fid.kneighbors(X)
    nbrs = neighbors.NearestNeighbors(n_neighbors=1)
    # Renamed loop variable from `input`, which shadowed the builtin.
    for data in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
        # Fitting on a fitted estimator or a prebuilt tree must reproduce
        # the result of fitting on the raw array.
        nbrs.fit(data)
        dist2, ind2 = nbrs.kneighbors(X)
        assert_array_almost_equal(dist1, dist2)
        assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
    """Tests unsupervised NearestNeighbors with a distance matrix."""
    # Note: smaller samples may result in spurious test success
    rng = np.random.RandomState(random_state)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((3, 4))
    DXX = metrics.pairwise_distances(X, metric='euclidean')
    DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
    for method in ['kneighbors']:
        # TODO: also test radius_neighbors, but requires different assertion
        # As a feature matrix (n_samples by n_features)
        nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
        nbrs_X.fit(X)
        dist_X, ind_X = getattr(nbrs_X, method)(Y)
        # As a dense distance matrix (n_samples by n_samples)
        nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
                                            metric='precomputed')
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)
        # Check auto works too
        nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
                                            metric='precomputed')
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)
        # Check X=None in prediction
        dist_X, ind_X = getattr(nbrs_X, method)(None)
        dist_D, ind_D = getattr(nbrs_D, method)(None)
        assert_array_almost_equal(dist_X, dist_D)
        assert_array_almost_equal(ind_X, ind_D)
        # Must raise a ValueError if the matrix is not of correct shape
        assert_raises(ValueError, getattr(nbrs_D, method), X)
    target = np.arange(X.shape[0])
    # Supervised estimators: precomputed distances must predict the same
    # as raw features with an euclidean metric.
    for Est in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        print(Est)  # identifies the failing estimator in test output
        est = Est(metric='euclidean')
        est.radius = est.n_neighbors = 1
        pred_X = est.fit(X, target).predict(Y)
        est.metric = 'precomputed'
        pred_D = est.fit(DXX, target).predict(DYX)
        assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
    # Ensure array is split correctly
    # Cross-validation scores with a precomputed distance matrix must match
    # scores computed from raw features with the same metric.
    rng = np.random.RandomState(0)
    X = rng.rand(20, 2)
    D = pairwise_distances(X, metric='euclidean')
    y = rng.randint(3, size=20)
    for Est in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        metric_score = cross_val_score(Est(), X, y)
        precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
        assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
                                       n_query_pts=2, radius=0.5,
                                       random_state=0):
    # Test unsupervised radius-based query
    # All backends must return the same neighbor sets within `radius`.
    rng = np.random.RandomState(random_state)
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for p in P:
        results = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(radius=radius,
                                               algorithm=algorithm,
                                               p=p)
            neigh.fit(X)
            ind1 = neigh.radius_neighbors(test, return_distance=False)
            # sort the results: this is not done automatically for
            # radius searches
            dist, ind = neigh.radius_neighbors(test, return_distance=True)
            for (d, i, i1) in zip(dist, ind, ind1):
                j = d.argsort()
                d[:] = d[j]
                i[:] = i[j]
                i1[:] = i1[j]
            results.append((dist, ind))
            assert_array_almost_equal(np.concatenate(list(ind)),
                                      np.concatenate(list(ind1)))
        for i in range(len(results) - 1):
            # Removed a stray trailing comma that used to wrap this
            # assertion in a throwaway one-element tuple.
            assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
            assert_array_almost_equal(np.concatenate(list(results[i][1])),
                                      np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
                               n_features=5,
                               n_test_pts=10,
                               n_neighbors=5,
                               random_state=0):
    # Test k-neighbors classification
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # Binary label: 1 inside the unit-radius-sqrt(.5) ball, 0 outside.
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
            knn.fit(X, y)
            # Perturb training points slightly; labels must be unchanged.
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y[:n_test_pts])
            # Test prediction with y_str
            knn.fit(X, y_str)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
                                            n_test_pts=10, n_neighbors=5,
                                            random_state=0):
    # Test k-neighbors classification
    # Float-typed class labels must work the same as integer labels.
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(X, y.astype(np.float))
    epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
    y_pred = knn.predict(X[:n_test_pts] + epsilon)
    assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
    # Test KNeighborsClassifier.predict_proba() method
    X = np.array([[0, 2, 0],
                  [0, 2, 1],
                  [2, 0, 0],
                  [2, 2, 0],
                  [0, 0, 2],
                  [0, 0, 1]])
    y = np.array([4, 4, 5, 5, 1, 1])
    cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1)  # cityblock dist
    cls.fit(X, y)
    y_prob = cls.predict_proba(X)
    # Expected class fractions among the 3 nearest neighbors; columns are
    # the sorted classes (1, 4, 5).
    real_prob = np.array([[0, 2. / 3, 1. / 3],
                          [1. / 3, 2. / 3, 0],
                          [1. / 3, 0, 2. / 3],
                          [0, 1. / 3, 2. / 3],
                          [2. / 3, 1. / 3, 0],
                          [2. / 3, 1. / 3, 0]])
    assert_array_equal(real_prob, y_prob)
    # Check that it also works with non integer labels
    cls.fit(X, y.astype(str))
    y_prob = cls.predict_proba(X)
    assert_array_equal(real_prob, y_prob)
    # Check that it works with weights='distance'
    cls = neighbors.KNeighborsClassifier(
        n_neighbors=2, p=1, weights='distance')
    cls.fit(X, y)
    y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
    real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
    assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     radius=0.5,
                                     random_state=0):
    # Test radius-based classification
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    y_str = y.astype(str)
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                        weights=weights,
                                                        algorithm=algorithm)
            neigh.fit(X, y)
            # Slightly perturbed training points must keep their labels.
            epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y[:n_test_pts])
            # Same check with string labels.
            neigh.fit(X, y_str)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
    # Test radius-based classifier when no neighbors found.
    # In this case it should rise an informative exception
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1
    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # no outliers
    z2 = np.array([[1.01, 1.01], [1.4, 1.4]])    # one outlier
    weight_func = _weight_func
    for outlier_label in [0, -1, None]:
        for algorithm in ALGORITHMS:
            for weights in ['uniform', 'distance', weight_func]:
                rnc = neighbors.RadiusNeighborsClassifier
                clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
                          outlier_label=outlier_label)
                clf.fit(X, y)
                assert_array_equal(np.array([1, 2]),
                                   clf.predict(z1))
                if outlier_label is None:
                    # No outlier label configured: predicting a point with
                    # no neighbors must raise.
                    assert_raises(ValueError, clf.predict, z2)
                else:
                    # Previously dead code behind `elif False:`; with an
                    # outlier label set, the orphan query point must receive
                    # it (same behavior verified in
                    # test_radius_neighbors_classifier_outlier_labeling).
                    assert_array_equal(np.array([1, outlier_label]),
                                       clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
    # Test radius-based classifier when no neighbors found and outliers
    # are labeled.
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1
    z1 = np.array([[1.01, 1.01], [2.01, 2.01]])  # no outliers
    z2 = np.array([[1.01, 1.01], [1.4, 1.4]])    # one outlier
    correct_labels1 = np.array([1, 2])
    # The outlier query point must get the configured label (-1).
    correct_labels2 = np.array([1, -1])
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm,
                                                      outlier_label=-1)
            clf.fit(X, y)
            assert_array_equal(correct_labels1, clf.predict(z1))
            assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
    # Test radius-based classifier, when distance to a sample is zero.
    # Exercises the weights='distance' division-by-zero handling.
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    radius = 0.1
    z1 = np.array([[1.01, 1.01], [2.0, 2.0]])  # second query is exactly X[1]
    correct_labels1 = np.array([1, 2])
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(radius=radius,
                                                      weights=weights,
                                                      algorithm=algorithm)
            clf.fit(X, y)
            assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
    # Test radius-based regressor, when distance to a sample is zero.
    X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
    y = np.array([1.0, 1.5, 2.0, 0.0])
    radius = 0.2
    z = np.array([[1.1, 1.1], [2.0, 2.0]])
    # Expected values: uniform mean vs. zero-distance dominance for
    # weights='distance' (an exact match gets all the weight).
    rnn_correct_labels = np.array([1.25, 2.0])
    knn_correct_unif = np.array([1.25, 1.0])
    knn_correct_dist = np.array([1.25, 2.0])
    for algorithm in ALGORITHMS:
        # we don't test for weights=_weight_func since user will be expected
        # to handle zero distances themselves in the function.
        for weights in ['uniform', 'distance']:
            rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
                                                     weights=weights,
                                                     algorithm=algorithm)
            rnn.fit(X, y)
            assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
        for weights, corr_labels in zip(['uniform', 'distance'],
                                        [knn_correct_unif, knn_correct_dist]):
            knn = neighbors.KNeighborsRegressor(n_neighbors=2,
                                                weights=weights,
                                                algorithm=algorithm)
            knn.fit(X, y)
            assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
    """Test whether points lying on boundary are handled consistently

    Also ensures that even with only one query point, an object array
    is returned rather than a 2d array.
    """
    # Query point 0.0: distances are 1.5, 3.0 (on the boundary, included)
    # and 3.01 (excluded).
    X = np.array([[1.5], [3.0], [3.01]])
    radius = 3.0
    for algorithm in ALGORITHMS:
        nbrs = neighbors.NearestNeighbors(radius=radius,
                                          algorithm=algorithm).fit(X)
        results = nbrs.radius_neighbors([[0.0]], return_distance=False)
        assert_equal(results.shape, (1,))
        assert_equal(results.dtype, object)
        assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
    # Test k-NN classifier on multioutput data
    # Multi-output prediction must equal stacked single-output predictions.
    rng = check_random_state(0)
    n_features = 2
    n_samples = 40
    n_output = 3
    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    weights = [None, 'uniform', 'distance', _weight_func]
    # NOTE: the loop variable reuses the name `weights`; this is safe only
    # because product() consumes the list once, up front.
    for algorithm, weights in product(ALGORITHMS, weights):
        # Stack single output prediction
        y_pred_so = []
        for o in range(n_output):
            rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                      algorithm=algorithm)
            rnn.fit(X_train, y_train[:, o])
            y_pred_so.append(rnn.predict(X_test))
        y_pred_so = np.vstack(y_pred_so).T
        assert_equal(y_pred_so.shape, y_test.shape)
        # Multioutput prediction
        rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
                                                     algorithm=algorithm)
        rnn_mo.fit(X_train, y_train)
        y_pred_mo = rnn_mo.predict(X_test)
        assert_equal(y_pred_mo.shape, y_test.shape)
        assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
                                      n_features=5,
                                      n_test_pts=10,
                                      n_neighbors=5,
                                      random_state=0):
    # Test k-NN classifier on sparse matrices
    # Like the above, but with various types of sparse matrices
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    X *= X > .2  # sparsify: zero out small-magnitude entries
    y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
    for sparsemat in SPARSE_TYPES:
        knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                             algorithm='auto')
        knn.fit(sparsemat(X), y)
        epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
        # Consistency: use the module-level SPARSE_OR_DENSE alias instead
        # of re-spelling SPARSE_TYPES + (np.asarray,) inline.
        for sparsev in SPARSE_OR_DENSE:
            X_eps = sparsev(X[:n_test_pts] + epsilon)
            y_pred = knn.predict(X_eps)
            assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
    # Test k-NN classifier on multioutput data
    # Multi-output predict/predict_proba must match stacked single-output
    # results.
    rng = check_random_state(0)
    n_features = 5
    n_samples = 50
    n_output = 3
    X = rng.rand(n_samples, n_features)
    y = rng.randint(0, 3, (n_samples, n_output))
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    weights = [None, 'uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        # Stack single output prediction
        y_pred_so = []
        y_pred_proba_so = []
        for o in range(n_output):
            knn = neighbors.KNeighborsClassifier(weights=weights,
                                                 algorithm=algorithm)
            knn.fit(X_train, y_train[:, o])
            y_pred_so.append(knn.predict(X_test))
            y_pred_proba_so.append(knn.predict_proba(X_test))
        y_pred_so = np.vstack(y_pred_so).T
        assert_equal(y_pred_so.shape, y_test.shape)
        assert_equal(len(y_pred_proba_so), n_output)
        # Multioutput prediction
        knn_mo = neighbors.KNeighborsClassifier(weights=weights,
                                                algorithm=algorithm)
        knn_mo.fit(X_train, y_train)
        y_pred_mo = knn_mo.predict(X_test)
        assert_equal(y_pred_mo.shape, y_test.shape)
        assert_array_almost_equal(y_pred_mo, y_pred_so)
        # Check proba
        y_pred_proba_mo = knn_mo.predict_proba(X_test)
        assert_equal(len(y_pred_proba_mo), n_output)
        for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
            assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
                              n_features=5,
                              n_test_pts=10,
                              n_neighbors=3,
                              random_state=0):
    # Test k-neighbors regression
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    # Smooth target: normalized Euclidean norm of each sample.
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y_target = y[:n_test_pts]
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                                weights=weights,
                                                algorithm=algorithm)
            knn.fit(X, y)
            epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = knn.predict(X[:n_test_pts] + epsilon)
            # Loose tolerance: neighbor averaging only approximates y.
            assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
    # Test k-neighbors in multi-output regression with uniform weight
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        knn = neighbors.KNeighborsRegressor(weights=weights,
                                            algorithm=algorithm)
        knn.fit(X_train, y_train)
        # With uniform weights the prediction is the plain mean of the
        # neighbors' targets; recompute it manually and compare.
        neigh_idx = knn.kneighbors(X_test, return_distance=False)
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])
        y_pred = knn.predict(X_test)
        assert_equal(y_pred.shape, y_test.shape)
        assert_equal(y_pred_idx.shape, y_test.shape)
        assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
                                          n_features=5,
                                          n_test_pts=10,
                                          n_neighbors=3,
                                          random_state=0):
    # Test k-neighbors in multi-output regression
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    # Duplicate the target to form a 2-column multi-output problem.
    y = np.vstack([y, y]).T
    y_target = y[:n_test_pts]
    weights = ['uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            weights=weights,
                                            algorithm=algorithm)
        knn.fit(X, y)
        epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        y_pred = knn.predict(X[:n_test_pts] + epsilon)
        assert_equal(y_pred.shape, y_target.shape)
        assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
                                    n_features=3,
                                    n_test_pts=10,
                                    radius=0.5,
                                    random_state=0):
    # Test radius-based neighbors regression
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y_target = y[:n_test_pts]
    weight_func = _weight_func
    for algorithm in ALGORITHMS:
        for weights in ['uniform', 'distance', weight_func]:
            neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
                                                       weights=weights,
                                                       algorithm=algorithm)
            neigh.fit(X, y)
            epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
            y_pred = neigh.predict(X[:n_test_pts] + epsilon)
            # Error bound scales with the neighborhood radius.
            assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
    # Test radius neighbors in multi-output regression (uniform weight)
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        # Fixed stray space in `neighbors. RadiusNeighborsRegressor`.
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X_train, y_train)
        # Uniform-weight prediction is the mean target of each radius
        # neighborhood; recompute it manually and compare.
        neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])
        # (Removed redundant `y_pred_idx = np.array(y_pred_idx)`: the value
        # is already an ndarray from the np.array(...) call above.)
        y_pred = rnn.predict(X_test)
        assert_equal(y_pred_idx.shape, y_test.shape)
        assert_equal(y_pred.shape, y_test.shape)
        assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
                                              n_features=5,
                                              n_test_pts=10,
                                              n_neighbors=3,
                                              random_state=0):
    # Test k-neighbors in multi-output regression with various weight
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = np.sqrt((X ** 2).sum(1))
    y /= y.max()
    y = np.vstack([y, y]).T
    y_target = y[:n_test_pts]
    weights = ['uniform', 'distance', _weight_func]
    for algorithm, weights in product(ALGORITHMS, weights):
        # NOTE(review): `n_neighbors` is passed to a *radius*-based
        # regressor here, which looks suspicious — confirm the estimator
        # signature actually accepts it.
        rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
                                                 weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X, y)
        epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
        y_pred = rnn.predict(X[:n_test_pts] + epsilon)
        assert_equal(y_pred.shape, y_target.shape)
        assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
                                     n_features=5,
                                     n_test_pts=10,
                                     n_neighbors=5,
                                     random_state=0):
    # Test radius-based regression on sparse matrices
    # Like the above, but with various types of sparse matrices
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features) - 1
    y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
    for sparsemat in SPARSE_TYPES:
        knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
                                            algorithm='auto')
        knn.fit(sparsemat(X), y)
        for sparsev in SPARSE_OR_DENSE:
            X2 = sparsev(X)
            # Rounded predictions must recover >95% of the 0/1 targets.
            assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
    # Sanity checks on the iris dataset
    # Puts three points of each label in the plane and performs a
    # nearest neighbor query on points near the decision boundary.
    for algorithm in ALGORITHMS:
        clf = neighbors.KNeighborsClassifier(n_neighbors=1,
                                             algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        # 1-NN on the training set memorizes it exactly.
        assert_array_equal(clf.predict(iris.data), iris.target)
        clf.set_params(n_neighbors=9, algorithm=algorithm)
        clf.fit(iris.data, iris.target)
        assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
        rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
        rgs.fit(iris.data, iris.target)
        assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
                    > 0.95)
def test_neighbors_digits():
    # Sanity check on the digits dataset
    # the 'brute' algorithm has been observed to fail if the input
    # dtype is uint8 due to overflow in distance calculations.
    X = digits.data.astype('uint8')
    Y = digits.target
    (n_samples, n_features) = X.shape
    # 80/20 contiguous train/test split.
    train_test_boundary = int(n_samples * 0.8)
    train = np.arange(0, train_test_boundary)
    test = np.arange(train_test_boundary, n_samples)
    (X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
    clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
    score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
    score_float = clf.fit(X_train.astype(float), Y_train).score(
        X_test.astype(float), Y_test)
    # uint8 and float inputs must give identical accuracy.
    assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
    # Test kneighbors_graph to build the k-Nearest Neighbor graph.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])
    # n_neighbors = 1
    # With include_self behavior, each point's single neighbor is itself.
    A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
    assert_array_equal(A.toarray(), np.eye(A.shape[0]))
    A = neighbors.kneighbors_graph(X, 1, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0.00, 1.01, 0.],
         [1.01, 0., 0.],
         [0.00, 1.40716026, 0.]])
    # n_neighbors = 2
    A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
    assert_array_equal(
        A.toarray(),
        [[1., 1., 0.],
         [1., 1., 0.],
         [0., 1., 1.]])
    A = neighbors.kneighbors_graph(X, 2, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1.01, 2.23606798],
         [1.01, 0., 1.40716026],
         [2.23606798, 1.40716026, 0.]])
    # n_neighbors = 3
    # Every point is connected to every point (graph is complete).
    A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
    assert_array_almost_equal(
        A.toarray(),
        [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
    # Test kneighbors_graph to build the k-Nearest Neighbor graph
    # for sparse input.
    # Sparse (CSR) and dense inputs must produce identical graphs.
    rng = np.random.RandomState(seed)
    X = rng.randn(10, 10)
    Xcsr = csr_matrix(X)
    for n_neighbors in [1, 2, 3]:
        for mode in ["connectivity", "distance"]:
            assert_array_almost_equal(
                neighbors.kneighbors_graph(X,
                                           n_neighbors,
                                           mode=mode).toarray(),
                neighbors.kneighbors_graph(Xcsr,
                                           n_neighbors,
                                           mode=mode).toarray())
def test_radius_neighbors_graph():
    # Test radius_neighbors_graph to build the Nearest Neighbor graph.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])
    # Radius 1.5 connects points 0-1 and 1-2 but not 0-2.
    A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
    assert_array_equal(
        A.toarray(),
        [[1., 1., 0.],
         [1., 1., 1.],
         [0., 1., 1.]])
    A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1.01, 0.],
         [1.01, 0., 1.40716026],
         [0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
    # Test radius_neighbors_graph to build the Nearest Neighbor graph
    # for sparse input.
    rng = np.random.RandomState(seed)
    X = rng.randn(10, 10)
    Xcsr = csr_matrix(X)
    # The loop variable is a radius, not a neighbor count; it was
    # misleadingly named `n_neighbors` before.
    for radius in [1, 2, 3]:
        for mode in ["connectivity", "distance"]:
            # Sparse (CSR) and dense inputs must give identical graphs.
            assert_array_almost_equal(
                neighbors.radius_neighbors_graph(X,
                                                 radius,
                                                 mode=mode).toarray(),
                neighbors.radius_neighbors_graph(Xcsr,
                                                 radius,
                                                 mode=mode).toarray())
def test_neighbors_badargs():
    # Test bad argument values: these should all raise ValueErrors
    assert_raises(ValueError,
                  neighbors.NearestNeighbors,
                  algorithm='blah')
    X = rng.random_sample((10, 2))
    Xsparse = csr_matrix(X)
    y = np.ones(10)
    for cls in (neighbors.KNeighborsClassifier,
                neighbors.RadiusNeighborsClassifier,
                neighbors.KNeighborsRegressor,
                neighbors.RadiusNeighborsRegressor):
        assert_raises(ValueError,
                      cls,
                      weights='blah')
        assert_raises(ValueError,
                      cls, p=-1)
        assert_raises(ValueError,
                      cls, algorithm='blah')
        # haversine is invalid for ball_tree with >2 dims / sparse input.
        nbrs = cls(algorithm='ball_tree', metric='haversine')
        assert_raises(ValueError,
                      nbrs.predict,
                      X)
        assert_raises(ValueError,
                      ignore_warnings(nbrs.fit),
                      Xsparse, y)
        nbrs = cls()
        assert_raises(ValueError,
                      nbrs.fit,
                      np.ones((0, 2)), np.ones(0))
        assert_raises(ValueError,
                      nbrs.fit,
                      X[:, :, None], y)
        nbrs.fit(X, y)
        assert_raises(ValueError,
                      nbrs.predict,
                      [[]])
        # BUG FIX: `cls` is a class, so `isinstance(cls, ...)` was always
        # False and the negative-n_neighbors check below never ran; use
        # issubclass so k-neighbors estimators are actually exercised.
        if issubclass(cls, (neighbors.KNeighborsClassifier,
                            neighbors.KNeighborsRegressor)):
            nbrs = cls(n_neighbors=-1)
            assert_raises(ValueError, nbrs.fit, X, y)
    nbrs = neighbors.NearestNeighbors().fit(X)
    assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
    assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
                           n_query_pts=2, n_neighbors=5):
    # Test computing the neighbors for various metrics
    # create a symmetric matrix
    V = rng.rand(n_features, n_features)
    VI = np.dot(V, V.T)  # positive semi-definite, valid for mahalanobis
    metrics = [('euclidean', {}),
               ('manhattan', {}),
               ('minkowski', dict(p=1)),
               ('minkowski', dict(p=2)),
               ('minkowski', dict(p=3)),
               ('minkowski', dict(p=np.inf)),
               ('chebyshev', {}),
               ('seuclidean', dict(V=rng.rand(n_features))),
               ('wminkowski', dict(p=3, w=rng.rand(n_features))),
               ('mahalanobis', dict(VI=VI))]
    algorithms = ['brute', 'ball_tree', 'kd_tree']
    X = rng.rand(n_samples, n_features)
    test = rng.rand(n_query_pts, n_features)
    for metric, metric_params in metrics:
        results = []
        # `p` is passed as a constructor kwarg, not via metric_params.
        p = metric_params.pop('p', 2)
        for algorithm in algorithms:
            # KD tree doesn't support all metrics
            if (algorithm == 'kd_tree' and
                    metric not in neighbors.KDTree.valid_metrics):
                assert_raises(ValueError,
                              neighbors.NearestNeighbors,
                              algorithm=algorithm,
                              metric=metric, metric_params=metric_params)
                continue
            neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
                                               algorithm=algorithm,
                                               metric=metric, p=p,
                                               metric_params=metric_params)
            neigh.fit(X)
            results.append(neigh.kneighbors(test, return_distance=True))
        # All supporting algorithms must agree on distances and indices.
        assert_array_almost_equal(results[0][0], results[1][0])
        assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
    # PEP 8 (E731): define the metric with `def` rather than assigning a
    # lambda to a name.
    # NOTE(review): this is not a true metric (it does not depend on
    # x1 - x2); it only needs to be callable for this consistency check.
    def metric(x1, x2):
        return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))

    X = np.random.RandomState(42).rand(20, 2)
    nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
    nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
    nbrs1.fit(X)
    nbrs2.fit(X)
    dist1, ind1 = nbrs1.kneighbors(X)
    dist2, ind2 = nbrs2.kneighbors(X)
    # auto and brute must agree when given the same callable metric.
    assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
    """Deprecated metric-parameter spellings must emit warnings."""
    # Extra keyword args for the metric are deprecated...
    assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
                 metric='wminkowski', w=np.ones(10))
    # ...and passing p via metric_params (instead of p=) is flagged.
    assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
                 metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
    """Tree-based backends cannot predict on sparse input: must raise."""
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)
    y = rng.randint(0, 2, 5)
    nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
    nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
    for model in [nbrs1, nbrs2]:
        model.fit(X, y)
        assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)
    # Find a reasonable radius.
    # BUG FIX: `np.sort(dist_array)` returns a sorted *copy* and the result
    # was discarded, so the radius was taken from an unsorted array. Keep
    # the sorted copy so dist_array[15] is a genuinely small distance.
    dist_array = np.sort(pairwise_distances(X).flatten())
    radius = dist_array[15]
    # Test kneighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.kneighbors_graph(
            X, 3, metric=metric).toarray()
        nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
    # Test radiusneighbors_graph
    for metric in ['manhattan', 'chebyshev']:
        nbrs_graph = neighbors.radius_neighbors_graph(
            X, radius, metric=metric).toarray()
        nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
        assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
    # Raise error when wrong parameters are supplied,
    X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
                  metric='euclidean')
    X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
    X_nbrs.fit(X)
    assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
                  radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
    """Assert each entry of an object array equals the expected sequence."""
    for idx, element in enumerate(nparray):
        assert_array_equal(element, list_check[idx])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et.al when query is not training data
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        X = [[0], [1]]
        nn.fit(X)
        test_data = [[2], [1]]
        # Test neighbors.
        dist, ind = nn.kneighbors(test_data)
        assert_array_equal(dist, [[1], [0]])
        assert_array_equal(ind, [[1], [1]])
        dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
        check_object_arrays(dist, [[1], [1, 0]])
        check_object_arrays(ind, [[1], [0, 1]])
        # Test the graph variants.
        assert_array_equal(
            nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
        # Zero distance for the exact match is stored explicitly.
        assert_array_equal(
            nn.kneighbors_graph([[2], [1]], mode='distance').A,
            np.array([[0., 1.], [0., 0.]]))
        rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
        assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et.al when query is None
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        X = [[0], [1]]
        nn.fit(X)
        # With X=None each training point is excluded from its own result.
        dist, ind = nn.kneighbors()
        assert_array_equal(dist, [[1], [1]])
        assert_array_equal(ind, [[1], [0]])
        dist, ind = nn.radius_neighbors(None, radius=1.5)
        check_object_arrays(dist, [[1], [1]])
        check_object_arrays(ind, [[1], [0]])
        # Test the graph variants.
        rng = nn.radius_neighbors_graph(None, radius=1.5)
        kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            # BUG FIX: the assertions referenced `rng` instead of the loop
            # variable, so `kng` was never actually checked.
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
        X = [[0, 1], [0, 1], [1, 1]]
        nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
        nn.fit(X)
        assert_array_equal(
            nn.kneighbors_graph().A,
            np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
    # Test behavior of kneighbors when duplicates are present in query
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
        nn.fit([[0], [1]])

        # Do not do anything special to duplicates.
        kng = nn.kneighbors_graph([[0], [1]], mode='distance')
        # Query points coincide with training points, so all distances are 0
        # but the zero entries must still be stored explicitly.
        assert_array_equal(
            kng.A,
            np.array([[0., 0.], [0., 0.]]))
        assert_array_equal(kng.data, [0., 0.])
        assert_array_equal(kng.indices, [0, 1])

        dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
        check_object_arrays(dist, [[0, 1], [1, 0]])
        check_object_arrays(ind, [[0, 1], [0, 1]])

        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
        assert_array_equal(rng.A, np.ones((2, 2)))

        rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
                                        mode='distance')
        assert_array_equal(rng.A, [[0, 1], [1, 0]])
        assert_array_equal(rng.indices, [0, 1, 0, 1])
        assert_array_equal(rng.data, [0, 1, 1, 0])

        # Mask the first duplicates when n_duplicates > n_neighbors.
        X = np.ones((3, 1))
        nn = neighbors.NearestNeighbors(n_neighbors=1)
        nn.fit(X)
        dist, ind = nn.kneighbors()
        assert_array_equal(dist, np.zeros((3, 1)))
        assert_array_equal(ind, [[1], [0], [1]])

        # Test that zeros are explicitly marked in kneighbors_graph.
        kng = nn.kneighbors_graph(mode='distance')
        assert_array_equal(
            kng.A, np.zeros((3, 3)))
        assert_array_equal(kng.data, np.zeros(3))
        assert_array_equal(kng.indices, [1., 0., 1.])
        assert_array_equal(
            nn.kneighbors_graph().A,
            np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
    # Test include_self parameter in neighbors_graph
    data = [[2, 3], [4, 5]]

    # With include_self=True each sample is its own nearest neighbor; with
    # False, the self-edge is excluded and the other point is chosen.
    with_self = neighbors.kneighbors_graph(data, 1, include_self=True).A
    without_self = neighbors.kneighbors_graph(data, 1, include_self=False).A
    assert_array_equal(with_self, [[1., 0.], [0., 1.]])
    assert_array_equal(without_self, [[0., 1.], [1., 0.]])

    with_self = neighbors.radius_neighbors_graph(
        data, 5.0, include_self=True).A
    without_self = neighbors.radius_neighbors_graph(
        data, 5.0, include_self=False).A
    assert_array_equal(with_self, [[1., 1.], [1., 1.]])
    assert_array_equal(without_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
    # Results must be identical regardless of the n_jobs setting.
    X, y = datasets.make_classification(n_samples=10, n_features=2,
                                        n_redundant=0, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    for algorithm in ALGORITHMS:
        clf = neighbors.KNeighborsClassifier(n_neighbors=3,
                                             algorithm=algorithm)
        clf.fit(X_train, y_train)
        # Reference results computed with the default (serial) settings.
        y_1 = clf.predict(X_test)
        dist_1, ind_1 = clf.kneighbors(X_test)
        A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
        for n_jobs in [-1, 2, 5]:
            clf.set_params(n_jobs=n_jobs)
            y = clf.predict(X_test)
            dist, ind = clf.kneighbors(X_test)
            A = clf.kneighbors_graph(X_test, mode='distance').toarray()
            assert_array_equal(y_1, y)
            # Distances are floats; compare approximately.
            assert_array_almost_equal(dist_1, dist)
            assert_array_equal(ind_1, ind)
            assert_array_almost_equal(A_1, A)
def test_dtype_convert():
    # Fitting on string labels must round-trip them through predict intact.
    classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
    CLASSES = 15
    X = np.eye(CLASSES)
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:CLASSES])
    result = classifier.fit(X, y).predict(X)
    assert_array_equal(result, y)
| bsd-3-clause |
Frank-Wu/stratosphere-streaming | flink-streaming-connectors/src/test/resources/Performance/PerformanceTracker.py | 2 | 4050 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 30 15:40:17 2014
@author: gyfora
"""
import matplotlib.pyplot as plt
import pandas as pd
import os
import operator
# Matplotlib styling tables shared by all plot helpers below; a counter's
# parsed instance number indexes into `markers` so that each logical series
# keeps a stable look across figures.
linestyles = ['_', '-', '--', ':']
markers=['D','s', '|', '', 'x', '_', '^', ' ', 'd', 'h', '+', '*', ',', 'o', '.', '1', 'p', 'H', 'v', '>'];
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
def readFiles(csv_dir):
    """Load every counter CSV file found in ``csv_dir``.

    Returns a list of ``(name, number, dataframe)`` tuples where ``name`` is
    the file name without its ``.csv`` extension, ``number`` is the zero-based
    instance index parsed from the trailing ``-<n>`` of the name, and the
    dataframe is indexed by its ``Time`` column.
    """
    counters = []
    for fname in os.listdir(csv_dir):
        if fname.endswith('.csv'):
            # BUG FIX: the original used fname.rstrip('.csv'), which strips
            # any trailing run of the characters '.', 'c', 's', 'v' and can
            # eat the end of some names; slice the suffix off instead.
            name = fname[:-len('.csv')]
            number = int(name.split('-')[-1]) - 1
            df = pd.read_csv(os.path.join(csv_dir, fname), index_col='Time')
            counters.append((name, number, df))
    return counters
def plotCounter(csv_dir, sname='', smooth=5, savePath=''):
    """Plot every counter whose name contains ``sname``.

    Reads the CSV counters from ``csv_dir``, derives their rate of change
    via :func:`addSpeed`, and delegates the drawing to :func:`plotDfs`.
    ``smooth`` is the rolling-mean window applied to the derivative plot;
    when ``savePath`` is non-empty the figures are written there as PNGs.
    """
    counters = readFiles(csv_dir)
    addSpeed(counters)
    # Keep only the counters whose file name matches the requested filter.
    selectedCounters = []
    for (name, number, df) in counters:
        if sname in name:
            selectedCounters.append((name, number, df))
    # An empty filter selects everything; use a generic name for the output.
    if sname == '':
        sname = 'counters'
    save = savePath != ''
    plotDfs(selectedCounters, smooth, save, savePath + '/' + sname)
def plotDfs(counters, smooth, save, saveFile):
    """Draw raw counter values and their smoothed derivative as two figures.

    ``counters`` is a list of ``(name, number, dataframe)`` tuples as built
    by :func:`readFiles` / :func:`addSpeed`; ``number`` picks a stable marker
    per series. When ``save`` is True the figures are written to
    ``saveFile + 'C.png'`` and ``saveFile + 'D.png'``.

    NOTE(review): ``df.ix`` and ``pd.rolling_mean`` only exist in legacy
    pandas (< 0.20); this module targets the old Python 2 stack.
    """
    plt.figure(figsize=(12, 8), dpi=80)
    plt.title('Counter')
    for (name, number, df) in counters:
        m = markers[number % len(markers)]
        df.ix[:, 0].plot(marker=m, markevery=10, markersize=10)
    plt.legend([x[0] for x in counters])
    if save:
        plt.savefig(saveFile + 'C.png')

    # Second figure: rate of change (the 'speed' column added by addSpeed).
    plt.figure(figsize=(12, 8), dpi=80)
    plt.title('dC/dT')
    for (name, number, df) in counters:
        m = markers[number % len(markers)]
        pd.rolling_mean(df.speed, smooth).plot(marker=m, markevery=10, markersize=10)
    plt.legend([x[0] for x in counters])
    if save:
        plt.savefig(saveFile + 'D.png')
def addSpeed(counters):
    """Append a 'speed' column (dC/dT) to each counter dataframe in place.

    The derivative is a forward difference of the first data column with
    respect to the Time index; 0.01 is added to the time delta to avoid
    division by zero on repeated timestamps. The first sample has no
    predecessor, so its speed is defined as 0.

    Returns the same list for call-chaining convenience.
    """
    for (tname, number, df) in counters:
        speed = [0]
        # .iloc replaces the long-removed DataFrame.ix indexer; behavior is
        # identical here (positional selection of the first column).
        values = list(df.iloc[:, 0])
        for i in range(1, len(values)):
            dt = float(df.index[i] - df.index[i - 1] + 0.01)
            speed.append(float(values[i] - values[i - 1]) / dt)
        df['speed'] = speed
    return counters
def plotThroughput(csv_dir, tasknames, smooth=5, savePath=''):
    """Plot aggregate throughput per task.

    For every task name, counters from parallel subtasks that share the same
    instance number are summed together before plotting, so the figures show
    total counts and smoothed dC/dT per parallel instance.
    """
    # Accept a single task name as well as a list of them.
    if type(tasknames) != list:
        tasknames = [tasknames]
    for taskname in tasknames:
        counters = readFiles(csv_dir)
        addSpeed(counters)
        # Group the dataframes of matching counters by their instance number.
        selected = {}
        for (tname, number, df) in counters:
            if taskname in tname:
                if number in selected:
                    selected[number].append(df)
                else:
                    selected[number] = [df]
        plt.figure()
        plt.title(taskname)
        for i in selected:
            if len(selected[i]) > 1:
                # Sum counters of the same instance elementwise.
                # NOTE(review): bare `reduce` implies Python 2 (it moved to
                # functools in Python 3).
                selected[i] = reduce(operator.add, selected[i])
            else:
                selected[i] = selected[i][0]
            m = markers[i % len(markers)]
            selected[i].ix[:, 0].plot(marker=m, markevery=10, markersize=10)
        plt.legend(selected.keys())
        if savePath != '':
            plt.savefig(savePath + '/' + taskname + 'C.png')

        # Second figure: smoothed derivative of each summed counter.
        plt.figure()
        plt.title(taskname + " - dC/dT")
        for i in selected:
            m = markers[i % len(markers)]
            pd.rolling_mean(selected[i].speed, smooth).plot(marker=m, markevery=10, markersize=10)
        plt.legend(selected.keys())
        if savePath != '':
            plt.savefig(savePath + '/' + taskname + 'D.png')
def plotTimer(csv_dir, smooth=5, std=50):
    """Plot rolling mean and rolling standard deviation of timer CSVs.

    ``smooth`` is the window of the rolling mean of the timer values;
    ``std`` is the window of the rolling standard deviation.
    """
    dataframes = readFiles(csv_dir)
    plt.figure(figsize=(12, 8), dpi=80)
    plt.title('Timer')
    for dataframe in dataframes:
        # dataframe is a (name, number, df) tuple; number picks the marker.
        m = markers[dataframe[1] % len(markers)]
        pd.rolling_mean(dataframe[2].ix[:, 0], smooth).plot(marker=m, markevery=10, markersize=10)
    plt.legend([x[0] for x in dataframes])

    plt.figure(figsize=(12, 8), dpi=80)
    plt.title('Standard deviance')
    for dataframe in dataframes:
        m = markers[dataframe[1] % len(markers)]
        pd.rolling_std(dataframe[2].ix[:, 0], std).plot(marker=m, markevery=10, markersize=10)
    plt.legend([x[0] for x in dataframes])
| apache-2.0 |
schoolie/bokeh | bokeh/charts/builders/horizon_builder.py | 6 | 6668 | """This is the Bokeh charts interface. It gives you a high level API
to build complex plots in a simple way.
This is the Horizon class which lets you build your Horizon charts
just passing the arguments to the Chart class and calling the proper
functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from bokeh.charts.builder import create_and_build
from bokeh.charts.glyphs import HorizonGlyph
from .line_builder import LineBuilder
from ...core.properties import Float, Int, List, string_types, String, Color, Bool
from ..attributes import ColorAttr, IdAttr
from ...models.sources import ColumnDataSource
from ...models.axes import CategoricalAxis
from ...models.ranges import FactorRange, DataRange1d
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Horizon(data=None, x=None, y=None, series=None, **kws):
    """ Create a horizon chart using :class:`HorizonBuilder
    <bokeh.charts.builders.scatter_builder.HorizonBuilder>`
    to render the geometry from values.

    Args:
        data (:ref:`userguide_charts_data_types`): table-like data
        x (str or list(str), optional): the column label to use for the x dimension
        y (str or list(str), optional): the column label to use for the y dimension
        series (str, optional): the column label used to group rows into series

    In addition to the parameters specific to this chart,
    :ref:`userguide_charts_defaults` are also accepted as keyword parameters.

    Returns:
        :class:`Chart`: includes glyph renderers that generate the scatter points

    Examples:

    .. bokeh-plot::
        :source-position: above

        import pandas as pd
        from bokeh.charts import Horizon, output_file, show

        # read in some stock data from the Yahoo Finance API
        AAPL = pd.read_csv(
            "http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
            parse_dates=['Date'])
        MSFT = pd.read_csv(
            "http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
            parse_dates=['Date'])
        IBM = pd.read_csv(
            "http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
            parse_dates=['Date'])

        data = dict([
            ('AAPL', AAPL['Adj Close']),
            ('Date', AAPL['Date']),
            ('MSFT', MSFT['Adj Close']),
            ('IBM', IBM['Adj Close'])]
        )

        hp = Horizon(data, x='Date', plot_width=800, plot_height=300,
                     title="horizon plot using stock inputs")

        output_file("horizon.html")
        show(hp)

    """
    kws['x'] = x
    kws['y'] = y
    kws['series'] = series

    # Horizon plots draw overlapping folded bands, so interactive zoom/pan
    # tools would be misleading; strip them while keeping save/reset.
    # `tools == True` (not truthiness) distinguishes the default from a
    # user-supplied tool string.
    tools = kws.get('tools', True)
    if tools == True:
        tools = "save,reset"
    elif isinstance(tools, string_types):
        tools = tools.replace('pan', '')
        tools = tools.replace('wheel_zoom', '')
        tools = tools.replace('box_zoom', '')
        tools = tools.replace(',,', ',')
    kws['tools'] = tools

    chart = create_and_build(HorizonBuilder, data, **kws)

    # Hide numerical axis
    chart.left[0].visible = False

    # Add the series names to the y axis
    chart.extra_y_ranges = {"series": FactorRange(factors=chart._builders[0].series_names)}
    chart.add_layout(CategoricalAxis(y_range_name="series"), 'left')

    return chart
class HorizonBuilder(LineBuilder):
    """Produces glyph renderers representing a horizon chart from many input types.

    The builder handles ingesting the data, deriving settings when not provided,
    building the renderers, then setting ranges, and modifying the chart as needed.
    """

    # class configuration
    glyph = HorizonGlyph

    default_attributes = {'color': ColorAttr(sort=False),
                          'series': IdAttr(sort=False)}

    # primary input properties
    pos_color = Color("#006400", help="""
    The color of the positive folds. (default: "#006400")
    """)

    neg_color = Color("#6495ed", help="""
    The color of the negative folds. (default: "#6495ed")
    """)

    num_folds = Int(3, help="""
    The number of folds stacked on top of each other. (default: 3)
    """)

    flip_neg = Bool(default=True, help="""When True, the negative values will be
    plotted as their absolute value, then their individual axes is flipped. If False,
    then the negative values will still be taken as their absolute value, but the base
    of their shape will start from the same origin as the positive values.
    """)

    # derived properties
    series_count = Int(help="""Count of the unique series names.""")

    bins = List(Float, help="""The binedges calculated from the number of folds,
    and the maximum value of the entire source data.""")

    series_column = String(help="""The column that contains the series names.""")

    fold_height = Float(help="""The size of the bin.""")

    def setup(self):
        """Derive the series column and series count from the attributes."""
        super(HorizonBuilder, self).setup()

        # collect series names and columns selected to color by
        if self.attributes['series'].columns is None:
            self.series_column = self.attributes['color'].columns[0]
        else:
            self.series_column = self.attributes['series'].columns[0]

        if len(self.series_names) == 0:
            self.set_series(self.series_column)

        self.series_count = len(self.series_names)

    def process_data(self):
        """Compute fold height and bin edges; pin constant color attributes."""
        super(HorizonBuilder, self).process_data()

        # calculate group attributes, useful for each horizon glyph
        self.fold_height = max(self.y.max, abs(self.y.min)) / self.num_folds
        self.bins = [bin_id * self.fold_height for bin_id in range(self.num_folds + 1)]

        # manually set attributes to have constant color
        ds = ColumnDataSource(self._data.df)
        self.attributes['series'].setup(data=ds, columns=self.series_column)
        self.attributes['color'].setup(data=ds, columns=self.pos_color)

    def set_ranges(self):
        """Pad x tightly and anchor y at [0, max] since folds overlap."""
        super(HorizonBuilder, self).set_ranges()
        self.x_range = DataRange1d(range_padding=0)
        self.y_range.start = 0
        self.y_range.end = self.y.max
| bsd-3-clause |
deepchem/deepchem | deepchem/data/data_loader.py | 2 | 47681 | """
Process an input dataset into a format suitable for machine learning.
"""
import os
import tempfile
import zipfile
import time
import logging
import warnings
from typing import List, Optional, Tuple, Any, Sequence, Union, Iterator
import pandas as pd
import numpy as np
from deepchem.utils.typing import OneOrMany
from deepchem.utils.data_utils import load_image_files, load_csv_files, load_json_files, load_sdf_files
from deepchem.feat import UserDefinedFeaturizer, Featurizer
from deepchem.data import Dataset, DiskDataset, NumpyDataset, ImageDataset
from deepchem.feat.molecule_featurizers import OneHotFeaturizer
from deepchem.utils.genomics_utils import encode_bio_sequence
logger = logging.getLogger(__name__)
def _convert_df_to_numpy(df: pd.DataFrame,
tasks: List[str]) -> Tuple[np.ndarray, np.ndarray]:
"""Transforms a dataframe containing deepchem input into numpy arrays
This is a private helper method intended to help parse labels and
weights arrays from a pandas dataframe. Here `df` is a dataframe
which has columns for each task in `tasks`. These labels are
extracted into a labels array `y`. Weights `w` are initialized to
all ones, but weights for any missing labels are set to 0.
Parameters
----------
df: pd.DataFrame
Pandas dataframe with columns for all tasks
tasks: List[str]
List of tasks
Returns
-------
Tuple[np.ndarray, np.ndarray]
The tuple is `(w, y)`.
"""
n_samples = df.shape[0]
n_tasks = len(tasks)
y = np.hstack(
[np.reshape(np.array(df[task].values), (n_samples, 1)) for task in tasks])
w = np.ones((n_samples, n_tasks))
if y.dtype.kind in ['O', 'U']:
missing = (y == '')
y[missing] = 0
w[missing] = 0
return y.astype(float), w.astype(float)
class DataLoader(object):
  """Handles loading/featurizing of data from disk.

  The main use of `DataLoader` and its child classes is to make it
  easier to load large datasets into `Dataset` objects.`

  `DataLoader` is an abstract superclass that provides a
  general framework for loading data into DeepChem. This class should
  never be instantiated directly. To load your own type of data, make
  a subclass of `DataLoader` and provide your own implementation for
  the `create_dataset()` method.

  To construct a `Dataset` from input data, first instantiate a
  concrete data loader (that is, an object which is an instance of a
  subclass of `DataLoader`) with a given `Featurizer` object. Then
  call the data loader's `create_dataset()` method on a list of input
  files that hold the source data to process. Note that each subclass
  of `DataLoader` is specialized to handle one type of input data so
  you will have to pick the loader class suitable for your input data
  type.

  Note that it isn't necessary to use a data loader to process input
  data. You can directly use `Featurizer` objects to featurize
  provided input into numpy arrays, but note that this calculation
  will be performed in memory, so you will have to write generators
  that walk the source files and write featurized data to disk
  yourself. `DataLoader` and its subclasses make this process easier
  for you by performing this work under the hood.
  """

  def __init__(self,
               tasks: List[str],
               featurizer: Featurizer,
               id_field: Optional[str] = None,
               log_every_n: int = 1000):
    """Construct a DataLoader object.

    This constructor is provided as a template mainly. You
    shouldn't ever call this constructor directly as a user.

    Parameters
    ----------
    tasks: List[str]
      List of task names
    featurizer: Featurizer
      Featurizer to use to process data.
    id_field: str, optional (default None)
      Name of field that holds sample identifier. Note that the
      meaning of "field" depends on the input data type and can have a
      different meaning in different subclasses. For example, a CSV
      file could have a field as a column, and an SDF file could have
      a field as molecular property.
    log_every_n: int, optional (default 1000)
      Writes a logging statement this often.
    """
    # Guard against direct instantiation of the abstract base class.
    if self.__class__ is DataLoader:
      raise ValueError(
          "DataLoader should never be instantiated directly. Use a subclass instead."
      )
    if not isinstance(tasks, list):
      raise ValueError("tasks must be a list.")
    self.tasks = tasks
    self.id_field = id_field
    self.user_specified_features = None
    # A UserDefinedFeaturizer carries the column names of precomputed
    # features; remember them so subclasses can extract those columns.
    if isinstance(featurizer, UserDefinedFeaturizer):
      self.user_specified_features = featurizer.feature_fields
    self.featurizer = featurizer
    self.log_every_n = log_every_n

  def featurize(self,
                inputs: OneOrMany[Any],
                data_dir: Optional[str] = None,
                shard_size: Optional[int] = 8192) -> Dataset:
    """Featurize provided files and write to specified location.

    DEPRECATED: This method is now a wrapper for `create_dataset()`
    and calls that method under the hood.

    For large datasets, automatically shards into smaller chunks
    for convenience. This implementation assumes that the helper
    methods `_get_shards` and `_featurize_shard` are implemented and
    that each shard returned by `_get_shards` is a pandas dataframe.
    You may choose to reuse or override this method in your subclass
    implementations.

    Parameters
    ----------
    inputs: List
      List of inputs to process. Entries can be filenames or arbitrary objects.
    data_dir: str, default None
      Directory to store featurized dataset.
    shard_size: int, optional (default 8192)
      Number of examples stored in each shard.

    Returns
    -------
    Dataset
      A `Dataset` object containing a featurized representation of data
      from `inputs`.
    """
    warnings.warn(
        "featurize() is deprecated and has been renamed to create_dataset()."
        "featurize() will be removed in DeepChem 3.0", FutureWarning)
    return self.create_dataset(inputs, data_dir, shard_size)

  def create_dataset(self,
                     inputs: OneOrMany[Any],
                     data_dir: Optional[str] = None,
                     shard_size: Optional[int] = 8192) -> Dataset:
    """Creates and returns a `Dataset` object by featurizing provided files.

    Reads in `inputs` and uses `self.featurizer` to featurize the
    data in these inputs.  For large files, automatically shards
    into smaller chunks of `shard_size` datapoints for convenience.
    Returns a `Dataset` object that contains the featurized dataset.

    This implementation assumes that the helper methods `_get_shards`
    and `_featurize_shard` are implemented and that each shard
    returned by `_get_shards` is a pandas dataframe.  You may choose
    to reuse or override this method in your subclass implementations.

    Parameters
    ----------
    inputs: List
      List of inputs to process. Entries can be filenames or arbitrary objects.
    data_dir: str, optional (default None)
      Directory to store featurized dataset.
    shard_size: int, optional (default 8192)
      Number of examples stored in each shard.

    Returns
    -------
    DiskDataset
      A `DiskDataset` object containing a featurized representation of data
      from `inputs`.
    """
    logger.info("Loading raw samples now.")
    logger.info("shard_size: %s" % str(shard_size))

    # Special case handling of single input
    if not isinstance(inputs, list):
      inputs = [inputs]

    def shard_generator():
      # Lazily featurize one shard at a time so only `shard_size` rows are
      # ever held in memory.
      for shard_num, shard in enumerate(self._get_shards(inputs, shard_size)):
        time1 = time.time()
        X, valid_inds = self._featurize_shard(shard)
        ids = shard[self.id_field].values
        # Drop ids of rows whose featurization failed.
        ids = ids[valid_inds]
        if len(self.tasks) > 0:
          # Featurize task results iff they exist.
          y, w = _convert_df_to_numpy(shard, self.tasks)
          # Filter out examples where featurization failed.
          y, w = (y[valid_inds], w[valid_inds])
          assert len(X) == len(ids) == len(y) == len(w)
        else:
          # For prospective data where results are unknown, it
          # makes no sense to have y values or weights.
          y, w = (None, None)
          assert len(X) == len(ids)

        time2 = time.time()
        logger.info("TIMING: featurizing shard %d took %0.3f s" %
                    (shard_num, time2 - time1))
        yield X, y, w, ids

    return DiskDataset.create_dataset(shard_generator(), data_dir, self.tasks)

  def _get_shards(self, inputs: List, shard_size: Optional[int]) -> Iterator:
    """Stub for children classes.

    Should implement a generator that walks over the source data in
    `inputs` and returns a "shard" at a time. Here a shard is a
    chunk of input data that can reasonably be handled in memory. For
    example, this may be a set of rows from a CSV file or a set of
    molecules from a SDF file. To re-use the
    `DataLoader.create_dataset()` method, each shard must be a pandas
    dataframe.

    If you chose to override `create_dataset()` directly you don't
    need to override this helper method.

    Parameters
    ----------
    inputs: list
      List of inputs to process. Entries can be filenames or arbitrary objects.
    shard_size: int, optional
      Number of examples stored in each shard.
    """
    raise NotImplementedError

  def _featurize_shard(self, shard: Any):
    """Featurizes a shard of input data.

    Recall a shard is a chunk of input data that can reasonably be
    handled in memory. For example, this may be a set of rows from a
    CSV file or a set of molecules from a SDF file. Featurize this
    shard in memory and return the results.

    Parameters
    ----------
    shard: Any
      A chunk of input data
    """
    raise NotImplementedError
class CSVLoader(DataLoader):
  """
  Creates `Dataset` objects from input CSV files.

  This class provides conveniences to load data from CSV files.
  It's possible to directly featurize data from CSV files using
  pandas, but this class may prove useful if you're processing
  large CSV files that you don't want to manipulate directly in
  memory.

  Examples
  --------
  Let's suppose we have some smiles and labels

  >>> smiles = ["C", "CCC"]
  >>> labels = [1.5, 2.3]

  Let's put these in a dataframe.

  >>> import pandas as pd
  >>> df = pd.DataFrame(list(zip(smiles, labels)), columns=["smiles", "task1"])

  Let's now write this to disk somewhere. We can now use `CSVLoader` to
  process this CSV dataset.

  >>> import tempfile
  >>> import deepchem as dc
  >>> with dc.utils.UniversalNamedTemporaryFile(mode='w') as tmpfile:
  ...   df.to_csv(tmpfile.name)
  ...   loader = dc.data.CSVLoader(["task1"], feature_field="smiles",
  ...                              featurizer=dc.feat.CircularFingerprint())
  ...   dataset = loader.create_dataset(tmpfile.name)
  >>> len(dataset)
  2

  Of course in practice you should already have your data in a CSV file if
  you're using `CSVLoader`. If your data is already in memory, use
  `InMemoryLoader` instead.
  """

  def __init__(self,
               tasks: List[str],
               featurizer: Featurizer,
               feature_field: Optional[str] = None,
               id_field: Optional[str] = None,
               smiles_field: Optional[str] = None,
               log_every_n: int = 1000):
    """Initializes CSVLoader.

    Parameters
    ----------
    tasks: List[str]
      List of task names
    featurizer: Featurizer
      Featurizer to use to process data.
    feature_field: str, optional (default None)
      Field with data to be featurized.
    id_field: str, optional, (default None)
      CSV column that holds sample identifier
    smiles_field: str, optional (default None) (DEPRECATED)
      Name of field that holds smiles string.
    log_every_n: int, optional (default 1000)
      Writes a logging statement this often.
    """
    if not isinstance(tasks, list):
      raise ValueError("tasks must be a list.")
    # Backwards compatibility: smiles_field is the pre-rename spelling of
    # feature_field; accept it but warn, and reject conflicting values.
    if smiles_field is not None:
      logger.warning(
          "smiles_field is deprecated and will be removed in a future version of DeepChem."
          "Use feature_field instead.")
      if feature_field is not None and smiles_field != feature_field:
        raise ValueError(
            "smiles_field and feature_field if both set must have the same value."
        )
      elif feature_field is None:
        feature_field = smiles_field

    self.tasks = tasks
    self.feature_field = feature_field
    # Use the features as unique ids when no id column was given. (The
    # original code assigned `self.id_field = id_field` and then immediately
    # overwrote it in an if/else -- the dead store has been removed.)
    self.id_field = feature_field if id_field is None else id_field
    self.user_specified_features = None
    if isinstance(featurizer, UserDefinedFeaturizer):
      self.user_specified_features = featurizer.feature_fields
    self.featurizer = featurizer
    self.log_every_n = log_every_n

  def _get_shards(self, input_files: List[str],
                  shard_size: Optional[int]) -> Iterator[pd.DataFrame]:
    """Defines a generator which returns data for each shard

    Parameters
    ----------
    input_files: List[str]
      List of filenames to process
    shard_size: int, optional
      The size of a shard of data to process at a time.

    Returns
    -------
    Iterator[pd.DataFrame]
      Iterator over shards
    """
    return load_csv_files(input_files, shard_size)

  def _featurize_shard(self,
                       shard: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
    """Featurizes a shard of an input dataframe.

    Parameters
    ----------
    shard: pd.DataFrame
      DataFrame that holds a shard of the input CSV file

    Returns
    -------
    features: np.ndarray
      Features computed from CSV file.
    valid_inds: np.ndarray
      Indices of rows in source CSV with valid data.
    """
    logger.info("About to featurize shard.")
    if self.featurizer is None:
      raise ValueError(
          "featurizer must be specified in constructor to featurize data.")
    features = [elt for elt in self.featurizer(shard[self.feature_field])]
    # A featurizer signals failure by returning an empty array for that row.
    valid_inds = np.array(
        [1 if np.array(elt).size > 0 else 0 for elt in features], dtype=bool)
    features = [
        elt for (is_valid, elt) in zip(valid_inds, features) if is_valid
    ]
    return np.array(features), valid_inds
class UserCSVLoader(CSVLoader):
  """
  Handles loading of CSV files with user-defined features.

  This is a convenience class that allows for descriptors already present in a
  CSV file to be extracted without any featurization necessary.

  Examples
  --------
  Let's suppose we have some descriptors and labels. (Imagine that these
  descriptors have been computed by an external program.)

  >>> desc1 = [1, 43]
  >>> desc2 = [-2, -22]
  >>> labels = [1.5, 2.3]
  >>> ids = ["cp1", "cp2"]

  Let's put these in a dataframe.

  >>> import pandas as pd
  >>> df = pd.DataFrame(list(zip(ids, desc1, desc2, labels)), columns=["id", "desc1", "desc2", "task1"])

  Let's now write this to disk somewhere. We can now use `UserCSVLoader` to
  process this CSV dataset.

  >>> import tempfile
  >>> import deepchem as dc
  >>> featurizer = dc.feat.UserDefinedFeaturizer(["desc1", "desc2"])
  >>> with dc.utils.UniversalNamedTemporaryFile(mode='w') as tmpfile:
  ...   df.to_csv(tmpfile.name)
  ...   loader = dc.data.UserCSVLoader(["task1"], id_field="id",
  ...                                  featurizer=featurizer)
  ...   dataset = loader.create_dataset(tmpfile.name)
  >>> len(dataset)
  2
  >>> dataset.X[0, 0]
  1

  The difference between `UserCSVLoader` and `CSVLoader` is that our
  descriptors (our features) have already been computed for us, but are spread
  across multiple columns of the CSV file.

  Of course in practice you should already have your data in a CSV file if
  you're using `UserCSVLoader`. If your data is already in memory, use
  `InMemoryLoader` instead.
  """

  def _get_shards(self, input_files: List[str],
                  shard_size: Optional[int]) -> Iterator[pd.DataFrame]:
    """Defines a generator which returns data for each shard

    Parameters
    ----------
    input_files: List[str]
      List of filenames to process
    shard_size: int, optional
      The size of a shard of data to process at a time.

    Returns
    -------
    Iterator[pd.DataFrame]
      Iterator over shards
    """
    return load_csv_files(input_files, shard_size)

  def _featurize_shard(self,
                       shard: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
    """Featurizes a shard of an input dataframe.

    Unlike `CSVLoader._featurize_shard`, no featurizer is run here: the
    feature columns named by the `UserDefinedFeaturizer` are simply coerced
    to numeric and extracted, and every row is considered valid.

    Parameters
    ----------
    shard: pd.DataFrame
      DataFrame that holds a shard of the input CSV file

    Returns
    -------
    features: np.ndarray
      Features extracted from CSV file.
    valid_inds: np.ndarray
      Indices of rows in source CSV with valid data.
    """
    assert isinstance(self.featurizer, UserDefinedFeaturizer)
    time1 = time.time()
    feature_fields = self.featurizer.feature_fields
    shard[feature_fields] = shard[feature_fields].apply(pd.to_numeric)
    X_shard = shard[feature_fields].to_numpy()
    time2 = time.time()
    logger.info(
        "TIMING: user specified processing took %0.3f s" % (time2 - time1))
    return (X_shard, np.ones(len(X_shard), dtype=bool))
class JsonLoader(DataLoader):
"""
Creates `Dataset` objects from input json files.
This class provides conveniences to load data from json files.
It's possible to directly featurize data from json files using
pandas, but this class may prove useful if you're processing
large json files that you don't want to manipulate directly in
memory.
It is meant to load JSON files formatted as "records" in line
delimited format, which allows for sharding.
``list like [{column -> value}, ... , {column -> value}]``.
Examples
--------
Let's create the sample dataframe.
>>> composition = ["LiCoO2", "MnO2"]
>>> labels = [1.5, 2.3]
>>> import pandas as pd
>>> df = pd.DataFrame(list(zip(composition, labels)), columns=["composition", "task"])
Dump the dataframe to the JSON file formatted as "records" in line delimited format and
load the json file by JsonLoader.
>>> import tempfile
>>> import deepchem as dc
>>> with dc.utils.UniversalNamedTemporaryFile(mode='w') as tmpfile:
... df.to_json(tmpfile.name, orient='records', lines=True)
... featurizer = dc.feat.ElementPropertyFingerprint()
... loader = dc.data.JsonLoader(["task"], feature_field="composition", featurizer=featurizer)
... dataset = loader.create_dataset(tmpfile.name)
>>> len(dataset)
2
"""
def __init__(self,
tasks: List[str],
feature_field: str,
featurizer: Featurizer,
label_field: Optional[str] = None,
weight_field: Optional[str] = None,
id_field: Optional[str] = None,
log_every_n: int = 1000):
"""Initializes JsonLoader.
Parameters
----------
tasks: List[str]
List of task names
feature_field: str
JSON field with data to be featurized.
featurizer: Featurizer
Featurizer to use to process data
label_field: str, optional (default None)
Field with target variables.
weight_field: str, optional (default None)
Field with weights.
id_field: str, optional (default None)
Field for identifying samples.
log_every_n: int, optional (default 1000)
Writes a logging statement this often.
"""
if not isinstance(tasks, list):
raise ValueError("Tasks must be a list.")
self.tasks = tasks
self.feature_field = feature_field
self.label_field = label_field
self.weight_field = weight_field
self.id_field = id_field
self.user_specified_features = None
if isinstance(featurizer, UserDefinedFeaturizer):
self.user_specified_features = featurizer.feature_fields
self.featurizer = featurizer
self.log_every_n = log_every_n
def create_dataset(self,
input_files: OneOrMany[str],
data_dir: Optional[str] = None,
shard_size: Optional[int] = 8192) -> DiskDataset:
"""Creates a `Dataset` from input JSON files.
Parameters
----------
input_files: OneOrMany[str]
List of JSON filenames.
data_dir: Optional[str], default None
Name of directory where featurized data is stored.
shard_size: int, optional (default 8192)
Shard size when loading data.
Returns
-------
DiskDataset
A `DiskDataset` object containing a featurized representation of data
from `input_files`.
"""
if not isinstance(input_files, list):
try:
if isinstance(input_files, str):
input_files = [input_files]
else:
input_files = list(input_files)
except TypeError:
raise ValueError(
"input_files is of an unrecognized form. Must be one filename or a list of filenames."
)
def shard_generator():
"""Yield X, y, w, and ids for shards."""
for shard_num, shard in enumerate(
self._get_shards(input_files, shard_size)):
time1 = time.time()
X, valid_inds = self._featurize_shard(shard)
if self.id_field:
ids = shard[self.id_field].values
else:
ids = np.ones(len(valid_inds))
ids = ids[valid_inds]
if len(self.tasks) > 0:
# Featurize task results if they exist.
y, w = _convert_df_to_numpy(shard, self.tasks)
if self.label_field:
y = shard[self.label_field]
if self.weight_field:
w = shard[self.weight_field]
# Filter out examples where featurization failed.
y, w = (y[valid_inds], w[valid_inds])
assert len(X) == len(ids) == len(y) == len(w)
else:
# For prospective data where results are unknown, it
# makes no sense to have y values or weights.
y, w = (None, None)
assert len(X) == len(ids)
time2 = time.time()
logger.info("TIMING: featurizing shard %d took %0.3f s" %
(shard_num, time2 - time1))
yield X, y, w, ids
return DiskDataset.create_dataset(shard_generator(), data_dir, self.tasks)
  def _get_shards(self, input_files: List[str],
                  shard_size: Optional[int]) -> Iterator[pd.DataFrame]:
    """Defines a generator which returns data for each shard

    Thin wrapper: shard construction is delegated entirely to
    `load_json_files`. Presumably each yielded DataFrame holds at most
    `shard_size` rows -- confirm against `load_json_files`.

    Parameters
    ----------
    input_files: List[str]
      List of filenames to process
    shard_size: int, optional
      The size of a shard of data to process at a time.

    Returns
    -------
    Iterator[pd.DataFrame]
      Iterator over shards
    """
    return load_json_files(input_files, shard_size)
def _featurize_shard(self,
shard: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
"""Featurizes a shard of an input dataframe.
Helper that computes features for the given shard of data.
Parameters
----------
shard: pd.DataFrame
DataFrame that holds data to be featurized.
Returns
-------
features: np.ndarray
Array of feature vectors. Note that samples for which featurization has
failed will be filtered out.
valid_inds: np.ndarray
Boolean values indicating successful featurization for corresponding
sample in the source.
"""
logger.info("About to featurize shard.")
if self.featurizer is None:
raise ValueError(
"featurizer must be specified in constructor to featurizer data/")
features = [elt for elt in self.featurizer(shard[self.feature_field])]
valid_inds = np.array(
[1 if np.array(elt).size > 0 else 0 for elt in features], dtype=bool)
features = [
elt for (is_valid, elt) in zip(valid_inds, features) if is_valid
]
return np.array(features), valid_inds
class SDFLoader(DataLoader):
  """Creates a `Dataset` object from SDF input files.

  This class provides conveniences to load and featurize data from
  Structure Data Files (SDFs). SDF is a standard format for structural
  information (3D coordinates of atoms and bonds) of molecular compounds.

  Examples
  --------
  >>> import deepchem as dc
  >>> import os
  >>> current_dir = os.path.dirname(os.path.realpath(__file__))
  >>> featurizer = dc.feat.CircularFingerprint(size=16)
  >>> loader = dc.data.SDFLoader(["LogP(RRCK)"], featurizer=featurizer, sanitize=True)
  >>> dataset = loader.create_dataset(os.path.join(current_dir, "tests", "membrane_permeability.sdf")) # doctest:+ELLIPSIS
  >>> len(dataset)
  2
  """

  def __init__(self,
               tasks: List[str],
               featurizer: Featurizer,
               sanitize: bool = False,
               log_every_n: int = 1000):
    """Initialize SDF Loader

    Parameters
    ----------
    tasks: list[str]
      List of tasknames. These will be loaded from the SDF file.
    featurizer: Featurizer
      Featurizer to use to process data
    sanitize: bool, optional (default False)
      Whether to sanitize molecules.
    log_every_n: int, optional (default 1000)
      Writes a logging statement this often.
    """
    self.featurizer = featurizer
    self.sanitize = sanitize
    self.tasks = tasks
    # The field in which dc.utils.save.load_sdf_files stores RDKit mol objects
    self.mol_field = "mol"
    # The field in which load_sdf_files return value stores smiles
    self.id_field = "smiles"
    self.log_every_n = log_every_n

  def create_dataset(self,
                     inputs: OneOrMany[Any],
                     data_dir: Optional[str] = None,
                     shard_size: Optional[int] = 8192) -> Dataset:
    """Creates and returns a `Dataset` object by featurizing provided sdf files.

    Parameters
    ----------
    inputs: List
      List of inputs to process. Entries can be filenames or arbitrary objects.
      Each file should be supported format (.sdf) or compressed folder of
      .sdf files
    data_dir: str, optional (default None)
      Directory to store featurized dataset.
    shard_size: int, optional (default 8192)
      Number of examples stored in each shard.

    Returns
    -------
    DiskDataset
      A `DiskDataset` object containing a featurized representation of data
      from `inputs`.

    Raises
    ------
    ValueError
      If an input file is neither a .sdf file nor a .zip archive.
    """
    logger.info("Loading raw samples now.")
    logger.info("shard_size: %s" % str(shard_size))

    # Special case handling of single input
    if not isinstance(inputs, list):
      inputs = [inputs]

    # Expand inputs into a flat list of .sdf filenames, extracting any
    # .zip archives into temporary directories along the way.
    processed_files = []
    for input_file in inputs:
      filename, extension = os.path.splitext(input_file)
      extension = extension.lower()
      if extension == ".sdf":
        processed_files.append(input_file)
      elif extension == ".zip":
        zip_dir = tempfile.mkdtemp()
        # Context manager guarantees the archive handle is released even
        # if extraction fails; read the member list before leaving it.
        with zipfile.ZipFile(input_file, 'r') as zip_ref:
          zip_ref.extractall(path=zip_dir)
          members = zip_ref.namelist()
        for member in members:
          zip_file = os.path.join(zip_dir, member)
          _, member_extension = os.path.splitext(zip_file)
          if member_extension.lower() in [".sdf"]:
            processed_files.append(zip_file)
      else:
        raise ValueError("Unsupported file format")
    inputs = processed_files

    def shard_generator():
      """Yield (X, y, w, ids) for each shard of the expanded inputs."""
      for shard_num, shard in enumerate(self._get_shards(inputs, shard_size)):
        time1 = time.time()
        X, valid_inds = self._featurize_shard(shard)
        ids = shard[self.id_field].values
        # Filter out examples where featurization failed.
        ids = ids[valid_inds]
        if len(self.tasks) > 0:
          # Featurize task results iff they exist.
          y, w = _convert_df_to_numpy(shard, self.tasks)
          # Filter out examples where featurization failed.
          y, w = (y[valid_inds], w[valid_inds])
          assert len(X) == len(ids) == len(y) == len(w)
        else:
          # For prospective data where results are unknown, it
          # makes no sense to have y values or weights.
          y, w = (None, None)
          assert len(X) == len(ids)
        time2 = time.time()
        logger.info("TIMING: featurizing shard %d took %0.3f s" %
                    (shard_num, time2 - time1))
        yield X, y, w, ids

    return DiskDataset.create_dataset(shard_generator(), data_dir, self.tasks)

  def _get_shards(self, input_files: List[str],
                  shard_size: Optional[int]) -> Iterator[pd.DataFrame]:
    """Defines a generator which returns data for each shard

    Delegates to `load_sdf_files`, which parses molecules (optionally
    sanitizing them) and yields DataFrames of shard data.

    Parameters
    ----------
    input_files: List[str]
      List of filenames to process
    shard_size: int, optional
      The size of a shard of data to process at a time.

    Returns
    -------
    Iterator[pd.DataFrame]
      Iterator over shards
    """
    return load_sdf_files(
        input_files=input_files,
        clean_mols=self.sanitize,
        tasks=self.tasks,
        shard_size=shard_size)

  def _featurize_shard(self,
                       shard: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
    """Featurizes a shard of an input dataframe.

    Helper that computes features for the given shard of data.

    Parameters
    ----------
    shard: pd.DataFrame
      DataFrame that holds data to be featurized.

    Returns
    -------
    features: np.ndarray
      Array of feature vectors. Note that samples for which featurization has
      failed will be filtered out.
    valid_inds: np.ndarray
      Boolean values indicating successful featurization for corresponding
      sample in the source.
    """
    # Featurize the RDKit mol objects stored by load_sdf_files.
    features = list(self.featurizer(shard[self.mol_field]))
    # A sample is valid iff featurization produced a non-empty array.
    valid_inds = np.array([np.array(elt).size > 0 for elt in features],
                          dtype=bool)
    features = [
        elt for (is_valid, elt) in zip(valid_inds, features) if is_valid
    ]
    return np.array(features), valid_inds
class FASTALoader(DataLoader):
  """Handles loading of FASTA files.

  FASTA files are commonly used to hold sequence data. This
  class provides convenience methods to load FASTA data and
  one-hot encode the genomic sequences for use in downstream
  learning tasks.
  """

  def __init__(self,
               featurizer: Optional[Featurizer] = None,
               auto_add_annotations: bool = False,
               legacy: bool = True):
    """Initialize FASTALoader.

    Parameters
    ----------
    featurizer: Featurizer (default: None)
      The Featurizer to be used for the loaded FASTA data.

      If featurizer is None and legacy is True, the original featurization
      logic is used, creating a one hot encoding of all included FASTA strings
      of shape
      (number of FASTA sequences, number of channels + 1, sequence length, 1).

      If featurizer is None and legacy is False, the featurizer is initialized
      as a OneHotFeaturizer object with charset ("A", "C", "T", "G") and
      max_length = None.
    auto_add_annotations: bool (default False)
      Whether create_dataset will automatically add [CLS] and [SEP] annotations
      to the sequences it reads in order to assist tokenization.
      Keep False if your FASTA file already includes [CLS] and [SEP] annotations.
    legacy: bool (default True)
      Whether to use legacy logic for featurization. Legacy mode will create
      a one hot encoding of the FASTA content of shape
      (number of FASTA sequences, number of channels + 1, max length, 1).

      Legacy mode is only tested for ACTGN charsets, and will be deprecated.

    Raises
    ------
    ValueError
      If legacy mode is combined with a custom featurizer or with
      auto_add_annotations.
    """
    # Process legacy toggle
    if legacy:
      warnings.warn(
          """
          Legacy mode is deprecated and will be removed in
          DeepChem 3.0. Disable legacy mode by passing legacy=False
          during construction of FASTALoader object.
          """, FutureWarning)
      # Legacy featurization has no hooks for custom featurizers or
      # annotation insertion, so reject incompatible combinations early.
      if featurizer is not None or auto_add_annotations:
        raise ValueError(f"""
            featurizer option must be None and
            auto_add_annotations must be false when legacy mode
            is enabled. You set featurizer to {featurizer} and
            auto_add_annotations to {auto_add_annotations}.
            """)

    # Set attributes
    self.legacy = legacy
    self.auto_add_annotations = auto_add_annotations
    self.user_specified_features = None

    # Handle special featurizer cases
    if isinstance(featurizer, UserDefinedFeaturizer):  # User defined featurizer
      self.user_specified_features = featurizer.feature_fields
    elif featurizer is None:  # Default featurizer
      featurizer = OneHotFeaturizer(
          charset=["A", "C", "T", "G"], max_length=None)

    # Set self.featurizer
    self.featurizer = featurizer

  def create_dataset(self,
                     input_files: OneOrMany[str],
                     data_dir: Optional[str] = None,
                     shard_size: Optional[int] = None) -> DiskDataset:
    """Creates a `Dataset` from input FASTA files.

    At present, FASTA support is limited and doesn't allow for sharding.

    Parameters
    ----------
    input_files: List[str]
      List of fasta files.
    data_dir: str, optional (default None)
      Name of directory where featurized data is stored.
    shard_size: int, optional (default None)
      For now, this argument is ignored and each FASTA file gets its
      own shard.

    Returns
    -------
    DiskDataset
      A `DiskDataset` object containing a featurized representation of data
      from `input_files`.
    """
    if isinstance(input_files, str):
      input_files = [input_files]

    def shard_generator():  # TODO Enable sharding with shard size parameter
      for input_file in input_files:
        if self.legacy:
          X = encode_bio_sequence(input_file)
        else:
          # Pass the loader's annotation setting through to the reader.
          # (Previously the flag was silently dropped, so
          # auto_add_annotations=True had no effect.)
          sequences = _read_file(input_file, self.auto_add_annotations)
          X = self.featurizer(sequences)
        ids = np.ones(len(X))
        # (X, y, w, ids)
        yield X, None, None, ids

    def _read_file(input_file: str, auto_add_annotations: bool = False):
      """
      Convert the FASTA file to a numpy array of FASTA-format strings.
      """

      # TODO don't convert all sequences into np array (allow shards)
      def _generate_sequences(fasta_file, header_mark=">") -> np.array:
        """
        Uses a fasta_file to create a numpy array of annotated FASTA-format strings
        """
        sequences = np.array([])
        sequence = np.array([])
        header_read = False
        for line in fasta_file:
          # Check if line is a header
          if line.startswith(header_mark):  # New header line
            header_read = True
            sequences = _add_sequence(sequences, sequence)
            sequence = np.array([])
          elif header_read:  # Line contains sequence in FASTA format
            if line[-1:] == '\n':  # Check last character in string
              line = line[0:-1]  # Remove last character
            sequence = np.append(sequence, line)
        sequences = _add_sequence(sequences, sequence)  # Add last sequence
        return sequences

      def _add_sequence(sequences: np.array, sequence: np.array) -> np.array:
        # Handle empty sequence: nothing to append, keep the accumulator.
        # (Previously this returned a fresh empty array, which discarded
        # every sequence collected so far whenever an empty sequence
        # appeared, e.g. on consecutive header lines.)
        if sequence is None or len(sequence) <= 0:
          return sequences

        # Annotate start/stop of sequence
        if auto_add_annotations:
          sequence = np.insert(sequence, 0, "[CLS]")
          sequence = np.append(sequence, "[SEP]")
        new_sequence = ''.join(sequence)
        new_sequences = np.append(sequences, new_sequence)
        return new_sequences

      with open(input_file, 'r') as f:  # Read FASTA file
        return _generate_sequences(f)

    return DiskDataset.create_dataset(shard_generator(), data_dir)
class ImageLoader(DataLoader):
  """Handles loading of image files.

  This class allows for loading of images in various formats.
  For user convenience, also accepts zip-files and directories
  of images and uses some limited intelligence to attempt to
  traverse subdirectories which contain images.
  """

  def __init__(self, tasks: Optional[List[str]] = None):
    """Initialize image loader.

    At present, custom image featurizers aren't supported by this
    loader class.

    Parameters
    ----------
    tasks: List[str], optional (default None)
      List of task names for image labels.
    """
    if tasks is None:
      tasks = []
    self.tasks = tasks

  def create_dataset(self,
                     inputs: Union[OneOrMany[str], Tuple[Any]],
                     data_dir: Optional[str] = None,
                     shard_size: Optional[int] = 8192,
                     in_memory: bool = False) -> Dataset:
    """Creates and returns a `Dataset` object by featurizing provided image files and labels/weights.

    Parameters
    ----------
    inputs: `Union[OneOrMany[str], Tuple[Any]]`
      The inputs provided should be one of the following

      - filename
      - list of filenames
      - Tuple (list of filenames, labels)
      - Tuple (list of filenames, labels, weights)

      Each file in a given list of filenames should either be of a supported
      image format (.png, .tif only for now) or of a compressed folder of
      image files (only .zip for now). If `labels` or `weights` are provided,
      they must correspond to the sorted order of all filenames provided, with
      one label/weight per file.
    data_dir: str, optional (default None)
      Directory to store featurized dataset.
    shard_size: int, optional (default 8192)
      Shard size when loading data.
    in_memory: bool, optional (default False)
      If true, return in-memory NumpyDataset. Else return ImageDataset.

    Returns
    -------
    ImageDataset or NumpyDataset or DiskDataset
      - if `in_memory == False`, the return value is ImageDataset.
      - if `in_memory == True` and `data_dir is None`, the return value is NumpyDataset.
      - if `in_memory == True` and `data_dir is not None`, the return value is DiskDataset.
    """
    labels, weights = None, None
    if isinstance(inputs, tuple):
      if len(inputs) == 1:
        input_files = inputs[0]
        # Bug fix: check/wrap the unpacked element, not the enclosing
        # tuple. (The original tested `isinstance(inputs, str)`, which is
        # never true for a tuple, so a 1-tuple holding a single filename
        # was never normalized to a list.)
        if isinstance(input_files, str):
          input_files = [input_files]
      elif len(inputs) == 2:
        input_files, labels = inputs
      elif len(inputs) == 3:
        input_files, labels, weights = inputs
      else:
        raise ValueError("Input must be a tuple of length 1, 2, or 3")
    else:
      input_files = inputs
    if isinstance(input_files, str):
      input_files = [input_files]

    image_files = []
    # Sometimes zip files contain directories within. Traverse directories
    while len(input_files) > 0:
      remainder = []
      for input_file in input_files:
        filename, extension = os.path.splitext(input_file)
        extension = extension.lower()
        # TODO(rbharath): Add support for more extensions
        if os.path.isdir(input_file):
          # Queue directory contents for the next traversal pass.
          dirfiles = [
              os.path.join(input_file, subfile)
              for subfile in os.listdir(input_file)
          ]
          remainder += dirfiles
        elif extension == ".zip":
          zip_dir = tempfile.mkdtemp()
          # Context manager guarantees the archive handle is released;
          # read the member list before leaving the block.
          with zipfile.ZipFile(input_file, 'r') as zip_ref:
            zip_ref.extractall(path=zip_dir)
            members = zip_ref.namelist()
          zip_files = [os.path.join(zip_dir, name) for name in members]
          for zip_file in zip_files:
            _, extension = os.path.splitext(zip_file)
            extension = extension.lower()
            if extension in [".png", ".tif"]:
              image_files.append(zip_file)
        elif extension in [".png", ".tif"]:
          image_files.append(input_file)
        else:
          raise ValueError("Unsupported file format")
      input_files = remainder

    # Sort image files
    image_files = sorted(image_files)

    if in_memory:
      if data_dir is None:
        return NumpyDataset(
            load_image_files(image_files), y=labels, w=weights, ids=image_files)
      else:
        dataset = DiskDataset.from_numpy(
            load_image_files(image_files),
            y=labels,
            w=weights,
            ids=image_files,
            tasks=self.tasks,
            data_dir=data_dir)
        if shard_size is not None:
          dataset.reshard(shard_size)
        return dataset
    else:
      return ImageDataset(image_files, y=labels, w=weights, ids=image_files)
class InMemoryLoader(DataLoader):
  """Facilitate Featurization of In-memory objects.

  When featurizing a dataset, it's often the case that the initial set of
  data (pre-featurization) fits handily within memory. (For example, perhaps
  it fits within a column of a pandas DataFrame.) In this case, it would be
  convenient to directly be able to featurize this column of data. However,
  the process of featurization often generates large arrays which quickly eat
  up available memory. This class provides convenient capabilities to process
  such in-memory data by checkpointing generated features periodically to
  disk.

  Example
  -------
  Here's an example with only datapoints and no labels or weights.

  >>> import deepchem as dc
  >>> smiles = ["C", "CC", "CCC", "CCCC"]
  >>> featurizer = dc.feat.CircularFingerprint()
  >>> loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
  >>> dataset = loader.create_dataset(smiles, shard_size=2)
  >>> len(dataset)
  4

  Here's an example with both datapoints and labels

  >>> import deepchem as dc
  >>> smiles = ["C", "CC", "CCC", "CCCC"]
  >>> labels = [1, 0, 1, 0]
  >>> featurizer = dc.feat.CircularFingerprint()
  >>> loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
  >>> dataset = loader.create_dataset(zip(smiles, labels), shard_size=2)
  >>> len(dataset)
  4

  Here's an example with datapoints, labels, weights and ids all provided.

  >>> import deepchem as dc
  >>> smiles = ["C", "CC", "CCC", "CCCC"]
  >>> labels = [1, 0, 1, 0]
  >>> weights = [1.5, 0, 1.5, 0]
  >>> ids = ["C", "CC", "CCC", "CCCC"]
  >>> featurizer = dc.feat.CircularFingerprint()
  >>> loader = dc.data.InMemoryLoader(tasks=["task1"], featurizer=featurizer)
  >>> dataset = loader.create_dataset(zip(smiles, labels, weights, ids), shard_size=2)
  >>> len(dataset)
  4
  """

  def create_dataset(self,
                     inputs: Sequence[Any],
                     data_dir: Optional[str] = None,
                     shard_size: Optional[int] = 8192) -> DiskDataset:
    """Creates and returns a `Dataset` object by featurizing provided files.

    Reads in `inputs` and uses `self.featurizer` to featurize the
    data in these input files. For large files, automatically shards
    into smaller chunks of `shard_size` datapoints for convenience.
    Returns a `Dataset` object that contains the featurized dataset.

    This implementation assumes that the helper methods `_get_shards`
    and `_featurize_shard` are implemented and that each shard
    returned by `_get_shards` is a pandas dataframe. You may choose
    to reuse or override this method in your subclass implementations.

    Parameters
    ----------
    inputs: Sequence[Any]
      List of inputs to process. Entries can be arbitrary objects so long as
      they are understood by `self.featurizer`
    data_dir: str, optional (default None)
      Directory to store featurized dataset.
    shard_size: int, optional (default 8192)
      Number of examples stored in each shard.

    Returns
    -------
    DiskDataset
      A `DiskDataset` object containing a featurized representation of data
      from `inputs`.
    """
    logger.info("Loading raw samples now.")
    logger.info("shard_size: %s" % str(shard_size))
    # Normalize the input to a list; a single non-iterable object becomes
    # a one-element list.
    if not isinstance(inputs, list):
      try:
        inputs = list(inputs)
      except TypeError:
        inputs = [inputs]

    def shard_generator():
      # global_index tracks the position of each datapoint in the full
      # input sequence so auto-generated ids stay unique across shards.
      global_index = 0
      for shard_num, shard in enumerate(self._get_shards(inputs, shard_size)):
        time1 = time.time()
        X, y, w, ids = self._featurize_shard(shard, global_index)
        global_index += len(shard)

        time2 = time.time()
        logger.info("TIMING: featurizing shard %d took %0.3f s" %
                    (shard_num, time2 - time1))
        yield X, y, w, ids

    return DiskDataset.create_dataset(shard_generator(), data_dir, self.tasks)

  def _get_shards(self, inputs: List,
                  shard_size: Optional[int]) -> Iterator[pd.DataFrame]:
    """Break up input into shards.

    Parameters
    ----------
    inputs: List
      Each entry in this list must be of the form `(featurization_input,
      label, weight, id)` or `(featurization_input, label, weight)` or
      `(featurization_input, label)` or `featurization_input` for one
      datapoint, where `featurization_input` is any input that is recognized
      by `self.featurizer`.
    shard_size: int, optional
      The size of shard to generate.

    Returns
    -------
    Iterator[pd.DataFrame]
      Iterator which iterates over shards of data.
    """
    current_shard: List = []
    for i, datapoint in enumerate(inputs):
      if i != 0 and shard_size is not None and i % shard_size == 0:
        shard_data = current_shard
        current_shard = []
        yield shard_data
      current_shard.append(datapoint)
    yield current_shard

  # FIXME: Signature of "_featurize_shard" incompatible with supertype "DataLoader"
  def _featurize_shard(  # type: ignore[override]
      self, shard: List, global_index: int
  ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Featurizes a shard of an input data.

    Parameters
    ----------
    shard: List
      List each entry of which must be of the form `(featurization_input,
      label, weight, id)` or `(featurization_input, label, weight)` or
      `(featurization_input, label)` or `featurization_input` for one
      datapoint, where `featurization_input` is any input that is recognized
      by `self.featurizer`.
    global_index: int
      The starting index for this shard in the full set of provided inputs

    Returns
    ------
    Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
      The tuple is `(X, y, w, ids)`. All values are numpy arrays.

    Raises
    ------
    ValueError
      If any entry is an empty tuple or has more than four elements.
    """
    features = []
    labels = []
    weights = []
    ids = []
    n_tasks = len(self.tasks)
    for i, entry in enumerate(shard):
      if not isinstance(entry, tuple):
        entry = (entry,)
      # Reject malformed entries up front. (Message fixed: the two string
      # fragments previously concatenated without a space; empty tuples
      # previously fell through and crashed with an UnboundLocalError.)
      if len(entry) > 4 or len(entry) == 0:
        raise ValueError(
            "Entry is malformed and must be of length 1-4 containing featurization_input "
            "and optionally label, weight, and id.")
      if len(entry) == 4:
        featurization_input, label, weight, entry_id = entry
      elif len(entry) == 3:
        featurization_input, label, weight = entry
        entry_id = global_index + i
      elif len(entry) == 2:
        featurization_input, label = entry
        weight = np.ones((n_tasks), np.float32)
        entry_id = global_index + i
      elif len(entry) == 1:
        # NOTE(review): this passes the whole 1-tuple (not entry[0]) to
        # the featurizer, unlike the len >= 2 branches which pass the bare
        # datapoint -- confirm the asymmetry is intended before changing.
        featurization_input = entry
        label = np.zeros((n_tasks), np.float32)
        weight = np.zeros((n_tasks), np.float32)
        entry_id = global_index + i
      feature = self.featurizer(featurization_input)
      features.append(feature)
      weights.append(weight)
      labels.append(label)
      ids.append(entry_id)
    X = np.concatenate(features, axis=0)
    return X, np.array(labels), np.array(weights), np.array(ids)
| mit |
DistrictDataLabs/yellowbrick | tests/test_style/test_colors.py | 1 | 11606 | # tests.test_style.test_colors
# Tests for the color utilities and helpers module
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Thu Oct 06 09:30:49 2016 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_colors.py [c6aff34] benjamin@bengfort.com $
"""
Tests for the color utilities and helper functions
"""
##########################################################################
## Imports
##########################################################################
import sys
import pytest
from matplotlib import cm
from cycler import Cycler
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from yellowbrick.style.colors import *
from yellowbrick.style.palettes import ColorPalette, PALETTES
from yellowbrick.cluster.silhouette import SilhouetteVisualizer
from tests.base import VisualTestCase
##########################################################################
## Color Tests
##########################################################################
class TestGetColorCycle(VisualTestCase):
    """
    Test get_color_cycle helper function
    """

    def test_cycle_depends_on_palette(self):
        """
        Ensure the color cycle depends on the palette
        """
        # The default palette supplies six colors.
        assert len(get_color_cycle()) == 6

        # Inside the context manager the "paired" palette takes over ...
        with ColorPalette("paired"):
            assert len(get_color_cycle()) == 12

        # ... and the default palette is restored on exit.
        assert len(get_color_cycle()) == 6

    @pytest.mark.filterwarnings()
    @pytest.mark.skipif(not mpl_ge_150, reason="requires matplotlib 1.5 or later")
    def test_mpl_ge_150(self):
        """
        Test get color cycle with matplotlib 1.5 or later
        """
        colors = get_color_cycle()
        prop_cycle = mpl.rcParams["axes.prop_cycle"]

        # The rcParam must actually be a Cycler instance
        assert isinstance(prop_cycle, Cycler)

        # get_color_cycle currently returns a list (might change in the future)
        assert isinstance(colors, list)

        # Same length and identical colors, element for element
        entries = list(prop_cycle)
        assert len(colors) == len(entries)
        for color, entry in zip(colors, entries):
            assert color == entry["color"]

    @pytest.mark.filterwarnings()
    @pytest.mark.skipif(mpl_ge_150, reason="requires matplotlib ealier than 1.5")
    def test_mpl_lt_150(self):
        """
        Test get color cycle with matplotlib earlier than 1.5
        """
        assert mpl.rcParams["axes.color_cycle"] == get_color_cycle()
class TestResolveColors(VisualTestCase):
    """
    Test resolve_colors helper function

    Covers default behavior, truncation/multiplication to n_colors,
    colormap resolution (string names, mpl colormaps, yellowbrick
    palettes), explicit color lists, and the precedence/warning when
    both colormap and colors are supplied.
    """

    def test_resolve_colors_default(self):
        """
        Provides reasonable defaults provided no arguments
        """
        # With no arguments, resolve_colors mirrors the active color cycle.
        colors = resolve_colors()
        assert colors == get_color_cycle()

    def test_resolve_colors_default_truncate(self):
        """
        Truncates default colors when n_colors is smaller than palette
        """
        assert len(get_color_cycle()) > 3
        assert len(resolve_colors(3)) == 3

    def test_resolve_colors_default_multiply(self):
        """
        Multiplies default colors when n_colors is larger than palette
        """
        # The palette is cycled (repeated) until n_colors are available.
        assert len(get_color_cycle()) < 18
        assert len(resolve_colors(18)) == 18

    def test_warning_on_colormap_and_colors_args(self):
        """
        Warns when both colormap and colors is used, colors is default
        """
        # Explicit colors win; colormap is ignored with a warning.
        with pytest.warns(Warning, match="both colormap and colors specified"):
            colors = resolve_colors(colormap="RdBu", colors=["r", "g", "b"])
            assert colors == ["r", "g", "b"]

    def test_colormap_invalid(self):
        """
        Exception raised when invalid colormap is supplied
        """
        with pytest.raises(YellowbrickValueError):
            resolve_colors(12, colormap="foo")

    def test_colormap_string(self):
        """
        Check resolve colors works when a colormap string is passed
        """
        # Each case is (kwargs, expected RGBA tuples sampled from the map).
        cases = (
            (
                {"n_colors": 6, "colormap": "RdBu"},
                [
                    (0.403921568627451, 0.0, 0.12156862745098039, 1.0),
                    (0.8392156862745098, 0.3764705882352941, 0.30196078431372547, 1.0),
                    (0.9921568627450981, 0.8588235294117647, 0.7803921568627451, 1.0),
                    (0.8196078431372551, 0.8980392156862746, 0.9411764705882353, 1.0),
                    (0.2627450980392157, 0.5764705882352941, 0.7647058823529411, 1.0),
                    (0.0196078431372549, 0.18823529411764706, 0.3803921568627451, 1.0),
                ],
            ),
            (
                {"n_colors": 18, "colormap": "viridis"},
                [
                    (0.267004, 0.004874, 0.329415, 1.0),
                    (0.281924, 0.089666, 0.412415, 1.0),
                    (0.280255, 0.165693, 0.476498, 1.0),
                    (0.263663, 0.237631, 0.518762, 1.0),
                    (0.237441, 0.305202, 0.541921, 1.0),
                    (0.208623, 0.367752, 0.552675, 1.0),
                    (0.182256, 0.426184, 0.55712, 1.0),
                    (0.159194, 0.482237, 0.558073, 1.0),
                    (0.13777, 0.537492, 0.554906, 1.0),
                    (0.121148, 0.592739, 0.544641, 1.0),
                    (0.128087, 0.647749, 0.523491, 1.0),
                    (0.180653, 0.701402, 0.488189, 1.0),
                    (0.274149, 0.751988, 0.436601, 1.0),
                    (0.395174, 0.797475, 0.367757, 1.0),
                    (0.535621, 0.835785, 0.281908, 1.0),
                    (0.688944, 0.865448, 0.182725, 1.0),
                    (0.845561, 0.887322, 0.099702, 1.0),
                    (0.993248, 0.906157, 0.143936, 1.0),
                ],
            ),
            (
                {"n_colors": 9, "colormap": "Set1"},
                [
                    (0.8941176470588236, 0.10196078431372549, 0.10980392156862745, 1.0),
                    (0.21568627450980393, 0.49411764705882355, 0.7215686274509804, 1.0),
                    (0.30196078431372547, 0.6862745098039216, 0.2901960784313726, 1.0),
                    (0.596078431372549, 0.3058823529411765, 0.6392156862745098, 1.0),
                    (1.0, 0.4980392156862745, 0.0, 1.0),
                    (1.0, 1.0, 0.2, 1.0),
                    (0.6509803921568628, 0.33725490196078434, 0.1568627450980392, 1.0),
                    (0.9686274509803922, 0.5058823529411764, 0.7490196078431373, 1.0),
                    (0.6, 0.6, 0.6, 1.0),
                ],
            ),
        )

        for kwds, expected in cases:
            colors = resolve_colors(**kwds)
            assert isinstance(colors, list)
            assert colors == expected

    def test_colormap_string_default_length(self):
        """
        Check colormap when n_colors is not specified
        """
        # Without n_colors the active cycle length is used for sampling.
        n_colors = len(get_color_cycle())
        assert len(resolve_colors(colormap="autumn")) == n_colors

    def test_colormap_cmap(self):
        """
        Assert that supplying a maptlotlib.cm as colormap works
        """
        cmap = cm.get_cmap("nipy_spectral")
        colors = resolve_colors(4, colormap=cmap)
        assert colors == [
            (0.0, 0.0, 0.0, 1.0),
            (0.0, 0.6444666666666666, 0.7333666666666667, 1.0),
            (0.7999666666666666, 0.9777666666666667, 0.0, 1.0),
            (0.8, 0.8, 0.8, 1.0),
        ]

    def test_colormap_palette_mpl(self):
        """
        Assert that supplying a maptlotlib palette as colormap works
        """
        # No n_colors: the default cycle length (6) drives the sampling.
        colormap = cm.get_cmap("nipy_spectral")
        colors = resolve_colors(colormap=colormap)
        assert colors == [
            (0.0, 0.0, 0.0, 1.0),
            (0.0, 0.0, 0.8667, 1.0),
            (0.0, 0.6667, 0.5333, 1.0),
            (0.0, 1.0, 0.0, 1.0),
            (1.0, 0.6, 0.0, 1.0),
            (0.8, 0.8, 0.8, 1.0),
        ]

    def test_integrated_yb_colormap(self):
        """
        Assert silhouette plot colormap can be set with a yellowbrick palette
        """
        # Generate a blobs data set
        X, y = make_blobs(
            n_samples=1000, n_features=12, centers=8, shuffle=False, random_state=0
        )
        visualizer = SilhouetteVisualizer(
            KMeans(random_state=0), colormap="neural_paint"
        )
        visualizer.fit(X)
        visualizer.finalize()

        # Looser tolerance on Windows where baseline images differ.
        tol = (
            3.2 if sys.platform == "win32" else 0.01
        )  # Fails on AppVeyor with RMS 3.143
        self.assert_images_similar(visualizer, remove_legend=True, tol=tol)

    def test_colormap_palette_yb(self):
        """
        Assert that supplying a yellowbrick palette as colormap works
        """
        colormap = ColorPalette("neural_paint")
        assert resolve_colors(colormap=colormap) == [
            (0.08627450980392157, 0.44313725490196076, 0.5725490196078431),
            (0.43137254901960786, 0.4588235294117647, 0.2823529411764706),
            (0.7725490196078432, 0.6352941176470588, 0.6705882352941176),
            (0.0, 0.8, 1.0),
            (0.8705882352941177, 0.47058823529411764, 0.6823529411764706),
            (1.0, 0.8, 0.6),
            (0.23921568627450981, 0.24705882352941178, 0.25882352941176473),
            (1.0, 1.0, 0.8),
        ]

    def test_colormap_cmap_with_colors(self):
        """
        Assert that colors overrides a mpl colormap if both are provided
        """
        colormap = cm.get_cmap("nipy_spectral")
        overriding_colors = [
            (0.0, 0.0, 0.0, 1.0),
            (0.0, 0.6444666666666666, 0.7333666666666667, 1.0),
            (0.7999666666666666, 0.9777666666666667, 0.0, 1.0),
            (0.8, 0.8, 0.8, 1.0),
        ]
        with pytest.warns(Warning, match="both colormap and colors specified"):
            colors = resolve_colors(colormap=colormap, colors=overriding_colors)
            assert colors == overriding_colors

    def test_colormap_palette_yb_colors(self):
        """
        Assert that colors overrides a yellowbrick colormap if both are provided
        """
        colormap = ColorPalette("neural_paint")
        overriding_colors = [
            (0.0, 0.0, 0.0, 1.0),
            (0.0, 0.6444666666666666, 0.7333666666666667, 1.0),
            (0.7999666666666666, 0.9777666666666667, 0.0, 1.0),
            (0.8, 0.8, 0.8, 1.0),
        ]
        with pytest.warns(Warning, match="both colormap and colors specified"):
            colors = resolve_colors(colormap=colormap, colors=overriding_colors)
            assert colors == overriding_colors

    def test_colormap_invalid_type(self):
        """
        Exception raised when invalid colormap type is supplied
        """
        # A callable that is not a colormap must be rejected.
        with pytest.raises(YellowbrickValueError):
            a = lambda x: x + 1
            resolve_colors(colormap=a)

    def test_colors(self):
        """
        Test passing in a list of colors
        """
        c = PALETTES["flatui"]
        assert resolve_colors(colors=c) == c

    def test_colors_truncate(self):
        """
        Test passing in a list of colors with n_colors truncate
        """
        c = PALETTES["flatui"]
        assert len(c) > 3
        assert len(resolve_colors(n_colors=3, colors=c)) == 3

    def test_colors_multiply(self):
        """
        Test passing in a list of colors with n_colors multiply
        """
        c = PALETTES["flatui"]
        assert len(c) < 12
        assert len(resolve_colors(n_colors=12, colors=c)) == 12
| apache-2.0 |
scizen9/kpy | SEDMrph/flexure_test.py | 1 | 6205 | # -*- coding: utf-8 -*-
"""
Created on Mon May 9 12:25:51 2016
@author: nblago
"""
import matplotlib
matplotlib.use("Agg")
import sextractor
import glob, os
import numpy as np
from astropy.coordinates import SkyCoord
from matplotlib import pylab as plt
import argparse
from astropy.io import fits as pf
import fitsutils
import datetime
import argparse
import logging
#Log into a file
# Module-level logging setup: one log file per UTC day under /tmp,
# named "listener_YYYY-MM-DD.log" (date portion of the ISO timestamp).
FORMAT = '%(asctime)-15s %(levelname)s [%(name)s] %(message)s'
root_dir = "/tmp/"
now = datetime.datetime.utcnow()
timestamp=datetime.datetime.isoformat(now)
# Keep only the date portion ("YYYY-MM-DD") of the ISO timestamp.
timestamp=timestamp.split("T")[0]
logging.basicConfig(format=FORMAT, filename=os.path.join(root_dir, "listener_{0}.log".format(timestamp)), level=logging.INFO)
logger = logging.getLogger('flexure')
def run_flexure_test(sexfiles, plotdir):
    '''
    Compares the positions for the extracted lines for all the files given as a parameter.

    Each SExtractor catalog (".sex") is filtered to bright, round-shaped
    detections; the surviving X/Y positions are written next to the
    catalog as ".pos" files. The first non-empty position file serves as
    the reference, every later file is matched against it, and the median
    per-source displacement (in pixels) is appended to plotdir/flexure.log
    before the time series is plotted via plot_flexure.

    NOTE(review): assumes the SExtractor output columns are X, Y, ...,
    with a magnitude at index 4 and a roundness measure at index 8 --
    confirm against the .sex configuration that produced these catalogs.
    Python 2 code (uses the print statement).
    '''
    sexfiles.sort()
    posfiles = []

    logger.info("Running flexure test with %s files."%sexfiles)

    with open(os.path.join(plotdir, "flexure.log"), "w") as out:
        for sf in sexfiles:
            c = np.genfromtxt(sf)
            # We want bright sources.
            c = c[c[:,4]< -9]
            # We want round-shaped sources.
            c = c[c[:,8]<0.6]
            #Save the positions in a separate file with only X Y.
            np.savetxt(sf.replace(".sex", ".pos"), c[:,0:2])
            posfiles.append(sf.replace(".sex", ".pos"))

        # Reference positions: first .pos file that actually has sources.
        c0 = np.genfromtxt(posfiles[0])
        i = 1
        while len(c0) ==0 and i<len(posfiles):
            c0 = np.genfromtxt(posfiles[i])
            i = i+1

        flexure = []
        for f in posfiles[i:]:
            print f
            c1 = np.genfromtxt(f)
            # Embed pixel coordinates in a dummy 3D cartesian frame so
            # astropy's nearest-neighbour matching machinery can be reused.
            c = SkyCoord(x=c0[:,0], y=c0[:,1], z=np.zeros(len(c0)), unit='m', representation='cartesian')
            catalog = SkyCoord(x=c1[:,0], y=c1[:,1], z=np.zeros(len(c1)), unit='m', representation='cartesian')#SkyCoord(x=c1[:0]*u.pixel, y=c1[:,1]*u.pixel, z=np.zeros(len(c0))*u.pixel, representation='cartesian')
            #Now idx are indices into catalog that are the closest objects to each of the
            #coordinates in c, d2d are the on-sky distances between them.
            idx, d2d, d3d = c.match_to_catalog_sky(catalog)
            matches = catalog[idx]
            #Only consider it is a match if the distance is less than 10 pixels.
            matches = matches[d3d.value<10]
            d3d = d3d[d3d.value<10]

            #Add the flexure for plotting later.
            flexure.append(np.median(d3d.value))
            out.write("%s,%s,%.3f\n"%(os.path.basename(posfiles[0]), os.path.basename(f), np.median(d3d.value)))

            # Disabled per-pair diagnostic plots, kept as a string literal.
            '''plt.hist(d3d, bins=50)
            plt.xlabel("Deviation [pixels]")
            plt.title("Median deviation: %.3f"%np.median(d3d.value))
            plt.savefig(os.path.join(plotdir, "%s_vs_%s.png"%(os.path.basename(posfiles[0]), os.path.basename(f))))
            plt.clf()

            plt.scatter(matches.x.value, matches.y.value, c=np.minimum(np.mean(d3d.value)+2*np.std(d3d.value), d3d.value))
            c = plt.colorbar()
            c.set_label("Deviation [pixels]")
            plt.savefig(os.path.join(plotdir, "%s_vs_%s_XY.png"%(os.path.basename(posfiles[0]), os.path.basename(f))))
            plt.clf()'''

    plot_flexure(os.path.join(plotdir, "flexure.log"), plotdir)
def plot_flexure(flexfile, plotdir):
    """Plot the median flexure (in pixels) versus time and save it as a PNG.

    flexfile: CSV written by run_flexure_test, rows of
        "ref_posfile,posfile,median_deviation".
    plotdir: directory where flexure_YYYYMMDD.png is written.
    """
    import matplotlib.dates as md
    xfmt = md.DateFormatter('%d %b %H h')
    # NOTE(review): with a single data row, genfromtxt(dtype=None) returns a
    # 0-d record and the loop below would fail -- confirm flexure.log always
    # holds at least two rows before relying on this.
    flexure = np.genfromtxt(flexfile, dtype=None, delimiter=",")
    dateflex = []
    for fl in flexure:
        # Format of the file name: ifu20160630_00_08_38.pos -> datetime
        t = datetime.datetime.strptime(fl["f1"], 'ifu%Y%m%d_%H_%M_%S.pos')
        dateflex.append(t)
    # Column "f2" holds the median deviation in pixels.
    plt.plot(dateflex, flexure["f2"], "o-")
    plt.xlabel("Date")
    plt.ylabel("Median (flexure) [pixels]")
    plt.gca().xaxis.set_major_formatter(xfmt)
    labels = plt.gca().get_xticklabels()
    plt.setp(labels, rotation=30, fontsize=10)
    # Name the output after the most recent observation date.
    curdate = datetime.datetime.strftime(dateflex[-1], "%Y%m%d")
    plt.savefig(os.path.join(plotdir, "flexure_%s.png" % curdate), bbox_inches='tight')
    plt.clf()
if __name__ == '__main__':
    # Command-line entry point: collect tonight's (and last night's) Hg arc
    # frames, run SExtractor on them, and compute the flexure statistics.
    parser = argparse.ArgumentParser(description=\
    '''
    Runs astrometry.net on the image specified as a parameter and returns
    the offset needed to be applied in order to center the object coordinates
    in the reference pixel.
    ''', formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-d', dest="raw", type=str, help='Directory containing the raw fits for the night.', default=None)
    args = parser.parse_args()
    raw = args.raw
    # Default to today's (UTC) photometry directory when none is given.
    if (raw is None):
        timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow())
        timestamp = timestamp.split("T")[0].replace("-","")
        raw = os.path.join("/scr2/sedm/phot/", timestamp)
    #Get the files from the same day directory.
    files = glob.glob(os.path.join(raw, "ifu*fits"))
    #files_hg = [f for f in files if "Calib: Hg" in get_par(f, "OBJECT")]
    #Get the files from one day before directory to see the differences between days.
    daybefore = datetime.datetime.isoformat(datetime.datetime.utcnow()-datetime.timedelta(1))
    daybefore = daybefore.split("T")[0].replace("-","")
    daybefore = os.path.join("/scr2/sedm/phot/", daybefore)
    if(os.path.isdir(daybefore)):
        filesold = glob.glob(os.path.join(daybefore, "ifu*fits"))
        files.extend(filesold)
    # Keep only the mercury arc-lamp calibration exposures.
    files_hg = [f for f in files if fitsutils.has_par(f, "OBJECT") and "Calib: Hg" in fitsutils.get_par(f, "OBJECT")]
    logger.info("Found the following Hg files: %s"%files_hg)
    # The flexure comparison needs at least two catalogues to compare.
    if (len(files_hg)>1):
        files_hg.sort()
        sexfiles = sextractor.run_sex(files_hg, mask=False)
        plotdir = os.path.join(raw, "stats")
        if (not os.path.isdir(plotdir)):
            os.makedirs(plotdir)
        run_flexure_test(sexfiles, plotdir=plotdir)
| gpl-2.0 |
kdebrab/pandas | pandas/tests/indexing/test_categorical.py | 5 | 26870 | # -*- coding: utf-8 -*-
import pytest
import pandas as pd
import pandas.compat as compat
import numpy as np
from pandas import (Series, DataFrame, Timestamp, Categorical,
CategoricalIndex, Interval, Index)
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas.util import testing as tm
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.dtypes.dtypes import CategoricalDtype
class TestCategoricalIndex(object):
    def setup_method(self, method):
        """Build the CategoricalIndex-indexed fixture frames: df/df2 with
        string categories ('e' unused in df2), df3/df4 with integer
        categories (ordered / unordered)."""
        self.df = DataFrame({'A': np.arange(6, dtype='int64'),
                             'B': Series(list('aabbca')).astype(
                                 CDT(list('cab')))}).set_index('B')
        self.df2 = DataFrame({'A': np.arange(6, dtype='int64'),
                              'B': Series(list('aabbca')).astype(
                                  CDT(list('cabe')))}).set_index('B')
        self.df3 = DataFrame({'A': np.arange(6, dtype='int64'),
                              'B': (Series([1, 1, 2, 1, 3, 2])
                                    .astype(CDT([3, 2, 1], ordered=True)))
                              }).set_index('B')
        self.df4 = DataFrame({'A': np.arange(6, dtype='int64'),
                              'B': (Series([1, 1, 2, 1, 3, 2])
                                    .astype(CDT([3, 2, 1], ordered=False)))
                              }).set_index('B')
    def test_loc_scalar(self):
        """Scalar .loc on a CategoricalIndex: get, set, and errors for
        labels outside the categories."""
        result = self.df.loc['a']
        expected = (DataFrame({'A': [0, 1, 5],
                               'B': (Series(list('aaa'))
                                     .astype(CDT(list('cab'))))})
                    .set_index('B'))
        assert_frame_equal(result, expected)

        df = self.df.copy()
        df.loc['a'] = 20
        expected = (DataFrame({'A': [20, 20, 2, 3, 4, 20],
                               'B': (Series(list('aabbca'))
                                     .astype(CDT(list('cab'))))})
                    .set_index('B'))
        assert_frame_equal(df, expected)

        # value not in the categories
        pytest.raises(KeyError, lambda: df.loc['d'])

        # assignment to a label outside the categories raises TypeError
        def f():
            df.loc['d'] = 10

        pytest.raises(TypeError, f)

        def f():
            df.loc['d', 'A'] = 10

        pytest.raises(TypeError, f)

        def f():
            df.loc['d', 'C'] = 10

        pytest.raises(TypeError, f)
    def test_getitem_scalar(self):
        """Scalar __getitem__ by a category value on a Categorical index."""
        cats = Categorical([Timestamp('12-31-1999'),
                            Timestamp('12-31-2000')])
        s = Series([1, 2], index=cats)

        expected = s.iloc[0]
        result = s[cats[0]]
        assert result == expected
    def test_slicing_directly(self):
        """Slicing a Categorical keeps its categories intact."""
        cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
        sliced = cat[3]
        assert sliced == "d"
        sliced = cat[3:5]
        expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
        tm.assert_numpy_array_equal(sliced._codes, expected._codes)
        tm.assert_index_equal(sliced.categories, expected.categories)
    def test_slicing(self):
        """Positional and label slicing of Series/DataFrames holding
        categorical columns (including pd.cut intervals)."""
        cat = Series(Categorical([1, 2, 3, 4]))
        reversed = cat[::-1]
        exp = np.array([4, 3, 2, 1], dtype=np.int64)
        tm.assert_numpy_array_equal(reversed.__array__(), exp)

        df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
        df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])

        expected = Series([11, Interval(0, 25)], index=['value', 'D'], name=10)
        result = df.iloc[10]
        tm.assert_series_equal(result, expected)

        expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
                             index=np.arange(10, 20).astype('int64'))
        expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
        result = df.iloc[10:20]
        tm.assert_frame_equal(result, expected)

        expected = Series([9, Interval(0, 25)], index=['value', 'D'], name=8)
        result = df.loc[8]
        tm.assert_series_equal(result, expected)
    def test_slicing_and_getting_ops(self):
        """Systematically exercise iloc/loc/iat/at and fancy indexing on a
        frame with a categorical column, checking dtype preservation."""
        # systematically test the slicing operations:
        #  for all slicing ops:
        #   - returning a dataframe
        #   - returning a column
        #   - returning a row
        #   - returning a single value
        cats = Categorical(
            ["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
        idx = Index(["h", "i", "j", "k", "l", "m", "n"])
        values = [1, 2, 3, 4, 5, 6, 7]
        df = DataFrame({"cats": cats, "values": values}, index=idx)

        # the expected values
        cats2 = Categorical(["b", "c"], categories=["a", "b", "c"])
        idx2 = Index(["j", "k"])
        values2 = [3, 4]

        # 2:4,: | "j":"k",:
        exp_df = DataFrame({"cats": cats2, "values": values2}, index=idx2)

        # :,"cats" | :,0
        exp_col = Series(cats, index=idx, name='cats')

        # "j",: | 2,:
        exp_row = Series(["b", 3], index=["cats", "values"], dtype="object",
                         name="j")

        # "j","cats | 2,0
        exp_val = "b"

        # iloc
        # frame
        res_df = df.iloc[2:4, :]
        tm.assert_frame_equal(res_df, exp_df)
        assert is_categorical_dtype(res_df["cats"])

        # row
        res_row = df.iloc[2, :]
        tm.assert_series_equal(res_row, exp_row)
        assert isinstance(res_row["cats"], compat.string_types)

        # col
        res_col = df.iloc[:, 0]
        tm.assert_series_equal(res_col, exp_col)
        assert is_categorical_dtype(res_col)

        # single value
        res_val = df.iloc[2, 0]
        assert res_val == exp_val

        # loc
        # frame
        res_df = df.loc["j":"k", :]
        tm.assert_frame_equal(res_df, exp_df)
        assert is_categorical_dtype(res_df["cats"])

        # row
        res_row = df.loc["j", :]
        tm.assert_series_equal(res_row, exp_row)
        assert isinstance(res_row["cats"], compat.string_types)

        # col
        res_col = df.loc[:, "cats"]
        tm.assert_series_equal(res_col, exp_col)
        assert is_categorical_dtype(res_col)

        # single value
        res_val = df.loc["j", "cats"]
        assert res_val == exp_val

        # ix
        # frame
        # res_df = df.loc["j":"k",[0,1]] # doesn't work?
        res_df = df.loc["j":"k", :]
        tm.assert_frame_equal(res_df, exp_df)
        assert is_categorical_dtype(res_df["cats"])

        # row
        res_row = df.loc["j", :]
        tm.assert_series_equal(res_row, exp_row)
        assert isinstance(res_row["cats"], compat.string_types)

        # col
        res_col = df.loc[:, "cats"]
        tm.assert_series_equal(res_col, exp_col)
        assert is_categorical_dtype(res_col)

        # single value
        res_val = df.loc["j", df.columns[0]]
        assert res_val == exp_val

        # iat
        res_val = df.iat[2, 0]
        assert res_val == exp_val

        # at
        res_val = df.at["j", "cats"]
        assert res_val == exp_val

        # fancy indexing
        exp_fancy = df.iloc[[2]]

        res_fancy = df[df["cats"] == "b"]
        tm.assert_frame_equal(res_fancy, exp_fancy)
        res_fancy = df[df["values"] == 3]
        tm.assert_frame_equal(res_fancy, exp_fancy)

        # get_value
        res_val = df.at["j", "cats"]
        assert res_val == exp_val

        # i : int, slice, or sequence of integers
        res_row = df.iloc[2]
        tm.assert_series_equal(res_row, exp_row)
        assert isinstance(res_row["cats"], compat.string_types)

        res_df = df.iloc[slice(2, 4)]
        tm.assert_frame_equal(res_df, exp_df)
        assert is_categorical_dtype(res_df["cats"])

        res_df = df.iloc[[2, 3]]
        tm.assert_frame_equal(res_df, exp_df)
        assert is_categorical_dtype(res_df["cats"])

        res_col = df.iloc[:, 0]
        tm.assert_series_equal(res_col, exp_col)
        assert is_categorical_dtype(res_col)

        res_df = df.iloc[:, slice(0, 2)]
        tm.assert_frame_equal(res_df, df)
        assert is_categorical_dtype(res_df["cats"])

        res_df = df.iloc[:, [0, 1]]
        tm.assert_frame_equal(res_df, df)
        assert is_categorical_dtype(res_df["cats"])
    def test_slicing_doc_examples(self):
        """Examples from the docs (GH 7918): slicing frames with
        categorical columns preserves dtype and categories."""
        # GH 7918
        cats = Categorical(["a", "b", "b", "b", "c", "c", "c"],
                           categories=["a", "b", "c"])
        idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
        values = [1, 2, 2, 2, 3, 4, 5]
        df = DataFrame({"cats": cats, "values": values}, index=idx)

        result = df.iloc[2:4, :]
        expected = DataFrame(
            {"cats": Categorical(['b', 'b'], categories=['a', 'b', 'c']),
             "values": [2, 2]}, index=['j', 'k'])
        tm.assert_frame_equal(result, expected)

        result = df.iloc[2:4, :].dtypes
        expected = Series(['category', 'int64'], ['cats', 'values'])
        tm.assert_series_equal(result, expected)

        result = df.loc["h":"j", "cats"]
        expected = Series(Categorical(['a', 'b', 'b'],
                                      categories=['a', 'b', 'c']),
                          index=['h', 'i', 'j'], name='cats')
        tm.assert_series_equal(result, expected)

        result = df.loc["h":"j", df.columns[0:1]]
        expected = DataFrame({'cats': Categorical(['a', 'b', 'b'],
                                                  categories=['a', 'b', 'c'])},
                             index=['h', 'i', 'j'])
        tm.assert_frame_equal(result, expected)
    def test_getitem_category_type(self):
        """iloc on a categorical Series keeps the CategoricalDtype (GH 14580)."""
        # GH 14580
        # test iloc() on Series with Categorical data
        s = Series([1, 2, 3]).astype('category')

        # get slice
        result = s.iloc[0:2]
        expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
        tm.assert_series_equal(result, expected)

        # get list of indexes
        result = s.iloc[[0, 1]]
        expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3]))
        tm.assert_series_equal(result, expected)

        # get boolean array
        result = s.iloc[[True, False, False]]
        expected = Series([1]).astype(CategoricalDtype([1, 2, 3]))
        tm.assert_series_equal(result, expected)
    def test_loc_listlike(self):
        """List-of-labels .loc on a CategoricalIndex: reordering, unused
        categories, assignment, and KeyError for non-category labels."""
        # list of labels
        result = self.df.loc[['c', 'a']]
        expected = self.df.iloc[[4, 0, 1, 5]]
        assert_frame_equal(result, expected, check_index_type=True)

        result = self.df2.loc[['a', 'b', 'e']]
        exp_index = CategoricalIndex(
            list('aaabbe'), categories=list('cabe'), name='B')
        expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan]}, index=exp_index)
        assert_frame_equal(result, expected, check_index_type=True)

        # element in the categories but not in the values
        pytest.raises(KeyError, lambda: self.df2.loc['e'])

        # assign is ok
        df = self.df2.copy()
        df.loc['e'] = 20
        result = df.loc[['a', 'b', 'e']]
        exp_index = CategoricalIndex(
            list('aaabbe'), categories=list('cabe'), name='B')
        expected = DataFrame({'A': [0, 1, 5, 2, 3, 20]}, index=exp_index)
        assert_frame_equal(result, expected)

        df = self.df2.copy()
        result = df.loc[['a', 'b', 'e']]
        exp_index = CategoricalIndex(
            list('aaabbe'), categories=list('cabe'), name='B')
        expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan]}, index=exp_index)
        assert_frame_equal(result, expected, check_index_type=True)

        # not all labels in the categories
        with pytest.raises(KeyError):
            self.df2.loc[['a', 'd']]
    def test_loc_listlike_dtypes(self):
        """List-like .loc with unique, duplicated, and unused categories
        (GH 11586)."""
        # GH 11586

        # unique categories and codes
        index = CategoricalIndex(['a', 'b', 'c'])
        df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=index)

        # unique slice
        res = df.loc[['a', 'b']]
        exp_index = CategoricalIndex(['a', 'b'],
                                     categories=index.categories)
        exp = DataFrame({'A': [1, 2], 'B': [4, 5]}, index=exp_index)
        tm.assert_frame_equal(res, exp, check_index_type=True)

        # duplicated slice
        res = df.loc[['a', 'a', 'b']]
        exp_index = CategoricalIndex(['a', 'a', 'b'],
                                     categories=index.categories)
        exp = DataFrame({'A': [1, 1, 2], 'B': [4, 4, 5]}, index=exp_index)
        tm.assert_frame_equal(res, exp, check_index_type=True)

        with tm.assert_raises_regex(
                KeyError,
                'a list-indexer must only include values that are '
                'in the categories'):
            df.loc[['a', 'x']]

        # duplicated categories and codes
        index = CategoricalIndex(['a', 'b', 'a'])
        df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=index)

        # unique slice
        res = df.loc[['a', 'b']]
        exp = DataFrame({'A': [1, 3, 2],
                         'B': [4, 6, 5]},
                        index=CategoricalIndex(['a', 'a', 'b']))
        tm.assert_frame_equal(res, exp, check_index_type=True)

        # duplicated slice
        res = df.loc[['a', 'a', 'b']]
        exp = DataFrame(
            {'A': [1, 3, 1, 3, 2],
             'B': [4, 6, 4, 6, 5
                   ]}, index=CategoricalIndex(['a', 'a', 'a', 'a', 'b']))
        tm.assert_frame_equal(res, exp, check_index_type=True)

        with tm.assert_raises_regex(
                KeyError,
                'a list-indexer must only include values '
                'that are in the categories'):
            df.loc[['a', 'x']]

        # contains unused category
        index = CategoricalIndex(
            ['a', 'b', 'a', 'c'], categories=list('abcde'))
        df = DataFrame({'A': [1, 2, 3, 4], 'B': [5, 6, 7, 8]}, index=index)

        res = df.loc[['a', 'b']]
        exp = DataFrame({'A': [1, 3, 2], 'B': [5, 7, 6]},
                        index=CategoricalIndex(['a', 'a', 'b'],
                                               categories=list('abcde')))
        tm.assert_frame_equal(res, exp, check_index_type=True)

        res = df.loc[['a', 'e']]
        exp = DataFrame({'A': [1, 3, np.nan], 'B': [5, 7, np.nan]},
                        index=CategoricalIndex(['a', 'a', 'e'],
                                               categories=list('abcde')))
        tm.assert_frame_equal(res, exp, check_index_type=True)

        # duplicated slice
        res = df.loc[['a', 'a', 'b']]
        exp = DataFrame({'A': [1, 3, 1, 3, 2], 'B': [5, 7, 5, 7, 6]},
                        index=CategoricalIndex(['a', 'a', 'a', 'a', 'b'],
                                               categories=list('abcde')))
        tm.assert_frame_equal(res, exp, check_index_type=True)

        with tm.assert_raises_regex(
                KeyError,
                'a list-indexer must only include values '
                'that are in the categories'):
            df.loc[['a', 'x']]
    def test_get_indexer_array(self):
        """get_indexer with an object array of the category values."""
        arr = np.array([Timestamp('1999-12-31 00:00:00'),
                        Timestamp('2000-12-31 00:00:00')], dtype=object)
        cats = [Timestamp('1999-12-31 00:00:00'),
                Timestamp('2000-12-31 00:00:00')]
        ci = CategoricalIndex(cats,
                              categories=cats,
                              ordered=False, dtype='category')
        result = ci.get_indexer(arr)
        expected = np.array([0, 1], dtype='intp')
        tm.assert_numpy_array_equal(result, expected)
    def test_get_indexer_same_categories_same_order(self):
        """get_indexer between indexes sharing category order."""
        ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'])

        result = ci.get_indexer(CategoricalIndex(['b', 'b'],
                                                 categories=['a', 'b']))
        expected = np.array([1, 1], dtype='intp')
        tm.assert_numpy_array_equal(result, expected)
    def test_get_indexer_same_categories_different_order(self):
        """get_indexer must match by value even when the target's
        categories are in a different order (GH 19551)."""
        # https://github.com/pandas-dev/pandas/issues/19551
        ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'])

        result = ci.get_indexer(CategoricalIndex(['b', 'b'],
                                                 categories=['b', 'a']))
        expected = np.array([1, 1], dtype='intp')
        tm.assert_numpy_array_equal(result, expected)
    def test_getitem_with_listlike(self):
        """Column selection by a list of categorical labels (GH 16115)."""
        # GH 16115
        cats = Categorical([Timestamp('12-31-1999'),
                            Timestamp('12-31-2000')])

        expected = DataFrame([[1, 0], [0, 1]], dtype='uint8',
                             index=[0, 1], columns=cats)
        dummies = pd.get_dummies(cats)
        result = dummies[[c for c in dummies.columns]]
        assert_frame_equal(result, expected)
    def test_setitem_listlike(self):
        """Setting via a non-default-dtype integer indexer coerces
        correctly (GH 9469)."""
        # GH 9469
        # properly coerce the input indexers
        np.random.seed(1)
        c = Categorical(np.random.randint(0, 5, size=150000).astype(
            np.int8)).add_categories([-1000])
        indexer = np.array([100000]).astype(np.int64)
        c[indexer] = -1000

        # we are asserting the code result here
        # which maps to the -1000 category
        result = c.codes[np.array([100000]).astype(np.int64)]
        tm.assert_numpy_array_equal(result, np.array([5], dtype='int8'))
    def test_ix_categorical_index(self):
        """.loc on frames whose index AND columns are CategoricalIndex,
        both unique and non-unique (GH 12531)."""
        # GH 12531
        df = DataFrame(np.random.randn(3, 3),
                       index=list('ABC'), columns=list('XYZ'))
        cdf = df.copy()
        cdf.index = CategoricalIndex(df.index)
        cdf.columns = CategoricalIndex(df.columns)

        expect = Series(df.loc['A', :], index=cdf.columns, name='A')
        assert_series_equal(cdf.loc['A', :], expect)

        expect = Series(df.loc[:, 'X'], index=cdf.index, name='X')
        assert_series_equal(cdf.loc[:, 'X'], expect)

        exp_index = CategoricalIndex(list('AB'), categories=['A', 'B', 'C'])
        expect = DataFrame(df.loc[['A', 'B'], :], columns=cdf.columns,
                           index=exp_index)
        assert_frame_equal(cdf.loc[['A', 'B'], :], expect)

        exp_columns = CategoricalIndex(list('XY'),
                                       categories=['X', 'Y', 'Z'])
        expect = DataFrame(df.loc[:, ['X', 'Y']], index=cdf.index,
                           columns=exp_columns)
        assert_frame_equal(cdf.loc[:, ['X', 'Y']], expect)

        # non-unique
        df = DataFrame(np.random.randn(3, 3),
                       index=list('ABA'), columns=list('XYX'))
        cdf = df.copy()
        cdf.index = CategoricalIndex(df.index)
        cdf.columns = CategoricalIndex(df.columns)

        exp_index = CategoricalIndex(list('AA'), categories=['A', 'B'])
        expect = DataFrame(df.loc['A', :], columns=cdf.columns,
                           index=exp_index)
        assert_frame_equal(cdf.loc['A', :], expect)

        exp_columns = CategoricalIndex(list('XX'), categories=['X', 'Y'])
        expect = DataFrame(df.loc[:, 'X'], index=cdf.index,
                           columns=exp_columns)
        assert_frame_equal(cdf.loc[:, 'X'], expect)

        expect = DataFrame(df.loc[['A', 'B'], :], columns=cdf.columns,
                           index=CategoricalIndex(list('AAB')))
        assert_frame_equal(cdf.loc[['A', 'B'], :], expect)

        expect = DataFrame(df.loc[:, ['X', 'Y']], index=cdf.index,
                           columns=CategoricalIndex(list('XXY')))
        assert_frame_equal(cdf.loc[:, ['X', 'Y']], expect)
    def test_read_only_source(self):
        """Indexing results must not differ between writable and
        read-only backing arrays (GH 10043)."""
        # GH 10043
        rw_array = np.eye(10)
        rw_df = DataFrame(rw_array)

        ro_array = np.eye(10)
        ro_array.setflags(write=False)
        ro_df = DataFrame(ro_array)

        assert_frame_equal(rw_df.iloc[[1, 2, 3]], ro_df.iloc[[1, 2, 3]])
        assert_frame_equal(rw_df.iloc[[1]], ro_df.iloc[[1]])
        assert_series_equal(rw_df.iloc[1], ro_df.iloc[1])
        assert_frame_equal(rw_df.iloc[1:3], ro_df.iloc[1:3])

        assert_frame_equal(rw_df.loc[[1, 2, 3]], ro_df.loc[[1, 2, 3]])
        assert_frame_equal(rw_df.loc[[1]], ro_df.loc[[1]])
        assert_series_equal(rw_df.loc[1], ro_df.loc[1])
        assert_frame_equal(rw_df.loc[1:3], ro_df.loc[1:3])
    def test_reindexing(self):
        """Reindexing a CategoricalIndex frame: list targets give a plain
        index, Categorical targets give back a CategoricalIndex; duplicate
        targets and method/level/limit args raise."""
        # reindexing
        # convert to a regular index
        result = self.df2.reindex(['a', 'b', 'e'])
        expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan],
                              'B': Series(list('aaabbe'))}).set_index('B')
        assert_frame_equal(result, expected, check_index_type=True)

        result = self.df2.reindex(['a', 'b'])
        expected = DataFrame({'A': [0, 1, 5, 2, 3],
                              'B': Series(list('aaabb'))}).set_index('B')
        assert_frame_equal(result, expected, check_index_type=True)

        result = self.df2.reindex(['e'])
        expected = DataFrame({'A': [np.nan],
                              'B': Series(['e'])}).set_index('B')
        assert_frame_equal(result, expected, check_index_type=True)

        result = self.df2.reindex(['d'])
        expected = DataFrame({'A': [np.nan],
                              'B': Series(['d'])}).set_index('B')
        assert_frame_equal(result, expected, check_index_type=True)

        # since we are actually reindexing with a Categorical
        # then return a Categorical
        cats = list('cabe')

        result = self.df2.reindex(Categorical(['a', 'd'], categories=cats))
        expected = DataFrame({'A': [0, 1, 5, np.nan],
                              'B': Series(list('aaad')).astype(
                                  CDT(cats))}).set_index('B')
        assert_frame_equal(result, expected, check_index_type=True)

        result = self.df2.reindex(Categorical(['a'], categories=cats))
        expected = DataFrame({'A': [0, 1, 5],
                              'B': Series(list('aaa')).astype(
                                  CDT(cats))}).set_index('B')
        assert_frame_equal(result, expected, check_index_type=True)

        result = self.df2.reindex(['a', 'b', 'e'])
        expected = DataFrame({'A': [0, 1, 5, 2, 3, np.nan],
                              'B': Series(list('aaabbe'))}).set_index('B')
        assert_frame_equal(result, expected, check_index_type=True)

        result = self.df2.reindex(['a', 'b'])
        expected = DataFrame({'A': [0, 1, 5, 2, 3],
                              'B': Series(list('aaabb'))}).set_index('B')
        assert_frame_equal(result, expected, check_index_type=True)

        result = self.df2.reindex(['e'])
        expected = DataFrame({'A': [np.nan],
                              'B': Series(['e'])}).set_index('B')
        assert_frame_equal(result, expected, check_index_type=True)

        # give back the type of categorical that we received
        result = self.df2.reindex(Categorical(
            ['a', 'd'], categories=cats, ordered=True))
        expected = DataFrame(
            {'A': [0, 1, 5, np.nan],
             'B': Series(list('aaad')).astype(
                 CDT(cats, ordered=True))}).set_index('B')
        assert_frame_equal(result, expected, check_index_type=True)

        result = self.df2.reindex(Categorical(
            ['a', 'd'], categories=['a', 'd']))
        expected = DataFrame({'A': [0, 1, 5, np.nan],
                              'B': Series(list('aaad')).astype(
                                  CDT(['a', 'd']))}).set_index('B')
        assert_frame_equal(result, expected, check_index_type=True)

        # passed duplicate indexers are not allowed
        pytest.raises(ValueError, lambda: self.df2.reindex(['a', 'a']))

        # args NotImplemented ATM
        pytest.raises(NotImplementedError,
                      lambda: self.df2.reindex(['a'], method='ffill'))
        pytest.raises(NotImplementedError,
                      lambda: self.df2.reindex(['a'], level=1))
        pytest.raises(NotImplementedError,
                      lambda: self.df2.reindex(['a'], limit=2))
    def test_loc_slice(self):
        """Label slicing on a CategoricalIndex is not implemented (GH 9748)."""
        # slicing
        # not implemented ATM
        # GH9748

        pytest.raises(TypeError, lambda: self.df.loc[1:5])

        # result = df.loc[1:5]
        # expected = df.iloc[[1,2,3,4]]
        # assert_frame_equal(result, expected)
    def test_boolean_selection(self):
        """Boolean masks from comparisons on ordered vs unordered
        CategoricalIndex; ordering comparisons fail when unordered."""
        df3 = self.df3
        df4 = self.df4

        result = df3[df3.index == 'a']
        expected = df3.iloc[[]]
        assert_frame_equal(result, expected)

        result = df4[df4.index == 'a']
        expected = df4.iloc[[]]
        assert_frame_equal(result, expected)

        result = df3[df3.index == 1]
        expected = df3.iloc[[0, 1, 3]]
        assert_frame_equal(result, expected)

        result = df4[df4.index == 1]
        expected = df4.iloc[[0, 1, 3]]
        assert_frame_equal(result, expected)

        # since we have an ordered categorical

        # CategoricalIndex([1, 1, 2, 1, 3, 2],
        #         categories=[3, 2, 1],
        #         ordered=True,
        #         name=u'B')
        result = df3[df3.index < 2]
        expected = df3.iloc[[4]]
        assert_frame_equal(result, expected)

        result = df3[df3.index > 1]
        expected = df3.iloc[[]]
        assert_frame_equal(result, expected)

        # unordered
        # cannot be compared

        # CategoricalIndex([1, 1, 2, 1, 3, 2],
        #         categories=[3, 2, 1],
        #         ordered=False,
        #         name=u'B')
        pytest.raises(TypeError, lambda: df4[df4.index < 2])
        pytest.raises(TypeError, lambda: df4[df4.index > 1])
    def test_indexing_with_category(self):
        """Comparing a categorical column against a scalar matches the
        object-dtype result (GH 12564)."""
        # https://github.com/pandas-dev/pandas/issues/12564
        # consistent result if comparing as Dataframe

        cat = DataFrame({'A': ['foo', 'bar', 'baz']})
        exp = DataFrame({'A': [True, False, False]})

        res = (cat[['A']] == 'foo')
        tm.assert_frame_equal(res, exp)

        cat['A'] = cat['A'].astype('category')

        res = (cat[['A']] == 'foo')
        tm.assert_frame_equal(res, exp)
    def test_map_with_dict_or_series(self):
        """CategoricalIndex.map accepts dict and Series mappers; unmapped
        values become NaN-equivalents via the mapper's defaults."""
        orig_values = ['a', 'B', 1, 'a']
        new_values = ['one', 2, 3.0, 'one']
        cur_index = pd.CategoricalIndex(orig_values, name='XXX')
        expected = pd.CategoricalIndex(new_values,
                                       name='XXX', categories=[3.0, 2, 'one'])

        mapper = pd.Series(new_values[:-1], index=orig_values[:-1])
        output = cur_index.map(mapper)
        # Order of categories in output can be different
        tm.assert_index_equal(expected, output)

        mapper = {o: n for o, n in
                  zip(orig_values[:-1], new_values[:-1])}
        output = cur_index.map(mapper)
        # Order of categories in output can be different
        tm.assert_index_equal(expected, output)
| bsd-3-clause |
rudischilder/gr10_2 | sw/tools/tcp_aircraft_server/phoenix/__init__.py | 86 | 4470 | #Copyright 2014, Antoine Drouin
"""
Phoenix is a Python library for interacting with Paparazzi
"""
import math
"""
Unit conversions
"""
def rad_of_deg(d):
    """Convert an angle from degrees to radians."""
    return d / 180. * math.pi


def deg_of_rad(r):
    """Convert an angle from radians to degrees."""
    return r * 180. / math.pi


def rps_of_rpm(r):
    """Convert a rotation rate from revolutions/minute to radians/second."""
    return r * 2. * math.pi / 60.


def rpm_of_rps(r):
    """Convert a rotation rate from radians/second to revolutions/minute."""
    return r / 2. / math.pi * 60.


def m_of_inch(i):
    """Convert a length from inches to meters."""
    return i * 0.0254
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color' : 'k', 'fontsize' : 20 }
def save_if(filename):
    """Save the current matplotlib figure to `filename` at 80 dpi; do
    nothing when `filename` is empty/None."""
    if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
    """Create a matplotlib figure, or decorate an existing one.

    Parameters
    ----------
    fig : matplotlib Figure or None
        When None, a new figure of size `figsize` is created.
    window_title : str or None
        Title for the figure's window, when given.
    figsize : tuple of float
        Figure size in inches, used only when a new figure is created.
    margins : 6-tuple or None
        (left, bottom, right, top, wspace, hspace) passed to
        subplots_adjust, when given.

    Returns
    -------
    fig : the (possibly new) matplotlib Figure.
    """
    # PEP 8: compare against None with `is`, not `==`.
    if fig is None:
        fig = plt.figure(figsize=figsize)
    if margins:
        left, bottom, right, top, wspace, hspace = margins
        fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
                            hspace=hspace, wspace=wspace)
    if window_title:
        fig.canvas.set_window_title(window_title)
    return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
    """Apply common decorations (grid, labels, title, legend, axis limits)
    to a matplotlib axis.

    Parameters
    ----------
    ax : matplotlib Axes to decorate.
    title, xlab, ylab : str or None -- set only when truthy.
    legend : sequence of labels or None -- passed to ax.legend when given.
    xlim, ylim : 2-sequences or None -- axis limits, when given.
    """
    ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
    ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
    if xlab:
        ax.xaxis.set_label_text(xlab)
    if ylab:
        ax.yaxis.set_label_text(ylab)
    if title:
        ax.set_title(title, my_title_spec)
    # The original used the Python-2-only `<>` operator here; `is not None`
    # is equivalent for these checks and works on Python 2 and 3.
    if legend is not None:
        ax.legend(legend, loc='best')
    if xlim is not None:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
        ax.set_ylim(ylim[0], ylim[1])
"""
Messages
"""
#: dictionary mapping the C type to its length in bytes (e.g char -> 1)
TYPE_TO_LENGTH_MAP = {
    "char"   : 1,
    "uint8"  : 1,
    "int8"   : 1,
    "uint16" : 2,
    "int16"  : 2,
    "uint32" : 4,
    "int32"  : 4,
    "float"  : 4,
    "double" : 8,
}

#: dictionary mapping the C type to correct format string
#: (keyed by Python callables, not strings)
TYPE_TO_PRINT_MAP = {
    float : "%f",
    str   : "%s",
    chr   : "%c",
    int   : "%d"
}

# Reserved aircraft-id values on the datalink.
ACID_ALL = 0xFF
ACID_TEST = 0xFE
ACID_GROUNDSTATION = 0xFD

#: dictionary mapping debug types to format characters
DEBUG_MESSAGES = {
    "DEBUG_UINT8" : "%d",
    "DEBUG_INT32" : "%d",
    "DEBUG_FLOAT" : "%#f"
}
"""
Binary logs
See format description in sw/airborne/subsystems/datalink/fms_link.c
"""
import struct
def hex_of_bin(b):
    """Render each character of `b` as an uppercase two-digit hex byte,
    separated by single spaces (e.g. "AB" -> "41 42")."""
    return ' '.join("%02X" % ord(ch) for ch in b)
import pdb
def read_binary_log(filename, tick_freq = 2*512.):
    """Parse a binary FMS link log.

    See the format description in sw/airborne/subsystems/datalink/fms_link.c.

    filename: path of the binary log.
    tick_freq: timer ticks per second, used to convert packet timestamps
        to seconds.

    Returns a list of [timestamp_seconds, msgs] entries, where msgs is a
    list of [msg_id, raw_payload] pairs.
    """
    f = open(filename, "rb")
    d = f.read()
    packet_header_len = 6
    msg_header_len = 2

    def read_packet(d, packet_start):
        # Packet header: uint32 tick timestamp + uint16 payload length.
        payload_start = packet_start+packet_header_len
        timestamp, payload_len = struct.unpack("IH", d[packet_start:payload_start])
        msgs = read_packet_payload(d, payload_start, payload_len)
        # +2 presumably skips a trailing 2-byte checksum -- TODO confirm
        # against fms_link.c.
        next_packet = payload_start+payload_len+2
        return timestamp, msgs, next_packet

    def read_packet_payload(d, s, l):
        # The payload is a sequence of messages, each prefixed by
        # uint8 length + uint8 id.
        msgs = []
        packet_end = s+l; msg_start = s
        while msg_start<packet_end:
            payload_start = msg_start+msg_header_len
            msg_len, msg_id = struct.unpack("BB", d[msg_start:payload_start])
            payload_end = payload_start+msg_len
            msg_payload = d[payload_start:payload_end]
            msgs.append([msg_id, msg_payload])
            #print msg_id, msg_len, hex_of_bin(msg_payload)
            msg_start = payload_end
        return msgs

    packets = []
    packet_start=0
    while packet_start<len(d):
        timestamp, msgs, next_packet = read_packet(d, packet_start)
        packets.append([timestamp/tick_freq, msgs])
        #print timestamp, msgs
        packet_start = next_packet
    f.close()
    return packets
def extract_from_binary_log(protocol, packets, msg_names, t_min=None, t_max=None):
    """Extract per-message time series from parsed log packets.

    Parameters
    ----------
    protocol : object exposing get_message_by_id('telemetry', id), returning
        a message with a `name` attribute and `unpack_scaled_values(payload)`.
    packets : list of [timestamp, msgs] as returned by read_binary_log.
    msg_names : names of the messages to extract.
    t_min, t_max : float or None
        Optional time window; defaults to the log's first/last timestamps.

    Returns
    -------
    list parallel to msg_names of dicts {'time': [...], 'data': [...]}.
    """
    ret = [{'time': [], 'data': []} for m in msg_names]
    if t_min is None: t_min = packets[0][0]
    if t_max is None: t_max = packets[-1][0]
    for t, msgs in packets:
        if t >= t_min and t <= t_max:
            for id, payload in msgs:
                m = protocol.get_message_by_id('telemetry', id)
                # BUG FIX: the original used try/except:pass/finally, so a
                # message whose name was NOT requested was appended under a
                # stale index `i` from a previous iteration (or raised
                # NameError on the first one). Skip unrequested messages.
                try:
                    i = msg_names.index(m.name)
                except ValueError:
                    continue
                ret[i]['time'].append(t)
                ret[i]['data'].append(m.unpack_scaled_values(payload))
    return ret
| gpl-2.0 |
teonlamont/pyeparse | pyeparse/viz.py | 2 | 17741 | # Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import math
from collections import deque
from functools import partial
from .utils import create_chunks, fwhm_kernel_2d
from ._fixes import string_types
def plot_raw(raw, events=None, title='Raw', show=True):
    """Visualize raw data traces

    Parameters
    ----------
    raw : instance of pyeparse raw
        The raw object to be visualized
    events : array | None
        Events associated with the Raw instance; the first column is
        interpreted as sample indices and drawn as vertical markers.
    title : str
        The title to be displayed.
    show : bool
        Whether to show the figure or not.

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        The resulting figure object.
    """
    import matplotlib.pyplot as mpl
    data, times = raw[:, :]
    names = raw.info['sample_fields']
    # With no events, use a single marker at sample 0 so plotting still works.
    ev_x = np.array([0], int) if events is None else events[:, 0]
    fig = mpl.figure()
    n_row = len(names)
    # All channel subplots share the x (time) axis of the first one.
    ax0 = None
    for ii, (d, n) in enumerate(zip(data, names)):
        ax = mpl.subplot(n_row, 1, ii + 1, sharex=ax0)
        if ax0 is None:
            ax0 = ax
        # Draw each event as a vertical line spanning the channel's range;
        # NaN separators keep the lines disconnected from one another.
        ev_y = np.tile(np.array([np.min(d), np.max(d), np.nan]), len(ev_x))
        ax.plot(np.repeat(times[ev_x], 3), ev_y, color='y')
        ax.plot(times, d, color='k')
        if ii == n_row - 1:
            ax.set_xlabel('Time (sec)')
        ax.set_ylabel(n)
    if show:
        mpl.show()
    return fig
def plot_calibration(raw, title='Calibration', show=True):
    """Visualize calibration

    One figure per calibration: target points in gray, the corresponding
    measured gaze positions (point - diff) in red.

    Parameters
    ----------
    raw : instance of pyeparse raw
        The raw object to be visualized
    title : str
        The title to be displayed.
    show : bool
        Whether to show the figure or not.

    Returns
    -------
    figs : list of of matplotlib.figure.Figure instances
        The resulting figure object
    """
    import matplotlib.pyplot as mpl
    figs = []
    for cal in raw.info['calibrations']:
        fig = mpl.figure()
        figs.append(fig)
        px, py = cal['point_x'], cal['point_y']
        dx, dy = cal['diff_x'], cal['diff_y']

        mpl.title(title)
        mpl.scatter(px, py, color='gray')
        mpl.scatter(px - dx, py - dy, color='red')
        mpl.show() if show else None
    return figs
def _plot_heatmap(xdata, ydata, width, height, cmap=None,
                  vmax=None, colorbar=True,
                  kernel=dict(size=20, half_width=10), show=True):
    """ Plot heatmap of X/Y positions on canvas.

    Accumulates each (x, y) sample onto a width x height canvas, either as
    a single count or smeared with a 2-D FWHM kernel, then displays it with
    imshow. The default `kernel` dict is mutable but never modified, so
    sharing it across calls is safe. Pass kernel=None to disable smoothing.
    Returns (fig, canvas).
    """
    import matplotlib.pyplot as mpl
    if cmap is None:
        cmap = 'RdBu_r'
    canvas = np.zeros((width, height))
    data = np.c_[xdata, ydata]
    # Ignore invalid-value warnings: NaN samples simply fail the mask.
    with np.errstate(invalid='ignore'):
        mask = ((data[:, 0] > 0) & (data[:, 1] > 0) &
                (data[:, 0] < width) & (data[:, 1] < height))
    inds = data[mask].astype('i4')
    if kernel is not None:
        my_kernel = fwhm_kernel_2d(kernel['size'],
                                   kernel['half_width'])
        hsize = kernel['size'] // 2
    for x, y in inds:
        if kernel is not None:
            # Clip the kernel window at the canvas edges; samples whose
            # clipped window no longer matches the kernel shape are skipped.
            kern_indx = np.array([x - hsize, x + hsize])
            kern_indx[kern_indx < 0] = 0
            kern_indx = slice(*kern_indx)
            kern_indy = np.array([y - hsize, y + hsize])
            kern_indy[kern_indy < 0] = 0
            kern_indy = slice(*kern_indy)
            this_part = canvas[kern_indx, kern_indy]
            if this_part.shape == my_kernel.shape:
                this_part += my_kernel
        else:
            canvas[x, y] += 1

    fig = mpl.figure()
    if vmax is None:
        # NOTE(review): with vmax given, vmin is set to -vmax (symmetric
        # scale); without it, the canvas's own min/max are used.
        vmin = canvas.min()
        vmax = canvas.max()
    else:
        vmax = vmax
        vmin = -vmax
    # flip canvas to match width > height
    canvas = canvas.T
    mpl.imshow(canvas, extent=[0, width, 0, height],
               cmap=cmap, aspect='auto', origin='lower', vmin=vmin,
               vmax=vmax)
    mpl.colorbar() if colorbar else None
    mpl.show() if show else None
    return fig, canvas
def plot_heatmap_raw(raw, start=None, stop=None, cmap=None,
                     title=None, vmax=None,
                     kernel=dict(size=20, half_width=10),
                     show=True, colorbar=True):
    """ Plot heatmap of X/Y positions on canvas, e.g., screen

    Parameters
    ----------
    raw : instance of pyeparse raw
        The raw object to be visualized
    start : float | None
        Start time in seconds.
    stop : float | None
        End time in seconds.
    cmap : matplotlib Colormap
        The colormap to use.
    title : str
        The title to be displayed.
    vmax : float | None
        The maximum (and -minimum) value to use for the colormap.
    kernel : dict | None
        Parameters for the smoothing kernel (size, half_width), forwarded
        to the heatmap computation; None disables smoothing.
    colorbar : bool
        Whether to show the colorbar.
    show : bool
        Whether to show the figure or not.

    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The resulting figure object
    """
    import matplotlib.pyplot as mpl
    width, height = raw.info['screen_coords']
    if isinstance(start, float):
        start = raw.time_as_index([start])[0]
    if isinstance(stop, float):
        stop = raw.time_as_index([stop])[0]
    data, times = raw[:2, start:stop]
    xdata, ydata = data
    # BUG FIX: `kernel` was accepted but never forwarded (and its default
    # used the key 'width' instead of the 'half_width' the helper expects),
    # so smoothing options were silently ignored.
    fig, _ = _plot_heatmap(xdata=xdata, ydata=ydata, width=width,
                           height=height, cmap=cmap, vmax=vmax,
                           kernel=kernel, colorbar=False, show=False)

    if title is None:
        # BUG FIX: `times` is already restricted to [start:stop]; the
        # original sliced it a second time before taking the endpoints.
        tstart, tstop = times[[0, -1]]
        title = 'Raw data | {0} - {1} seconds'.format(tstart, tstop)
    mpl.title(title)
    mpl.xlabel('X position (px)')
    mpl.ylabel('y position (px)')
    mpl.colorbar() if colorbar else None
    mpl.show() if show else None
    return fig
"""
Note. The following functions are based on Denis A. Engemann's and
Eric Larson's contribution to MNE-Python
"""
def figure_nobar(*args, **kwargs):
    """Make matplotlib figure with no toolbar.

    Parameters
    ----------
    *args, **kwargs :
        Forwarded verbatim to ``matplotlib.pyplot.figure``.

    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        Figure created with the global 'toolbar' rcParam temporarily set to
        'none' and with key-press callbacks (toolbar shortcuts) removed.
    """
    import matplotlib.pyplot as mpl
    old_val = mpl.rcParams['toolbar']
    # try/finally restores the global rcParam on any outcome; the previous
    # `except Exception: raise` clause was redundant and has been removed.
    try:
        mpl.rcParams['toolbar'] = 'none'
        fig = mpl.figure(*args, **kwargs)
        # remove button press catchers (for toolbar); .get() avoids a
        # KeyError when no key_press_event callbacks are registered
        callbacks = fig.canvas.callbacks.callbacks.get('key_press_event', {})
        for key in list(callbacks.keys()):
            fig.canvas.callbacks.disconnect(key)
    finally:
        mpl.rcParams['toolbar'] = old_val
    return fig
def _prepare_trellis(n_cells, max_col):
    """Aux function: build a subplot grid large enough for ``n_cells`` panels.

    Parameters
    ----------
    n_cells : int
        Number of panels that must remain visible.
    max_col : int
        Maximum number of columns in the grid.

    Returns
    -------
    fig : matplotlib figure
    axes : list of matplotlib axes
        Flattened list of length nrow * ncol; axes beyond ``n_cells`` are
        hidden.
    """
    import matplotlib.pyplot as mpl
    if n_cells == 1:
        nrow = ncol = 1
    elif n_cells <= max_col:
        nrow, ncol = 1, n_cells
    else:
        # ceil(n_cells / max_col) rows; `math` is assumed to be imported at
        # module level (not visible in this chunk) — TODO confirm
        nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
    fig, axes = mpl.subplots(nrow, ncol)
    # subplots() returns a bare Axes (not an array) when the grid is 1x1
    axes = [axes] if ncol == nrow == 1 else axes.flatten()
    for ax in axes[n_cells:]:  # hide unused axes
        ax.set_visible(False)
    return fig, axes
def _draw_epochs_axes(epoch_idx, data, times, axes,
                      title_str, axes_handler, discretes,
                      discrete_colors):
    """Aux function: redraw each epoch panel for the current view.

    Updates the existing line data in-place, draws discrete-event markers
    (dashed vertical lines), and re-applies per-axes reject state, which is
    stored in ``vars(ax)[view]`` dicts keyed by view index.
    """
    # axes_handler is a deque of view indices; the front element is the
    # currently displayed view
    this = axes_handler[0]
    data = np.ma.masked_invalid(data)
    import matplotlib.pyplot as mpl
    for ii, data_, ax in zip(epoch_idx, data, axes):
        # reuse the Line2D objects created on first draw rather than replot
        [l.set_data(times, d) for l, d in zip(ax.lines, data_)]
        n_disc_lines = 0
        if discrete_colors is not None:
            color = discrete_colors[ii]
        else:
            color = 'orange'
        if discretes is not None:
            if discretes[ii] is not None:
                for here in discretes[ii]:
                    ax.axvline(here, color=color, linestyle='--')
                    n_disc_lines += 1
                    # remember the line's default color so reject-toggling
                    # can restore it later
                    vars(ax.lines[-1])['def-col'] = color
        if title_str is not None:
            _set_title(ax, title_str, ii)
        # common y-limits across all panels of this view
        ax.set_ylim(data.min(), data.max())
        # NOTE(review): tick visibility is keyed on the epoch index `ii`
        # here, but on the panel index `i_ax` in plot_epochs — presumably
        # these were meant to match; confirm against upstream MNE code.
        if ii % 5:
            [l.set_visible(0) for l in ax.get_yticklabels()]
        if ii < len(epoch_idx) - 5:
            [l.set_visible(0) for l in ax.get_xticklabels()]
        else:
            [l.set_fontsize(8) for l in ax.get_xticklabels()]
            [l.set_fontsize(8) for l in ax.get_yticklabels()]
            labels = ax.get_xticklabels()
            mpl.setp(labels, rotation=45)
        ax.get_figure().canvas.draw()
        # record how many discrete marker lines this panel holds so that
        # navigation can delete exactly those lines before redrawing
        vars(ax)[this]['n_disc_lines'] = n_disc_lines
        if vars(ax)[this]['reject'] is True:
            # memorizing reject
            [l.set_color((0.8, 0.8, 0.8)) for l in ax.lines]
            ax.get_figure().canvas.draw()
        else:
            # forgetting previous reject
            for k in axes_handler:
                if k == this:
                    continue
                if vars(ax).get(k, {}).get('reject', None) is True:
                    [l.set_color(vars(l)['def-col']) for l in ax.lines]
                    ax.get_figure().canvas.draw()
                    break
def _epochs_navigation_onclick(event, params):
    """Aux function: handle clicks on the back/next/reject-quit buttons.

    ``here`` encodes the rotation direction of the chunk deques
    (+1 = back, -1 = next); reject-quit drops the marked epochs and closes
    both figures.
    """
    import matplotlib.pyplot as mpl
    p = params
    here = None
    if event.inaxes == p['back'].ax:
        here = 1
    elif event.inaxes == p['next'].ax:
        here = -1
    elif event.inaxes == p['reject-quit'].ax:
        p['epochs'].drop_epochs(p['reject_idx'])
        mpl.close(p['fig'])
        mpl.close(event.inaxes.get_figure())
    if here is not None and len(p['axes_handler']) > 1:
        before = p['axes_handler'][0]
        for ax in p['axes']:
            # every view must already have its state dict on this axes
            assert all([ii in vars(ax) for ii in p['axes_handler']])
            # delete the discrete-event marker lines added for the old view
            # (they sit at the end of ax.lines)
            dd = -vars(ax)[before]['n_disc_lines']
            if dd:
                del ax.lines[dd:]
                ax.get_figure().canvas.draw()
                dd = 0
        # rotate both deques in lockstep so the front element is the new view
        p['idx_handler'].rotate(here)
        p['axes_handler'].rotate(here)
        this_idx = p['idx_handler'][0]
        data = p['epochs'].data[this_idx][:, p['picks']]
        _draw_epochs_axes(this_idx, data, p['times'], p['axes'],
                          p['title_str'],
                          p['axes_handler'],
                          p['discretes'],
                          p['discrete_colors'])
        # XXX don't ask me why
        p['axes'][0].get_figure().canvas.draw()
def _epochs_axes_onclick(event, params):
    """Aux function: toggle reject state of an epoch panel on click.

    Clicking a non-rejected panel greys out its lines and records the epoch
    index in ``params['reject_idx']``; clicking again restores the original
    line colors (saved in ``vars(line)['def-col']``) and un-registers it.
    """
    reject_color = (0.8, 0.8, 0.8)
    ax = event.inaxes
    p = params
    # state dict of this panel for the currently displayed view
    here = vars(ax)[p['axes_handler'][0]]
    if here.get('reject', None) is False:
        idx = here['idx']
        if idx not in p['reject_idx']:
            p['reject_idx'].append(idx)
            for line in ax.lines:
                # remember original color before greying out
                vars(line)['def-col'] = line.get_color()
                line.set_color(reject_color)
            here['reject'] = True
    elif here.get('reject', None) is True:
        idx = here['idx']
        if idx in p['reject_idx']:
            p['reject_idx'].pop(p['reject_idx'].index(idx))
            for line in ax.lines:
                line.set_color(vars(line)['def-col'])
            here['reject'] = False
    ax.get_figure().canvas.draw()
def _set_title(ax, title_str, ii):
    """Set the title of an epoch panel.

    ``title_str`` may be a %-format string (filled with the epoch index
    ``ii``), None (default ``#<ii>`` label), or a sequence indexed by ``ii``.
    """
    if title_str is None:
        label = '#%00i' % ii
    elif isinstance(title_str, string_types):
        label = title_str % ii
    else:
        label = title_str[ii]
    ax.set_title(label, fontsize=12)
def plot_epochs(epochs, epoch_idx=None, picks=None, n_chunks=20,
                title_str='#%003i', show=True, draw_discrete=None,
                discrete_colors=None, block=False):
    """ Visualize single trials using Trellis plot.

    Parameters
    ----------
    epochs : instance of pyeparse.Epochs
        The epochs object
    epoch_idx : array-like | int | None
        The epochs to visualize. If None, the first 20 epochs are shown.
        Defaults to None.
    n_chunks : int
        The number of chunks to use for display.
    picks : array-like | None
        Channels to be included. If None only good data channels are used.
        Defaults to None
    title_str : None | str | list-like
        The string formatting to use for axes titles. If None, no titles
        will be shown. Defaults expand to ``#001, #002, ...``. If list-like,
        must be of same length as epochs.events.
    show : bool
        Whether to show the figure or not.
    draw_discrete : {saccades, blinks, fixations} | list-like | None
        The events to draw as vertical lines.
    discrete_colors : list-like | None
        list of str or color objects with length of discrete events drawn.
    block : bool
        Whether to halt program execution until the figure is closed.
        Useful for rejecting bad trials on the fly by clicking on a
        sub plot.

    Returns
    -------
    fig : Instance of matplotlib.figure.Figure
        The figure.
    """
    import matplotlib.pyplot as mpl
    # normalize epoch_idx to a sequence of indices
    if np.isscalar(epoch_idx):
        epoch_idx = [epoch_idx]
    if epoch_idx is None:
        n_events = len(epochs.events)
        epoch_idx = range(n_events)
    else:
        n_events = len(epoch_idx)
    # resolve picks: None -> all data channels; names -> positional indices
    if picks is None:
        picks = np.arange(len(epochs.info['data_cols']))
    elif all(p in epochs.ch_names for p in picks):
        # epochs.data does not include time
        ch_names = [ch for ch in epochs.ch_names if ch in
                    epochs.info['data_cols']]
        picks = [ch_names.index(k) for k in picks]
    elif any(p not in epochs.ch_names and isinstance(p, string_types)
             for p in picks):
        wrong = [p for p in picks if p not in epochs.ch_names]
        raise ValueError('Some channels are not defined: ' + '; '.join(wrong))
    if len(picks) < 1:
        raise RuntimeError('No appropriate channels found. Please'
                           ' check your picks')
    if discrete_colors is not None:
        if len(discrete_colors) != len(epochs.events):
            raise ValueError('The length of `discrete_colors` must equal '
                             'the number of epochs.')
    times = epochs.times * 1e3  # seconds -> milliseconds
    n_traces = len(picks)
    # preallocation needed for min / max scaling
    n_events = len(epochs.events)
    epoch_idx = epoch_idx[:n_events]
    # split epochs into chunks; each chunk is one "view" of the trellis.
    # `create_chunks` is defined elsewhere in this module.
    idx_handler = deque(create_chunks(epoch_idx, n_chunks))
    # handle bads
    this_idx = idx_handler[0]
    fig, axes = _prepare_trellis(len(this_idx), max_col=5)
    axes_handler = deque(range(len(idx_handler)))
    data = np.ma.masked_invalid(epochs.data[this_idx][:, picks])
    if isinstance(draw_discrete, string_types):
        # map e.g. 'saccades' to the internal attribute name (e.g.
        # '_saccades') stored on the epochs object
        key = dict()
        for k in epochs.info['discretes']:
            key[k.strip('_')] = k
        key = key[draw_discrete]
        discretes = [d['stime'] * 1e3 for d in vars(epochs)[key]
                     if d is not None]
    elif draw_discrete is None:
        discretes = None
    else:
        discretes = draw_discrete
    labelfontsize = 10
    # initial draw of the first view; per-axes view state is kept in
    # vars(ax)[view_index] dicts (idx, reject flag, marker-line count)
    for i_ax, (ii, ax, data_) in enumerate(zip(this_idx, axes, data)):
        ax.plot(times, data_.T, color='k')
        ax.axvline(0.0, color='gray')
        vars(ax.lines[-1])['def-col'] = 'gray'
        n_disc_lines = 0
        if discrete_colors is not None:
            color = discrete_colors[ii]
        else:
            color = 'orange'
        if discretes is not None:
            if discretes[ii] is not None:
                for here in discretes[ii]:
                    ax.axvline(here, color=color, linestyle='--')
                    n_disc_lines += 1
                    vars(ax.lines[-1])['def-col'] = color
        if title_str is not None:
            _set_title(ax, title_str, ii)
        ax.set_ylim(data.min(), data.max())
        # show y tick labels only in the first column, x tick labels only
        # in the last row (5-column layout)
        if i_ax % 5:
            [l.set_visible(0) for l in ax.get_yticklabels()]
        else:
            [l.set_fontsize(labelfontsize) for l in ax.get_yticklabels()]
        if i_ax < len(this_idx) - 5:
            [l.set_visible(0) for l in ax.get_xticklabels()]
        else:
            [l.set_fontsize(labelfontsize) for l in ax.get_xticklabels()]
            labels = ax.get_xticklabels()
            mpl.setp(labels, rotation=45)
        vars(ax)[axes_handler[0]] = {'idx': ii, 'reject': False,
                                     'n_disc_lines': n_disc_lines}
    # XXX find smarter way to to tight layout for incomplete plots
    # fig.tight_layout()
    # initialize memory
    for this_view, this_inds in zip(axes_handler, idx_handler):
        if this_view > 0:
            # all other views than the current one
            for ii, ax in enumerate(axes):
                vars(ax)[this_view] = {'idx': ii, 'reject': False,
                                       'n_disc_lines': 0}
    # small auxiliary window with back / next / reject-quit buttons
    navigation = figure_nobar(figsize=(3, 1.5))
    from matplotlib import gridspec
    gs = gridspec.GridSpec(2, 2)
    ax1 = mpl.subplot(gs[0, 0])
    ax2 = mpl.subplot(gs[0, 1])
    ax3 = mpl.subplot(gs[1, :])
    # shared mutable state for the event callbacks
    params = {
        'fig': fig,
        'idx_handler': idx_handler,
        'epochs': epochs,
        'n_traces': n_traces,
        'picks': picks,
        'times': times,
        'axes': axes,
        'back': mpl.Button(ax1, 'back'),
        'next': mpl.Button(ax2, 'next'),
        'reject-quit': mpl.Button(ax3, 'reject-quit'),
        'title_str': title_str,
        'reject_idx': [],
        'axes_handler': axes_handler,
        'discretes': discretes,
        'discrete_colors': discrete_colors,
    }
    fig.canvas.mpl_connect('button_press_event',
                           partial(_epochs_axes_onclick, params=params))
    navigation.canvas.mpl_connect('button_press_event',
                                  partial(_epochs_navigation_onclick,
                                          params=params))
    mpl.show(block=block) if show else None
    return fig
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/indexes/interval/test_construction.py | 2 | 16294 | from functools import partial
import numpy as np
import pytest
from pandas.core.dtypes.common import is_categorical_dtype
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas import (
Categorical,
CategoricalIndex,
Float64Index,
Index,
Int64Index,
Interval,
IntervalIndex,
date_range,
notna,
period_range,
timedelta_range,
)
from pandas.core.arrays import IntervalArray
import pandas.core.common as com
import pandas.util.testing as tm
@pytest.fixture(params=[None, "foo"])
def name(request):
    """Fixture providing an index ``name`` kwarg value (None or a string)."""
    return request.param
class Base:
    """
    Common tests for all variations of IntervalIndex construction. Input data
    to be supplied in breaks format, then converted by the subclass method
    get_kwargs_from_breaks to the expected format.

    Subclasses must provide a ``constructor`` fixture and a
    ``get_kwargs_from_breaks(breaks, closed)`` method. The ``closed``
    fixture is assumed to come from conftest — TODO confirm.
    """
    @pytest.mark.parametrize(
        "breaks",
        [
            [3, 14, 15, 92, 653],
            np.arange(10, dtype="int64"),
            Int64Index(range(-10, 11)),
            Float64Index(np.arange(20, 30, 0.5)),
            date_range("20180101", periods=10),
            date_range("20180101", periods=10, tz="US/Eastern"),
            timedelta_range("1 day", periods=10),
        ],
    )
    def test_constructor(self, constructor, breaks, closed, name):
        # round-trip: breaks in -> left/right edges out
        result_kwargs = self.get_kwargs_from_breaks(breaks, closed)
        result = constructor(closed=closed, name=name, **result_kwargs)
        assert result.closed == closed
        assert result.name == name
        assert result.dtype.subtype == getattr(breaks, "dtype", "int64")
        tm.assert_index_equal(result.left, Index(breaks[:-1]))
        tm.assert_index_equal(result.right, Index(breaks[1:]))
    @pytest.mark.parametrize(
        "breaks, subtype",
        [
            (Int64Index([0, 1, 2, 3, 4]), "float64"),
            (Int64Index([0, 1, 2, 3, 4]), "datetime64[ns]"),
            (Int64Index([0, 1, 2, 3, 4]), "timedelta64[ns]"),
            (Float64Index([0, 1, 2, 3, 4]), "int64"),
            (date_range("2017-01-01", periods=5), "int64"),
            (timedelta_range("1 day", periods=5), "int64"),
        ],
    )
    def test_constructor_dtype(self, constructor, breaks, subtype):
        # GH 19262: conversion via dtype parameter
        expected_kwargs = self.get_kwargs_from_breaks(breaks.astype(subtype))
        expected = constructor(**expected_kwargs)
        result_kwargs = self.get_kwargs_from_breaks(breaks)
        iv_dtype = IntervalDtype(subtype)
        # dtype may be given as an IntervalDtype instance or its string form
        for dtype in (iv_dtype, str(iv_dtype)):
            result = constructor(dtype=dtype, **result_kwargs)
            tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize("breaks", [[np.nan] * 2, [np.nan] * 4, [np.nan] * 50])
    def test_constructor_nan(self, constructor, breaks, closed):
        # GH 18421
        result_kwargs = self.get_kwargs_from_breaks(breaks)
        result = constructor(closed=closed, **result_kwargs)
        expected_subtype = np.float64
        expected_values = np.array(breaks[:-1], dtype=object)
        assert result.closed == closed
        assert result.dtype.subtype == expected_subtype
        tm.assert_numpy_array_equal(result._ndarray_values, expected_values)
    @pytest.mark.parametrize(
        "breaks",
        [
            [],
            np.array([], dtype="int64"),
            np.array([], dtype="float64"),
            np.array([], dtype="datetime64[ns]"),
            np.array([], dtype="timedelta64[ns]"),
        ],
    )
    def test_constructor_empty(self, constructor, breaks, closed):
        # GH 18421
        result_kwargs = self.get_kwargs_from_breaks(breaks)
        result = constructor(closed=closed, **result_kwargs)
        expected_values = np.array([], dtype=object)
        # plain lists have no dtype attribute; default to int64
        expected_subtype = getattr(breaks, "dtype", np.int64)
        assert result.empty
        assert result.closed == closed
        assert result.dtype.subtype == expected_subtype
        tm.assert_numpy_array_equal(result._ndarray_values, expected_values)
    @pytest.mark.parametrize(
        "breaks",
        [
            tuple("0123456789"),
            list("abcdefghij"),
            np.array(list("abcdefghij"), dtype=object),
            np.array(list("abcdefghij"), dtype="<U1"),
        ],
    )
    def test_constructor_string(self, constructor, breaks):
        # GH 19016
        msg = (
            "category, object, and string subtypes are not supported "
            "for IntervalIndex"
        )
        with pytest.raises(TypeError, match=msg):
            constructor(**self.get_kwargs_from_breaks(breaks))
    @pytest.mark.parametrize("cat_constructor", [Categorical, CategoricalIndex])
    def test_constructor_categorical_valid(self, constructor, cat_constructor):
        # GH 21243/21253
        if isinstance(constructor, partial) and constructor.func is Index:
            # Index is defined to create CategoricalIndex from categorical data
            pytest.skip()
        breaks = np.arange(10, dtype="int64")
        expected = IntervalIndex.from_breaks(breaks)
        cat_breaks = cat_constructor(breaks)
        result_kwargs = self.get_kwargs_from_breaks(cat_breaks)
        result = constructor(**result_kwargs)
        tm.assert_index_equal(result, expected)
    def test_generic_errors(self, constructor):
        # filler input data to be used when supplying invalid kwargs
        filler = self.get_kwargs_from_breaks(range(10))
        # invalid closed
        msg = "invalid option for 'closed': invalid"
        with pytest.raises(ValueError, match=msg):
            constructor(closed="invalid", **filler)
        # unsupported dtype
        msg = "dtype must be an IntervalDtype, got int64"
        with pytest.raises(TypeError, match=msg):
            constructor(dtype="int64", **filler)
        # invalid dtype
        msg = "data type 'invalid' not understood"
        with pytest.raises(TypeError, match=msg):
            constructor(dtype="invalid", **filler)
        # no point in nesting periods in an IntervalIndex
        periods = period_range("2000-01-01", periods=10)
        periods_kwargs = self.get_kwargs_from_breaks(periods)
        msg = "Period dtypes are not supported, use a PeriodIndex instead"
        with pytest.raises(ValueError, match=msg):
            constructor(**periods_kwargs)
        # decreasing values
        decreasing_kwargs = self.get_kwargs_from_breaks(range(10, -1, -1))
        msg = "left side of interval must be <= right side"
        with pytest.raises(ValueError, match=msg):
            constructor(**decreasing_kwargs)
class TestFromArrays(Base):
    """Tests specific to IntervalIndex.from_arrays"""
    @pytest.fixture
    def constructor(self):
        return IntervalIndex.from_arrays
    def get_kwargs_from_breaks(self, breaks, closed="right"):
        """
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by IntervalIndex.from_arrays
        """
        return {"left": breaks[:-1], "right": breaks[1:]}
    def test_constructor_errors(self):
        # GH 19016: categorical data
        data = Categorical(list("01234abcde"), ordered=True)
        msg = (
            "category, object, and string subtypes are not supported "
            "for IntervalIndex"
        )
        with pytest.raises(TypeError, match=msg):
            IntervalIndex.from_arrays(data[:-1], data[1:])
        # unequal length
        left = [0, 1, 2]
        right = [2, 3]
        msg = "left and right must have the same length"
        with pytest.raises(ValueError, match=msg):
            IntervalIndex.from_arrays(left, right)
    @pytest.mark.parametrize(
        "left_subtype, right_subtype", [(np.int64, np.float64), (np.float64, np.int64)]
    )
    def test_mixed_float_int(self, left_subtype, right_subtype):
        """mixed int/float left/right results in float for both sides"""
        left = np.arange(9, dtype=left_subtype)
        right = np.arange(1, 10, dtype=right_subtype)
        result = IntervalIndex.from_arrays(left, right)
        # both sides are upcast to float64
        expected_left = Float64Index(left)
        expected_right = Float64Index(right)
        expected_subtype = np.float64
        tm.assert_index_equal(result.left, expected_left)
        tm.assert_index_equal(result.right, expected_right)
        assert result.dtype.subtype == expected_subtype
class TestFromBreaks(Base):
    """Tests specific to IntervalIndex.from_breaks"""
    @pytest.fixture
    def constructor(self):
        return IntervalIndex.from_breaks
    def get_kwargs_from_breaks(self, breaks, closed="right"):
        """
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by IntervalIndex.from_breaks
        """
        # from_breaks takes the breaks directly
        return {"breaks": breaks}
    def test_constructor_errors(self):
        # GH 19016: categorical data
        data = Categorical(list("01234abcde"), ordered=True)
        msg = (
            "category, object, and string subtypes are not supported "
            "for IntervalIndex"
        )
        with pytest.raises(TypeError, match=msg):
            IntervalIndex.from_breaks(data)
    def test_length_one(self):
        """breaks of length one produce an empty IntervalIndex"""
        breaks = [0]
        result = IntervalIndex.from_breaks(breaks)
        expected = IntervalIndex.from_breaks([])
        tm.assert_index_equal(result, expected)
class TestFromTuples(Base):
    """Tests specific to IntervalIndex.from_tuples"""
    @pytest.fixture
    def constructor(self):
        return IntervalIndex.from_tuples
    def get_kwargs_from_breaks(self, breaks, closed="right"):
        """
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by IntervalIndex.from_tuples
        """
        if len(breaks) == 0:
            return {"data": breaks}
        # pair consecutive breaks into (left, right) tuples
        tuples = list(zip(breaks[:-1], breaks[1:]))
        if isinstance(breaks, (list, tuple)):
            return {"data": tuples}
        elif is_categorical_dtype(breaks):
            return {"data": breaks._constructor(tuples)}
        return {"data": com.asarray_tuplesafe(tuples)}
    def test_constructor_errors(self):
        # non-tuple
        tuples = [(0, 1), 2, (3, 4)]
        msg = "IntervalIndex.from_tuples received an invalid item, 2"
        with pytest.raises(TypeError, match=msg.format(t=tuples)):
            IntervalIndex.from_tuples(tuples)
        # too few/many items
        tuples = [(0, 1), (2,), (3, 4)]
        msg = "IntervalIndex.from_tuples requires tuples of length 2, got {t}"
        with pytest.raises(ValueError, match=msg.format(t=tuples)):
            IntervalIndex.from_tuples(tuples)
        tuples = [(0, 1), (2, 3, 4), (5, 6)]
        with pytest.raises(ValueError, match=msg.format(t=tuples)):
            IntervalIndex.from_tuples(tuples)
    def test_na_tuples(self):
        # tuple (NA, NA) evaluates the same as NA as an element
        na_tuple = [(0, 1), (np.nan, np.nan), (2, 3)]
        idx_na_tuple = IntervalIndex.from_tuples(na_tuple)
        idx_na_element = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)])
        tm.assert_index_equal(idx_na_tuple, idx_na_element)
class TestClassConstructors(Base):
    """Tests specific to the IntervalIndex/Index constructors"""
    @pytest.fixture(
        params=[IntervalIndex, partial(Index, dtype="interval")],
        ids=["IntervalIndex", "Index"],
    )
    def constructor(self, request):
        return request.param
    def get_kwargs_from_breaks(self, breaks, closed="right"):
        """
        converts intervals in breaks format to a dictionary of kwargs
        specific to the format expected by the IntervalIndex/Index constructors
        """
        if len(breaks) == 0:
            return {"data": breaks}
        # build Interval objects; NA breaks pass through as-is
        ivs = [
            Interval(l, r, closed) if notna(l) else l
            for l, r in zip(breaks[:-1], breaks[1:])
        ]
        if isinstance(breaks, list):
            return {"data": ivs}
        elif is_categorical_dtype(breaks):
            return {"data": breaks._constructor(ivs)}
        return {"data": np.array(ivs, dtype=object)}
    def test_generic_errors(self, constructor):
        """
        override the base class implementation since errors are handled
        differently; checks unnecessary since caught at the Interval level
        """
        pass
    def test_constructor_string(self):
        # GH23013
        # When forming the interval from breaks,
        # the interval of strings is already forbidden.
        pass
    def test_constructor_errors(self, constructor):
        # mismatched closed within intervals with no constructor override
        ivs = [Interval(0, 1, closed="right"), Interval(2, 3, closed="left")]
        msg = "intervals must all be closed on the same side"
        with pytest.raises(ValueError, match=msg):
            constructor(ivs)
        # scalar
        msg = (
            r"IntervalIndex\(...\) must be called with a collection of "
            "some kind, 5 was passed"
        )
        with pytest.raises(TypeError, match=msg):
            constructor(5)
        # not an interval
        msg = "type <class 'numpy.int64'> with value 0 is not an interval"
        with pytest.raises(TypeError, match=msg):
            constructor([0, 1])
    @pytest.mark.parametrize(
        "data, closed",
        [
            ([], "both"),
            ([np.nan, np.nan], "neither"),
            (
                [Interval(0, 3, closed="neither"), Interval(2, 5, closed="neither")],
                "left",
            ),
            (
                [Interval(0, 3, closed="left"), Interval(2, 5, closed="right")],
                "neither",
            ),
            (IntervalIndex.from_breaks(range(5), closed="both"), "right"),
        ],
    )
    def test_override_inferred_closed(self, constructor, data, closed):
        # GH 19370
        if isinstance(data, IntervalIndex):
            tuples = data.to_tuples()
        else:
            tuples = [(iv.left, iv.right) if notna(iv) else iv for iv in data]
        expected = IntervalIndex.from_tuples(tuples, closed=closed)
        result = constructor(data, closed=closed)
        tm.assert_index_equal(result, expected)
    @pytest.mark.parametrize(
        "values_constructor", [list, np.array, IntervalIndex, IntervalArray]
    )
    def test_index_object_dtype(self, values_constructor):
        # Index(intervals, dtype=object) is an Index (not an IntervalIndex)
        intervals = [Interval(0, 1), Interval(1, 2), Interval(2, 3)]
        values = values_constructor(intervals)
        result = Index(values, dtype=object)
        assert type(result) is Index
        tm.assert_numpy_array_equal(result.values, np.array(values))
    def test_index_mixed_closed(self):
        # GH27172
        intervals = [
            Interval(0, 1, closed="left"),
            Interval(1, 2, closed="right"),
            Interval(2, 3, closed="neither"),
            Interval(3, 4, closed="both"),
        ]
        result = Index(intervals)
        expected = Index(intervals, dtype=object)
        tm.assert_index_equal(result, expected)
class TestFromIntervals(TestClassConstructors):
    """
    Tests for IntervalIndex.from_intervals, which is deprecated in favor of the
    IntervalIndex constructor.  Same tests as the IntervalIndex constructor,
    plus deprecation test.  Should only need to delete this class when removed.
    """
    @pytest.fixture
    def constructor(self):
        # wrap from_intervals so the FutureWarning it emits does not pollute
        # the inherited tests
        def from_intervals_ignore_warnings(*args, **kwargs):
            with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
                return IntervalIndex.from_intervals(*args, **kwargs)
        return from_intervals_ignore_warnings
    def test_deprecated(self):
        ivs = [Interval(0, 1), Interval(1, 2)]
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            IntervalIndex.from_intervals(ivs)
    @pytest.mark.skip(reason="parent class test that is not applicable")
    def test_index_object_dtype(self):
        pass
    @pytest.mark.skip(reason="parent class test that is not applicable")
    def test_index_mixed_closed(self):
        pass
| apache-2.0 |
loli/semisupervisedforests | examples/svm/plot_weighted_samples.py | 69 | 1942 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
    """Draw the classifier's decision surface and the weighted samples.

    NOTE(review): reads the module-level globals ``X`` and ``Y`` defined
    later in this script rather than taking the data as parameters.
    """
    # plot the decision function
    xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
    Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # plot the line, the points, and the nearest vectors to the plane
    axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
    # marker size is scaled by the per-sample weight
    axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
                 cmap=plt.cm.bone)
    axis.axis('off')
    axis.set_title(title)
# we create 20 points: two Gaussian blobs of 10 points each, labeled +1/-1
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
# side-by-side comparison of the two decision boundaries
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
                       "Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
                       "Modified weights")
plt.show()
beiko-lab/timeclust | ananke/_tabulate.py | 2 | 17275 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module provides functionality for tabulating sequence files (FASTA) into
an Ananke HDF5 file.
"""
import sys
import warnings
import hashlib
import pandas as pd
import numpy as np
from collections import defaultdict
from scipy.sparse import csr_matrix
from ._database import TimeSeriesData
# TODO: - Add an observer to the timeseriesdb class for progress
def read_metadata(metadata_path, time_name, time_mask):
"""Take in a path, metadata_path, read file into a pandas dataframe.
Validate that time_name and time_mask (if applicable) are present.
Parameters
----------
metadata_path: str
filepath to metadata file
time_name: str
name of column in metadata file that contains time points as integers
relative to the starting time point
time_mask: str
name of column in metadata file that contains the masking category
that delineates multiple time series
Returns
-------
metadata_mapping: pandas.DataFrame
Pandas dataframe containing sample metadata
"""
metadata_mapping = pd.read_csv(metadata_path, sep="\t", header=0)
try:
time_points = np.sort(metadata_mapping[time_name].unique())
except:
raise KeyError("Specified time point column name (%s) is not found " \
"in metadata file." % (time_name,))
# Check if #SampleID is a column (required for QIIME metadata format)
if "#SampleID" not in metadata_mapping:
raise KeyError("Metadata mapping file does not start with #SampleID.")
if time_mask is not None:
if time_mask not in metadata_mapping:
raise KeyError("Specified time mask column name (%s) is not " \
"found in metadata file." % (time_mask,))
else:
#Get the values sorted by mask first, then time points
metadata_mapping = metadata_mapping.sort_values(by=[time_mask,
time_name])
else:
metadata_mapping = metadata_mapping.sort_values(by=time_name)
return metadata_mapping
def tabulate(seqf, metadata_mapping, size_labels):
"""Count the unique sequences in a FASTA file, tabulating by sample.
Parameters
----------
seqf: file
input FASTA sequence file (not wrapped, two lines per record)
metadata_mapping: pandas.DataFrame
metadata table contained in Pandas DataFrame
size_labels: boolean
true if FASTA file is already compressed to unique sequences (and
contains USEARCH-style size annotations in the label, i.e.,
>SEQUENCEID;size=####;
Returns
-------
seqcount: dict {int:{str:int}}
dict of dicts, with first key as the DNA sequence hash, second key
as the sample name, and final int value as the sequence count within
that sample
"""
i = 0
samplenames = set(list(metadata_mapping["#SampleID"]))
sample_name_array = np.array(metadata_mapping["#SampleID"])
seqcount = defaultdict(lambda: defaultdict(int))
prev_sample_name = ""
#Keep track of skipped sequences
skipped = 0
skipped_samples = set()
for line in seqf:
assert line[0] == ">", "Label line began with %s, not >. Is " \
"your FASTA file one-line-per-sequence?" % (line[0],)
#Assume first line is header
sample_name = line.split("_")[0][1:]
if sample_name != prev_sample_name:
if sample_name in samplenames:
prev_sample_name = sample_name
else:
#Skip the next sequence
# Readline if py3, next if py2
if sys.version_info[0] >= 3:
seqf.readline()
else:
seqf.next()
i += 1
skipped += 1
skipped_samples.add(sample_name)
continue
if sys.version_info[0] >= 3:
sequence = seqf.readline().strip()
else:
sequence = seqf.next().strip()
assert sequence[0] != ">", "Expected sequence, got label. Is \
your FASTA file one-line-per-sequence?"
if size_labels:
if (";" not in line) | ("=" not in line):
raise ValueError("FASTA size labels specified but not found.")
size = line.strip().split(";")[-1].split("=")[-1]
seqcount[sequence][sample_name] += int(size)
else:
seqcount[sequence][sample_name] += 1
i+=1
#This needs to be replaced by something better
if (i%10000 == 0):
sys.stdout.write("\r%d" % i)
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.flush()
if (skipped > 0):
warnings.warn("Skipped %d sequences (no match to sample name" \
"in metadata file). Sample names: %s" % (skipped, \
str(list(skipped_samples))))
return seqcount
def write_csr(timeseriesdb, seqcount, outseqf, sample_name_array):
    """Convert the seqcount dict structure to a compressed sparse row matrix,
    then write it to an Ananke TimeSeriesData object and a FASTA file, in
    chunks.

    Parameters
    ----------
    timeseriesdb: ananke._database.TimeSeriesData
        TimeSeriesData object that encapsulates a .h5 file
    seqcount: dict {str:{str:int}}
        dict of dicts output from tabulate function
    outseqf: file
        a file object pointing to a FASTA file that will contain unique
        sequences and their size data
    sample_name_array: numpy.array
        an array containing the sample names, taken from the metadata mapping

    Returns
    -------
    unique_indices: list
        column indices (into sample_name_array) that received at least one
        observation; used by the caller to detect missing samples
    """
    # Set stage for a CSR sparse matrix
    data = []
    indptr = []
    indices = []
    # Used to ensure all sample columns are represented at the end
    unique_indices = []
    hashseq_list = []
    j = 0
    # From the seqcount dict containing the time-series for each hash
    # build up a CSR matrix structure.
    # At the same time, print each unique sequence and its size to a FASTA
    # file that can be used for clustering and taxonomic classification.
    for sequence, abundance_dict in seqcount.items():
        # the MD5 hex digest of the sequence is its stable row identifier
        md5hash = hashlib.md5()
        md5hash.update(sequence.encode())
        hashseq = md5hash.hexdigest()
        hashseq_list.append(hashseq)
        rowsum = 0
        indptr.append(j)
        for col, sample_name in enumerate(sample_name_array):
            if sample_name in abundance_dict:
                abundance = abundance_dict[sample_name]
                rowsum += abundance
                data.append(abundance)
                indices.append(col)
                if (col not in unique_indices):
                    unique_indices.append(col)
                j += 1
        # Write the unique sequence file (USEARCH-style size annotation)
        outseqf.write(">%s;size=%d;\n" % (hashseq, rowsum))
        outseqf.write("%s\n" % sequence)
        # Don't let the buffers get too large before dumping them to disk
        if (len(indptr) >= 500):
            timeseriesdb.add_timeseries_data(data, indices,
                                             indptr, hashseq_list)
            # Clear out the existing data
            hashseq_list = []
            data = []
            indices = []
            indptr = []
    # Add any data that's left
    timeseriesdb.add_timeseries_data(data, indices, indptr, hashseq_list)
    return unique_indices
def hash_sequence(sequence):
    """Return the hexadecimal MD5 digest of a DNA sequence string."""
    return hashlib.md5(sequence.encode()).hexdigest()
def fasta_to_ananke(sequence_path, metadata_path, time_name,
                    timeseriesdata_path, outseq_path, time_mask=None,
                    size_labels=False):
    """Count the unique sequences in a FASTA file, tabulating by time points,
    and save the results to an Ananke HDF5 file.

    Parameters
    ----------
    sequence_path: str
        path to the input FASTA file
    metadata_path: str
        path to the tab-separated metadata mapping file
    time_name: str
        name of the metadata column containing the time points
    timeseriesdata_path: str
        path to the output Ananke HDF5 file
    outseq_path: str
        path to the output unique-sequence FASTA file
    time_mask: str (optional)
        name of the metadata column containing time-series mask info
    size_labels: bool (optional)
        passed through to tabulate(); presumably indicates usearch-style
        size annotations in the headers — TODO confirm against tabulate
    """
    # Grab the metadata from the file
    metadata_mapping = read_metadata(metadata_path, time_name, time_mask)
    timeseriesdb = TimeSeriesData(timeseriesdata_path)
    # Input format assumptions:
    #  - sequences and headers take 1 line each (i.e., not wrapped FASTA)
    #  - no blank lines
    # Context managers guarantee both files are closed even on error;
    # the previous version never closed either handle.
    with open(sequence_path, 'r') as seqf, open(outseq_path, 'w') as outseqf:
        # Iterate through the input FASTA file, tabulating unique sequences
        seqcount = tabulate(seqf, metadata_mapping, size_labels)
        print("Writing table to file")
        # Get the shape of the data
        sample_name_array = np.array(metadata_mapping["#SampleID"])
        ngenes = len(seqcount)
        nsamples = len(sample_name_array)
        # One observation per (sequence, sample) pair with recorded abundance
        nobs = sum(len(abundance_dict)
                   for abundance_dict in seqcount.values())
        # Resize the Ananke TimeSeriesData object
        timeseriesdb.resize_data(ngenes, nsamples, nobs)
        timeseriesdb.insert_array_by_chunks("samples/names",
                                            sample_name_array)
        timeseriesdb.insert_array_by_chunks("samples/time",
                                            metadata_mapping[time_name],
                                            transform_func=float)
        if time_mask is not None:
            timeseriesdb.insert_array_by_chunks("samples/mask",
                                                metadata_mapping[time_mask])
        else:
            # Set a dummy mask
            timeseriesdb.insert_array_by_chunks("samples/mask",
                                                [1] * len(sample_name_array))
        unique_indices = write_csr(timeseriesdb, seqcount,
                                   outseqf, sample_name_array)
    print("Done writing to %s" % timeseriesdata_path)
    # Check consistency of the data, warn if samples are missing
    if (len(unique_indices) < nsamples):
        warnings.warn("Number of time-points retrieved from sequence " \
            "file is less than the number of samples in metadata file. " \
            "%d samples are missing. Consider removing extraneous samples " \
            "from the metadata file. You will want to run `ananke filter` to " \
            "remove empty samples."
            % (nsamples - len(unique_indices),))
def dada2_to_ananke(table_path, metadata_path, time_name, timeseriesdata_path,
                    outseq_path, time_mask=None):
    """Converts a DADA2 table from dada2::makeSequenceTable to an Ananke HDF5
    file. Table must have sequences as rows, samples/time-points as columns
    (i.e., output from DADA2 should be transposed). Should be exported using
    `write.table(t(seqtab), table_path)` from R.

    Parameters
    ----------
    table_path: str
        Path to the csv table output from DADA2.
    metadata_path: str
        Path to the tab-separated metadata file.
    time_name: str
        Name of the column that contains the time points as integers, offset
        from zero.
    timeseriesdata_path: str
        Path to the new output Ananke HDF5 file.
    outseq_path: str
        Path to the new output unique sequence FASTA file.
    time_mask: str (optional)
        Name of the column that contains the time-series masking information.
    """
    # Grab the metadata from the file, sorted by mask (if any) then time
    metadata_mapping = read_metadata(metadata_path, time_name, time_mask)
    if time_mask is not None:
        metadata_mapping = metadata_mapping.sort_values([time_mask,
                                                         time_name])
    else:
        metadata_mapping = metadata_mapping.sort_values([time_name])
    timeseriesdb = TimeSeriesData(timeseriesdata_path)
    # Open table file, read it with pandas
    seqtab = pd.read_csv(table_path, sep=" ")
    print("Writing table to file")
    # Get the shape of the data
    sample_name_array = np.array(metadata_mapping["#SampleID"])
    ngenes = seqtab.shape[0]
    nsamples = len(sample_name_array)
    # Pare down the sequence tab to include only the necessary samples
    # sorted in order of mask then time points
    seqtab = seqtab.loc[:, sample_name_array]
    # Sparse-ify it
    csr_seqtab = csr_matrix(seqtab)
    nobs = csr_seqtab.nnz
    # Resize the Ananke TimeSeriesData object
    timeseriesdb.resize_data(ngenes, nsamples, nobs)
    timeseriesdb.insert_array_by_chunks("samples/names", sample_name_array)
    timeseriesdb.insert_array_by_chunks("samples/time",
                                        metadata_mapping[time_name],
                                        transform_func=float)
    if time_mask is not None:
        timeseriesdb.insert_array_by_chunks("samples/mask",
                                            metadata_mapping[time_mask])
    else:
        # Set a dummy mask
        timeseriesdb.insert_array_by_chunks("samples/mask",
                                            [1] * len(sample_name_array))
    seqhashes = [hash_sequence(x) for x in seqtab.index]
    # Export unique sequences to FASTA; the context manager guarantees the
    # file is flushed and closed (the previous version never closed it).
    with open(outseq_path, 'w') as outseqf:
        for i in range(0, ngenes):
            total = seqtab.iloc[i].sum()
            if total > 0:
                seqhash = seqhashes[i]
                outseqf.write(">%s;size=%d;\n" % (seqhash, total))
                outseqf.write(seqtab.index[i].strip() + "\n")
    timeseriesdb.insert_array_by_chunks("genes/sequenceids", seqhashes)
    timeseriesdb.insert_array_by_chunks("timeseries/data",
                                        csr_seqtab.data,
                                        int)
    timeseriesdb.insert_array_by_chunks("timeseries/indptr",
                                        csr_seqtab.indptr,
                                        int)
    timeseriesdb.insert_array_by_chunks("timeseries/indices",
                                        csr_seqtab.indices,
                                        int)
    print("Done writing to %s" % timeseriesdata_path)
def csv_to_ananke(csv_path, metadata_path, time_name, timeseriesdata_path,
                  time_mask=None):
    """Imports a CSV file and metadata file into an Ananke HDF5 file.

    The CSV file must have the time-series as rows and the samples/
    time points as columns. The first line must be a header that contains
    the sample names.

    Parameters
    ----------
    csv_path: str
        Location of the input tab-separated data table (header row of
        sample names, first column used as the time-series identifiers).
    metadata_path: str
        Location of the metadata mapping file.
    time_name: str
        Column name containing the time points in the metadata file.
    timeseriesdata_path: str
        Output Ananke .h5 file path.
    time_mask: str (optional)
        Column name containing the time mask indicator in the metadata
        file.
    """
    # Pull in the metadata, sorted by mask (if present) then time point
    metadata_mapping = read_metadata(metadata_path, time_name, time_mask)
    if time_mask is not None:
        metadata_mapping = metadata_mapping.sort_values([time_mask,
                                                         time_name])
    else:
        metadata_mapping = metadata_mapping.sort_values([time_name])
    # Despite the name, the table is read as tab-separated
    csv_table = pd.read_csv(csv_path, header=0,
                            index_col=0, sep="\t")
    # Construct the Ananke object
    timeseriesdb = TimeSeriesData(timeseriesdata_path)
    # Sort columns by time point
    sample_name_array = np.array(metadata_mapping["#SampleID"])
    ngenes = csv_table.shape[0]
    nsamples = len(sample_name_array)
    # Pare down the table to include only the necessary samples,
    # sorted in order of mask then time points
    csv_table = csv_table.loc[:, sample_name_array]
    # Get row (i.e., time series) names
    seqids = csv_table.index
    # Convert the table to a sparse matrix for storage in CSR format.
    # NOTE(review): DataFrame.to_sparse() is deprecated/removed in modern
    # pandas — this path presumably targets an older pandas; verify.
    csr_mat = csr_matrix(csv_table.to_sparse().to_coo())
    nobs = csr_mat.nnz
    timeseriesdb.resize_data(ngenes, nsamples, nobs)
    timeseriesdb.insert_array_by_chunks("samples/names", sample_name_array)
    timeseriesdb.insert_array_by_chunks("samples/time",
                                        metadata_mapping[time_name],
                                        transform_func = float)
    if time_mask is not None:
        timeseriesdb.insert_array_by_chunks("samples/mask",
                                            metadata_mapping[time_mask])
    else:
        # Set a dummy mask (all samples enabled)
        timeseriesdb.insert_array_by_chunks("samples/mask",
                                            [1]*len(sample_name_array))
    timeseriesdb.insert_array_by_chunks("genes/sequenceids", seqids)
    timeseriesdb.insert_array_by_chunks("timeseries/data",
                                        csr_mat.data,
                                        int)
    timeseriesdb.insert_array_by_chunks("timeseries/indptr",
                                        csr_mat.indptr,
                                        int)
    timeseriesdb.insert_array_by_chunks("timeseries/indices",
                                        csr_mat.indices,
                                        int)
    print("Done writing to %s" % timeseriesdata_path)
| gpl-3.0 |
gwulfs/zipline | zipline/algorithm.py | 2 | 48191 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
import warnings
import pytz
import pandas as pd
import numpy as np
from datetime import datetime
from itertools import groupby, chain
from six.moves import filter
from six import (
exec_,
iteritems,
itervalues,
string_types,
)
from operator import attrgetter
from zipline.errors import (
AddTermPostInit,
OrderDuringInitialize,
OverrideCommissionPostInit,
OverrideSlippagePostInit,
RegisterAccountControlPostInit,
RegisterTradingControlPostInit,
UnsupportedCommissionModel,
UnsupportedOrderParameters,
UnsupportedSlippageModel,
UnsupportedDatetimeFormat,
)
from zipline.finance.trading import TradingEnvironment
from zipline.finance.blotter import Blotter
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.controls import (
LongOnly,
MaxOrderCount,
MaxOrderSize,
MaxPositionSize,
MaxLeverage,
RestrictedListOrder
)
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.finance.performance import PerformanceTracker
from zipline.finance.slippage import (
VolumeShareSlippage,
SlippageModel,
transact_partial
)
from zipline.assets import Asset, Future
from zipline.assets.futures import FutureChain
from zipline.gens.composites import date_sorted_sources
from zipline.gens.tradesimulation import AlgorithmSimulator
from zipline.modelling.engine import (
NoOpFFCEngine,
SimpleFFCEngine,
)
from zipline.sources import DataFrameSource, DataPanelSource
from zipline.utils.api_support import (
api_method,
require_not_initialized,
ZiplineAPI,
)
import zipline.utils.events
from zipline.utils.events import (
EventManager,
make_eventrule,
DateRuleFactory,
TimeRuleFactory,
)
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.math_utils import tolerant_equals
import zipline.protocol
from zipline.protocol import Event
from zipline.history import HistorySpec
from zipline.history.history_container import HistoryContainer
# Starting cash used when the caller does not supply a capital base.
DEFAULT_CAPITAL_BASE = 1.0e5
class TradingAlgorithm(object):
"""
Base class for trading algorithms. Inherit and overload
initialize() and handle_data(data).
A new algorithm could look like this:
```
from zipline.api import order, symbol
def initialize(context):
context.sid = symbol('AAPL')
context.amount = 100
def handle_data(context, data):
sid = context.sid
amount = context.amount
order(sid, amount)
```
To then to run this algorithm pass these functions to
TradingAlgorithm:
my_algo = TradingAlgorithm(initialize, handle_data)
stats = my_algo.run(data)
"""
def __init__(self, *args, **kwargs):
    """Initialize sids and other state variables.

    :Arguments:
    :Optional:
        initialize : function
            Function that is called with a single
            argument at the begninning of the simulation.
        handle_data : function
            Function that is called with 2 arguments
            (context and data) on every bar.
        script : str
            Algoscript that contains initialize and
            handle_data function definition.
        data_frequency : {'daily', 'minute'}
           The duration of the bars.
        capital_base : float <default: 1.0e5>
           How much capital to start with.
        instant_fill : bool <default: False>
           Whether to fill orders immediately or on next bar.
        asset_finder : An AssetFinder object
            A new AssetFinder object to be used in this TradingEnvironment
        asset_metadata: can be either:
            - dict
            - pandas.DataFrame
            - object with 'read' property
            If dict is provided, it must have the following structure:
            * keys are the identifiers
            * values are dicts containing the metadata, with the metadata
              field name as the key
            If pandas.DataFrame is provided, it must have the
            following structure:
            * column names must be the metadata fields
            * index must be the different asset identifiers
            * array contents should be the metadata value
            If an object with a 'read' property is provided, 'read' must
            return rows containing at least one of 'sid' or 'symbol' along
            with the other metadata fields.
        identifiers : List
            Any asset identifiers that are not provided in the
            asset_metadata, but will be traded by this TradingAlgorithm

    NOTE: keyword arguments are consumed via kwargs.pop() in a specific
    order; anything left over is forwarded to initialize() at the end.
    """
    self.sources = []
    # List of trading controls to be used to validate orders.
    self.trading_controls = []
    # List of account controls to be checked on each bar.
    self.account_controls = []
    self._recorded_vars = {}
    # get (not pop): 'namespace' stays available to later consumers too
    self.namespace = kwargs.get('namespace', {})
    self._platform = kwargs.pop('platform', 'zipline')
    self.logger = None
    self.benchmark_return_source = None
    # default components for transact
    self.slippage = VolumeShareSlippage()
    self.commission = PerShare()
    self.instant_fill = kwargs.pop('instant_fill', False)
    # set the capital base
    self.capital_base = kwargs.pop('capital_base', DEFAULT_CAPITAL_BASE)
    self.sim_params = kwargs.pop('sim_params', None)
    if self.sim_params is None:
        # Build default simulation parameters from start/end kwargs
        self.sim_params = create_simulation_parameters(
            capital_base=self.capital_base,
            start=kwargs.pop('start', None),
            end=kwargs.pop('end', None)
        )
    self.perf_tracker = PerformanceTracker(self.sim_params)
    # Update the TradingEnvironment with the provided asset metadata
    self.trading_environment = kwargs.pop('env',
                                          TradingEnvironment.instance())
    self.trading_environment.update_asset_finder(
        asset_finder=kwargs.pop('asset_finder', None),
        asset_metadata=kwargs.pop('asset_metadata', None),
        identifiers=kwargs.pop('identifiers', None)
    )
    # Pull in the environment's new AssetFinder for quick reference
    self.asset_finder = self.trading_environment.asset_finder
    self.init_engine(kwargs.pop('ffc_loader', None))
    # Maps from name to Term
    self._filters = {}
    self._factors = {}
    self._classifiers = {}
    self.blotter = kwargs.pop('blotter', None)
    if not self.blotter:
        self.blotter = Blotter()
    # Set the dt initally to the period start by forcing it to change
    self.on_dt_changed(self.sim_params.period_start)
    # The symbol lookup date specifies the date to use when resolving
    # symbols to sids, and can be set using set_symbol_lookup_date()
    self._symbol_lookup_date = None
    # Dirty flags consumed by updated_portfolio()/updated_account()
    self.portfolio_needs_update = True
    self.account_needs_update = True
    self.performance_needs_update = True
    self._portfolio = None
    self._account = None
    self.history_container_class = kwargs.pop(
        'history_container_class', HistoryContainer,
    )
    self.history_container = None
    self.history_specs = {}
    # If string is passed in, execute and get reference to
    # functions.
    self.algoscript = kwargs.pop('script', None)
    self._initialize = None
    self._before_trading_start = None
    self._analyze = None
    self.event_manager = EventManager()
    if self.algoscript is not None:
        # Compile/exec the user script into self.namespace and pull the
        # well-known entry points out of it.
        filename = kwargs.pop('algo_filename', None)
        if filename is None:
            filename = '<string>'
        code = compile(self.algoscript, filename, 'exec')
        exec_(code, self.namespace)
        self._initialize = self.namespace.get('initialize')
        if 'handle_data' not in self.namespace:
            raise ValueError('You must define a handle_data function.')
        else:
            self._handle_data = self.namespace['handle_data']
        self._before_trading_start = \
            self.namespace.get('before_trading_start')
        # Optional analyze function, gets called after run
        self._analyze = self.namespace.get('analyze')
    elif kwargs.get('initialize') and kwargs.get('handle_data'):
        # NOTE(review): this branch is only reached when algoscript is
        # None, so the guard below looks unreachable — kept as-is.
        if self.algoscript is not None:
            raise ValueError('You can not set script and \
initialize/handle_data.')
        self._initialize = kwargs.pop('initialize')
        self._handle_data = kwargs.pop('handle_data')
        self._before_trading_start = kwargs.pop('before_trading_start',
                                                None)
    self.event_manager.add_event(
        zipline.utils.events.Event(
            zipline.utils.events.Always(),
            # We pass handle_data.__func__ to get the unbound method.
            # We will explicitly pass the algorithm to bind it again.
            self.handle_data.__func__,
        ),
        prepend=True,
    )
    # If method not defined, NOOP
    if self._initialize is None:
        self._initialize = lambda x: None
    # Alternative way of setting data_frequency for backwards
    # compatibility.
    if 'data_frequency' in kwargs:
        self.data_frequency = kwargs.pop('data_frequency')
    self._most_recent_data = None
    # Prepare the algo for initialization; remaining args/kwargs are
    # forwarded to initialize() on first _create_generator() call.
    self.initialized = False
    self.initialize_args = args
    self.initialize_kwargs = kwargs
def init_engine(self, loader):
    """Construct and save an FFCEngine from *loader*.

    Installs a NoOpFFCEngine placeholder when no loader is given.
    """
    if loader is None:
        self.engine = NoOpFFCEngine()
    else:
        self.engine = SimpleFFCEngine(
            loader,
            self.trading_environment.trading_days,
            self.asset_finder,
        )
def initialize(self, *args, **kwargs):
    """
    Call self._initialize with `self` made available to Zipline API
    functions.
    """
    with ZiplineAPI(self):
        self._initialize(self)

def before_trading_start(self, data):
    # No-op unless the algorithm supplied a before_trading_start hook.
    if self._before_trading_start is None:
        return
    self._before_trading_start(self, data)

def handle_data(self, data):
    """Run the user's handle_data for one bar of *data*."""
    # Keep a reference to the latest bar for introspection.
    self._most_recent_data = data
    # History must be updated before user code runs, so history() calls
    # inside _handle_data see the current bar.
    if self.history_container:
        self.history_container.update(data, self.datetime)
    self._handle_data(self, data)
    # Unlike trading controls which remain constant unless placing an
    # order, account controls can change each bar. Thus, must check
    # every bar no matter if the algorithm places an order or not.
    self.validate_account_controls()

def analyze(self, perf):
    # Optional user hook, invoked once after run() with the daily stats.
    if self._analyze is None:
        return
    with ZiplineAPI(self):
        self._analyze(self, perf)

def __repr__(self):
    """
    N.B. this does not yet represent a string that can be used
    to instantiate an exact copy of an algorithm.
    However, it is getting close, and provides some value as something
    that can be inspected interactively.
    """
    return """
{class_name}(
    capital_base={capital_base}
    sim_params={sim_params},
    initialized={initialized},
    slippage={slippage},
    commission={commission},
    blotter={blotter},
    recorded_vars={recorded_vars})
""".strip().format(class_name=self.__class__.__name__,
                   capital_base=self.capital_base,
                   sim_params=repr(self.sim_params),
                   initialized=self.initialized,
                   slippage=repr(self.slippage),
                   commission=repr(self.commission),
                   blotter=repr(self.blotter),
                   recorded_vars=repr(self.recorded_vars))
def _create_data_generator(self, source_filter, sim_params=None):
    """
    Create a merged data generator using the sources attached to this
    algorithm.

    ::source_filter:: is a method that receives events in date
    sorted order, and returns True for those events that should be
    processed by the zipline, and False for those that should be
    skipped.
    """
    if sim_params is None:
        sim_params = self.sim_params
    if self.benchmark_return_source is None:
        # Synthesize benchmark events from the environment's benchmark
        # returns, stamped at the close (minute modes) or the raw date.
        if sim_params.data_frequency == 'minute' or \
           sim_params.emission_rate == 'minute':
            def update_time(date):
                return self.trading_environment.get_open_and_close(date)[1]
        else:
            def update_time(date):
                return date
        benchmark_return_source = [
            Event({'dt': update_time(dt),
                   'returns': ret,
                   'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
                   'source_id': 'benchmarks'})
            for dt, ret in
            self.trading_environment.benchmark_returns.iteritems()
            if dt.date() >= sim_params.period_start.date() and
            dt.date() <= sim_params.period_end.date()
        ]
    else:
        benchmark_return_source = self.benchmark_return_source
    date_sorted = date_sorted_sources(*self.sources)
    if source_filter:
        date_sorted = filter(source_filter, date_sorted)
    with_benchmarks = date_sorted_sources(benchmark_return_source,
                                          date_sorted)
    # Group together events with the same dt field. This depends on the
    # events already being sorted.
    return groupby(with_benchmarks, attrgetter('dt'))
def _create_generator(self, sim_params, source_filter=None):
    """
    Create a basic generator setup using the sources to this algorithm.

    ::source_filter:: is a method that receives events in date
    sorted order, and returns True for those events that should be
    processed by the zipline, and False for those that should be
    skipped.
    """
    # Run the user's initialize() exactly once, on first use.
    if not self.initialized:
        self.initialize(*self.initialize_args, **self.initialize_kwargs)
        self.initialized = True
    if self.perf_tracker is None:
        # HACK: When running with the `run` method, we set perf_tracker to
        # None so that it will be overwritten here.
        self.perf_tracker = PerformanceTracker(sim_params)
        self.portfolio_needs_update = True
        self.account_needs_update = True
        self.performance_needs_update = True
    self.data_gen = self._create_data_generator(source_filter, sim_params)
    self.trading_client = AlgorithmSimulator(self, sim_params)
    # Wire slippage + commission into the blotter's transact hook.
    transact_method = transact_partial(self.slippage, self.commission)
    self.set_transact(transact_method)
    return self.trading_client.transform(self.data_gen)

def get_generator(self):
    """
    Override this method to add new logic to the construction
    of the generator. Overrides can use the _create_generator
    method to get a standard construction generator.
    """
    return self._create_generator(self.sim_params)

# TODO: make a new subclass, e.g. BatchAlgorithm, and move
# the run method to the subclass, and refactor to put the
# generator creation logic into get_generator.
def run(self, source, overwrite_sim_params=True,
        benchmark_return_source=None):
    """Run the algorithm.

    :Arguments:
        source : can be either:
                 - pandas.DataFrame
                 - zipline source
                 - list of sources

        If pandas.DataFrame is provided, it must have the
        following structure:
        * column names must be the different asset identifiers
        * index must be DatetimeIndex
        * array contents should be price info.

    :Returns:
        daily_stats : pandas.DataFrame
          Daily performance metrics such as returns, alpha etc.
    """
    # Ensure that source is a DataSource object
    if isinstance(source, list):
        if overwrite_sim_params:
            warnings.warn("""List of sources passed, will not attempt to extract start and end
 dates. Make sure to set the correct fields in sim_params passed to
 __init__().""", UserWarning)
            overwrite_sim_params = False
    elif isinstance(source, pd.DataFrame):
        # if DataFrame provided, map columns to sids and wrap
        # in DataFrameSource
        copy_frame = source.copy()
        copy_frame.columns = \
            self.asset_finder.map_identifier_index_to_sids(
                source.columns, source.index[0]
            )
        source = DataFrameSource(copy_frame)
    elif isinstance(source, pd.Panel):
        # If Panel provided, map items to sids and wrap
        # in DataPanelSource
        copy_panel = source.copy()
        copy_panel.items = self.asset_finder.map_identifier_index_to_sids(
            source.items, source.major_axis[0]
        )
        source = DataPanelSource(copy_panel)
    if isinstance(source, list):
        self.set_sources(source)
    else:
        self.set_sources([source])
    # Override sim_params if params are provided by the source.
    if overwrite_sim_params:
        if hasattr(source, 'start'):
            self.sim_params.period_start = source.start
        if hasattr(source, 'end'):
            self.sim_params.period_end = source.end
        # Changing period_start and period_close might require updating
        # of first_open and last_close.
        self.sim_params._update_internal()
    # The sids field of the source is the reference for the universe at
    # the start of the run
    self._current_universe = set()
    for source in self.sources:
        for sid in source.sids:
            self._current_universe.add(sid)
    # Check that all sids from the source are accounted for in
    # the AssetFinder. This retrieve call will raise an exception if the
    # sid is not found.
    for sid in self._current_universe:
        self.asset_finder.retrieve_asset(sid)
    # force a reset of the performance tracker, in case
    # this is a repeat run of the algorithm.
    self.perf_tracker = None
    # create zipline
    self.gen = self._create_generator(self.sim_params)
    # Create history containers
    if self.history_specs:
        self.history_container = self.history_container_class(
            self.history_specs,
            self.current_universe(),
            self.sim_params.first_open,
            self.sim_params.data_frequency,
        )
    # loop through simulated_trading, each iteration returns a
    # perf dictionary
    perfs = []
    for perf in self.gen:
        perfs.append(perf)
    # convert perf dict to pandas dataframe
    daily_stats = self._create_daily_stats(perfs)
    self.analyze(daily_stats)
    return daily_stats
def _create_daily_stats(self, perfs):
    """Collect per-day performance packets into a daily stats DataFrame.

    The trailing packet without a 'daily_perf' key is the final risk
    report and is stashed on self.risk_report.
    """
    daily_perfs = []
    # TODO: merging recorded_vars / cumulative_risk_metrics into the
    # daily_perf dict could overwrite expected properties of daily_perf.
    # Could potentially raise or log a warning.
    for packet in perfs:
        if 'daily_perf' not in packet:
            self.risk_report = packet
            continue
        day = packet['daily_perf']
        day.update(day.pop('recorded_vars'))
        day.update(packet['cumulative_risk_metrics'])
        daily_perfs.append(day)
    index = [np.datetime64(day['period_close'], utc=True)
             for day in daily_perfs]
    return pd.DataFrame(daily_perfs, index=index)
@api_method
def add_transform(self, transform, days=None):
    """
    Ensures that the history container will have enough size to service
    a simple transform.

    :Arguments:
        transform : string
            The transform to add. must be an element of:
            {'mavg', 'stddev', 'vwap', 'returns'}.
        days : int <default=None>
            The maximum amount of days you will want for this transform.
            This is not needed for 'returns'.

    :Raises:
        ValueError
            For an unknown transform, for days passed with 'returns',
            or for days missing with any other transform.
    """
    if transform not in {'mavg', 'stddev', 'vwap', 'returns'}:
        raise ValueError('Invalid transform')
    if transform == 'returns':
        if days is not None:
            # Fixed message: previously read "returns does use days",
            # the opposite of the intended meaning.
            raise ValueError('returns does not use days')
        self.add_history(2, '1d', 'price')
        return
    elif days is None:
        raise ValueError('no number of days specified')
    # Daily bars: one bar per day; minute bars: 390 trading minutes/day.
    if self.sim_params.data_frequency == 'daily':
        mult = 1
        freq = '1d'
    else:
        mult = 390
        freq = '1m'
    bars = mult * days
    self.add_history(bars, freq, 'price')
    if transform == 'vwap':
        # VWAP also needs volume history over the same window.
        self.add_history(bars, freq, 'volume')
@api_method
def get_environment(self, field='platform'):
    """Return one simulation-environment setting, or the whole dict.

    Passing '*' returns the full mapping; any other value is looked up
    as a key (unknown keys raise KeyError, as before).
    """
    env = {
        'arena': self.sim_params.arena,
        'data_frequency': self.sim_params.data_frequency,
        'start': self.sim_params.first_open,
        'end': self.sim_params.last_close,
        'capital_base': self.sim_params.capital_base,
        'platform': self._platform,
    }
    return env if field == '*' else env[field]
def add_event(self, rule=None, callback=None):
    """Register a (rule, callback) pair with the algorithm's EventManager."""
    event = zipline.utils.events.Event(rule, callback)
    self.event_manager.add_event(event)

@api_method
def schedule_function(self,
                      func,
                      date_rule=None,
                      time_rule=None,
                      half_days=True):
    """Schedule *func* to be called under the given date/time rules."""
    date_rule = date_rule or DateRuleFactory.every_day()
    if self.sim_params.data_frequency == 'minute':
        time_rule = time_rule or TimeRuleFactory.market_open()
    else:
        # In daily mode the time rule is ignored entirely.
        time_rule = zipline.utils.events.Always()
    self.add_event(make_eventrule(date_rule, time_rule, half_days), func)
@api_method
def record(self, *args, **kwargs):
    """
    Track and record local variable (i.e. attributes) each day.

    Positional arguments are consumed as alternating name/value pairs;
    keyword arguments are recorded directly.
    """
    # Pulling from the same iterator twice pairs up consecutive
    # positionals: zip yields (a, b), (c, d), ... instead of (a, a), ...
    arg_iter = iter(args)
    positionals = zip(arg_iter, arg_iter)
    for name, value in chain(positionals, iteritems(kwargs)):
        self._recorded_vars[name] = value
@api_method
def symbol(self, symbol_str):
    """
    Default symbol lookup for any source that directly maps the
    symbol to the Asset (e.g. yahoo finance).
    """
    # Resolve as of the explicit lookup date when one has been set via
    # set_symbol_lookup_date(); otherwise fall back to the period end.
    if self._symbol_lookup_date is not None:
        as_of = self._symbol_lookup_date
    else:
        as_of = self.sim_params.period_end
    return self.asset_finder.lookup_symbol_resolve_multiple(
        symbol_str,
        as_of_date=as_of,
    )
@api_method
def symbols(self, *args):
    """Look up several ticker symbols at once; returns a list of Assets."""
    return list(map(self.symbol, args))

@api_method
def sid(self, a_sid):
    """Look up an Asset directly by its integer sid."""
    return self.asset_finder.retrieve_asset(a_sid)
@api_method
def future_chain(self, root_symbol, as_of_date=None):
    """ Look up a future chain with the specified parameters.

    Parameters
    ----------
    root_symbol : str
        The root symbol of a future chain.
    as_of_date : datetime.datetime or pandas.Timestamp or str, optional
        Date at which the chain determination is rooted. I.e. the
        existing contract whose notice date is first after this date is
        the primary contract, etc.

    Returns
    -------
    FutureChain
        The future chain matching the specified parameters.

    Raises
    ------
    RootSymbolNotFound
        If a future chain could not be found for the given root symbol.
    UnsupportedDatetimeFormat
        If as_of_date cannot be parsed as a pandas Timestamp.
    """
    if as_of_date:
        try:
            # Normalize any accepted input form to a UTC Timestamp.
            as_of_date = pd.Timestamp(as_of_date, tz='UTC')
        except ValueError:
            raise UnsupportedDatetimeFormat(input=as_of_date,
                                            method='future_chain')
    return FutureChain(
        asset_finder=self.asset_finder,
        get_datetime=self.get_datetime,
        root_symbol=root_symbol.upper(),
        as_of_date=as_of_date
    )
def _calculate_order_value_amount(self, asset, value):
    """Convert a target dollar value into a share/contract count.

    Returns 0 (and logs at debug level) when the last price is zero,
    since no sensible amount can be inferred.
    """
    last_price = self.trading_client.current_data[asset].price
    if tolerant_equals(last_price, 0):
        if self.logger:
            self.logger.debug(
                "Price of 0 for {psid}; can't infer value".format(psid=asset)
            )
        # Don't place any order
        return 0
    # Futures have no inherent value; scale exposure by the multiplier.
    multiplier = asset.contract_multiplier if isinstance(asset, Future) else 1
    return value / (last_price * multiplier)
@api_method
def order(self, sid, amount,
          limit_price=None,
          stop_price=None,
          style=None):
    """
    Place an order using the specified parameters.

    Returns whatever the blotter returns for the placed order.
    Raises a ZiplineError subtype for invalid parameter combinations.
    """
    def round_if_near_integer(a, epsilon=1e-4):
        """
        Round a to the nearest integer if that integer is within an epsilon
        of a.
        """
        if abs(a - round(a)) <= epsilon:
            return round(a)
        else:
            return a
    # Truncate to the integer share count that's either within .0001 of
    # amount or closer to zero.
    # E.g. 3.9999 -> 4.0; 5.5 -> 5.0; -5.5 -> -5.0
    amount = int(round_if_near_integer(amount))
    # Raises a ZiplineError if invalid parameters are detected.
    self.validate_order_params(sid,
                               amount,
                               limit_price,
                               stop_price,
                               style)
    # Convert deprecated limit_price and stop_price parameters to use
    # ExecutionStyle objects.
    style = self.__convert_order_params_for_blotter(limit_price,
                                                    stop_price,
                                                    style)
    return self.blotter.order(sid, amount, style)
def validate_order_params(self,
                          asset,
                          amount,
                          limit_price,
                          stop_price,
                          style):
    """
    Helper method for validating parameters to the order API function.

    Raises an UnsupportedOrderParameters if invalid arguments are found.
    Also runs every registered trading control against the proposed order.
    """
    if not self.initialized:
        raise OrderDuringInitialize(
            msg="order() can only be called from within handle_data()"
        )
    # style is mutually exclusive with the deprecated price parameters.
    if style:
        if limit_price:
            raise UnsupportedOrderParameters(
                msg="Passing both limit_price and style is not supported."
            )
        if stop_price:
            raise UnsupportedOrderParameters(
                msg="Passing both stop_price and style is not supported."
            )
    if not isinstance(asset, Asset):
        raise UnsupportedOrderParameters(
            msg="Passing non-Asset argument to 'order()' is not supported."
                " Use 'sid()' or 'symbol()' methods to look up an Asset."
        )
    # Each control may raise to veto the order.
    for control in self.trading_controls:
        control.validate(asset,
                         amount,
                         self.updated_portfolio(),
                         self.get_datetime(),
                         self.trading_client.current_data)
@staticmethod
def __convert_order_params_for_blotter(limit_price, stop_price, style):
    """
    Helper method for converting deprecated limit_price and stop_price
    arguments into ExecutionStyle instances.

    This function assumes that either style == None or (limit_price,
    stop_price) == (None, None).
    """
    # TODO_SS: DeprecationWarning for usage of limit_price and stop_price.
    if style:
        assert (limit_price, stop_price) == (None, None)
        return style
    if limit_price and stop_price:
        return StopLimitOrder(limit_price, stop_price)
    elif limit_price:
        return LimitOrder(limit_price)
    elif stop_price:
        return StopOrder(stop_price)
    return MarketOrder()
@api_method
def order_value(self, sid, value,
                limit_price=None, stop_price=None, style=None):
    """
    Place an order by desired value rather than desired number of shares.

    If the requested sid is found in the universe, the requested value is
    divided by its price to imply the number of shares to transact.
    If the Asset being ordered is a Future, the 'value' calculated
    is actually the exposure, as Futures have no 'value'.

    value > 0 :: Buy/Cover
    value < 0 :: Sell/Short
    Market order:    order(sid, value)
    Limit order:     order(sid, value, limit_price)
    Stop order:      order(sid, value, None, stop_price)
    StopLimit order: order(sid, value, limit_price, stop_price)
    """
    share_count = self._calculate_order_value_amount(sid, value)
    return self.order(
        sid,
        share_count,
        limit_price=limit_price,
        stop_price=stop_price,
        style=style,
    )
    @property
    def recorded_vars(self):
        # Return a shallow copy so callers cannot mutate the algorithm's
        # internal record() state.
        return copy(self._recorded_vars)
    @property
    def portfolio(self):
        # Lazily refreshed view of the current portfolio state.
        return self.updated_portfolio()
    def updated_portfolio(self):
        """Return the cached portfolio, refreshing it from the performance
        tracker only when it has been marked stale."""
        if self.portfolio_needs_update:
            self._portfolio = \
                self.perf_tracker.get_portfolio(self.performance_needs_update)
            # Both dirty flags are consumed by the refresh above.
            self.portfolio_needs_update = False
            self.performance_needs_update = False
        return self._portfolio
    @property
    def account(self):
        # Lazily refreshed view of the current account state.
        return self.updated_account()
    def updated_account(self):
        """Return the cached account object, refreshing it from the
        performance tracker only when it has been marked stale."""
        if self.account_needs_update:
            self._account = \
                self.perf_tracker.get_account(self.performance_needs_update)
            # Both dirty flags are consumed by the refresh above.
            self.account_needs_update = False
            self.performance_needs_update = False
        return self._account
    def set_logger(self, logger):
        """Attach the logger instance used by the algorithm."""
        self.logger = logger
    def on_dt_changed(self, dt):
        """
        Callback triggered by the simulation loop whenever the current dt
        changes.

        Any logic that should happen exactly once at the start of each
        datetime group should happen here.
        """
        # NOTE: assert-based validation is stripped under ``python -O``.
        assert isinstance(dt, datetime), \
            "Attempt to set algorithm's current time with non-datetime"
        assert dt.tzinfo == pytz.utc, \
            "Algorithm expects a utc datetime"
        self.datetime = dt
        # Keep the performance tracker and blotter in sync with the new dt.
        self.perf_tracker.set_date(dt)
        self.blotter.set_date(dt)
    @api_method
    def get_datetime(self, tz=None):
        """
        Return the current simulation datetime (always stored as UTC).

        If *tz* is given as a tzinfo object or a timezone name string, the
        returned datetime is converted to that timezone.
        """
        dt = self.datetime
        assert dt.tzinfo == pytz.utc, "Algorithm should have a utc datetime"
        if tz is not None:
            # Convert to the given timezone passed as a string or tzinfo.
            if isinstance(tz, string_types):
                tz = pytz.timezone(tz)
            dt = dt.astimezone(tz)
        return dt  # datetime.datetime objects are immutable.
    def set_transact(self, transact):
        """
        Set the method that will be called to create a
        transaction from open orders and trade events.
        """
        self.blotter.transact = transact
    def update_dividends(self, dividend_frame):
        """
        Set DataFrame used to process dividends. DataFrame columns should
        contain at least the entries in zp.DIVIDEND_FIELDS.
        """
        self.perf_tracker.update_dividends(dividend_frame)
    @api_method
    def set_slippage(self, slippage):
        """Install a SlippageModel; only allowed before initialization."""
        if not isinstance(slippage, SlippageModel):
            raise UnsupportedSlippageModel()
        if self.initialized:
            # Changing slippage mid-simulation would invalidate results.
            raise OverrideSlippagePostInit()
        self.slippage = slippage
    @api_method
    def set_commission(self, commission):
        """Install a commission model; only allowed before initialization."""
        if not isinstance(commission, (PerShare, PerTrade, PerDollar)):
            raise UnsupportedCommissionModel()
        if self.initialized:
            # Changing commissions mid-simulation would invalidate results.
            raise OverrideCommissionPostInit()
        self.commission = commission
    @api_method
    def set_symbol_lookup_date(self, dt):
        """
        Set the date for which symbols will be resolved to their sids
        (symbols may map to different firms or underlying assets at
        different times)
        """
        try:
            # Normalize any Timestamp-compatible input to a UTC Timestamp.
            self._symbol_lookup_date = pd.Timestamp(dt, tz='UTC')
        except ValueError:
            raise UnsupportedDatetimeFormat(input=dt,
                                            method='set_symbol_lookup_date')
    def set_sources(self, sources):
        """Set the list of data sources feeding the simulation."""
        assert isinstance(sources, list)
        self.sources = sources
    # Retained for backwards compatibility: data_frequency now lives on
    # sim_params, but older code accesses it on the algorithm directly.
    @property
    def data_frequency(self):
        return self.sim_params.data_frequency
    @data_frequency.setter
    def data_frequency(self, value):
        # Only the two supported bar frequencies are valid.
        # NOTE: assert-based validation is stripped under ``python -O``.
        assert value in ('daily', 'minute')
        self.sim_params.data_frequency = value
    @api_method
    def order_percent(self, sid, percent,
                      limit_price=None, stop_price=None, style=None):
        """
        Place an order in the specified asset corresponding to the given
        percent of the current portfolio value.

        Note that percent must be expressed as a decimal (0.50 means 50%).
        """
        # Scale the current portfolio value and delegate to order_value().
        value = self.portfolio.portfolio_value * percent
        return self.order_value(sid, value,
                                limit_price=limit_price,
                                stop_price=stop_price,
                                style=style)
@api_method
def order_target(self, sid, target,
limit_price=None, stop_price=None, style=None):
"""
Place an order to adjust a position to a target number of shares. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target number of shares and the
current number of shares.
"""
if sid in self.portfolio.positions:
current_position = self.portfolio.positions[sid].amount
req_shares = target - current_position
return self.order(sid, req_shares,
limit_price=limit_price,
stop_price=stop_price,
style=style)
else:
return self.order(sid, target,
limit_price=limit_price,
stop_price=stop_price,
style=style)
    @api_method
    def order_target_value(self, sid, target,
                           limit_price=None, stop_price=None, style=None):
        """
        Place an order to adjust a position to a target value. If
        the position doesn't already exist, this is equivalent to placing a
        new order. If the position does exist, this is equivalent to placing
        an order for the difference between the target value and the
        current value.

        If the Asset being ordered is a Future, the 'target value' calculated
        is actually the target exposure, as Futures have no 'value'.
        """
        # Convert the target dollar value to a share count, then adjust the
        # position via order_target().
        target_amount = self._calculate_order_value_amount(sid, target)
        return self.order_target(sid, target_amount,
                                 limit_price=limit_price,
                                 stop_price=stop_price,
                                 style=style)
    @api_method
    def order_target_percent(self, sid, target,
                             limit_price=None, stop_price=None, style=None):
        """
        Place an order to adjust a position to a target percent of the
        current portfolio value. If the position doesn't already exist, this
        is equivalent to placing a new order. If the position does exist,
        this is equivalent to placing an order for the difference between
        the target percent and the current percent.

        Note that target must be expressed as a decimal (0.50 means 50%).
        """
        # Convert the percentage to a dollar value, then adjust the position.
        target_value = self.portfolio.portfolio_value * target
        return self.order_target_value(sid, target_value,
                                       limit_price=limit_price,
                                       stop_price=stop_price,
                                       style=style)
@api_method
def get_open_orders(self, sid=None):
if sid is None:
return {
key: [order.to_api_obj() for order in orders]
for key, orders in iteritems(self.blotter.open_orders)
if orders
}
if sid in self.blotter.open_orders:
orders = self.blotter.open_orders[sid]
return [order.to_api_obj() for order in orders]
return []
@api_method
def get_order(self, order_id):
if order_id in self.blotter.orders:
return self.blotter.orders[order_id].to_api_obj()
@api_method
def cancel_order(self, order_param):
order_id = order_param
if isinstance(order_param, zipline.protocol.Order):
order_id = order_param.id
self.blotter.cancel(order_id)
    @api_method
    def add_history(self, bar_count, frequency, field, ffill=True):
        """
        Register a history requirement (a HistorySpec) so the history
        container can pre-buffer the requested window of data.
        """
        data_frequency = self.sim_params.data_frequency
        history_spec = HistorySpec(bar_count, frequency, field, ffill,
                                   data_frequency=data_frequency)
        self.history_specs[history_spec.key_str] = history_spec
        if self.initialized:
            if self.history_container:
                # A container already exists: teach it about the new spec.
                self.history_container.ensure_spec(
                    history_spec, self.datetime, self._most_recent_data,
                )
            else:
                # First spec added after initialization: build the container.
                self.history_container = self.history_container_class(
                    self.history_specs,
                    self.current_universe(),
                    self.sim_params.first_open,
                    self.sim_params.data_frequency,
                )
    def get_history_spec(self, bar_count, frequency, field, ffill):
        """
        Return the HistorySpec for the given parameters, creating and
        registering it (and, if needed, the history container) on first use.
        """
        spec_key = HistorySpec.spec_key(bar_count, frequency, field, ffill)
        if spec_key not in self.history_specs:
            data_freq = self.sim_params.data_frequency
            spec = HistorySpec(
                bar_count,
                frequency,
                field,
                ffill,
                data_frequency=data_freq,
            )
            self.history_specs[spec_key] = spec
            if not self.history_container:
                # Lazily create the container on the first history request.
                self.history_container = self.history_container_class(
                    self.history_specs,
                    self.current_universe(),
                    self.datetime,
                    self.sim_params.data_frequency,
                    bar_data=self._most_recent_data,
                )
            # Make sure the container is buffering data for this spec.
            self.history_container.ensure_spec(
                spec, self.datetime, self._most_recent_data,
            )
        return self.history_specs[spec_key]
    @api_method
    def history(self, bar_count, frequency, field, ffill=True):
        """Return a trailing window of historical data for the universe."""
        history_spec = self.get_history_spec(
            bar_count,
            frequency,
            field,
            ffill,
        )
        return self.history_container.get_history(history_spec, self.datetime)
    ####################
    # Account Controls #
    ####################

    def register_account_control(self, control):
        """
        Register a new AccountControl to be checked on each bar.
        """
        if self.initialized:
            # Controls must be set up in initialize(); adding them later
            # would make part of the simulation run unchecked.
            raise RegisterAccountControlPostInit()
        self.account_controls.append(control)
    def validate_account_controls(self):
        """Run every registered AccountControl against the current state."""
        for control in self.account_controls:
            control.validate(self.updated_portfolio(),
                             self.updated_account(),
                             self.get_datetime(),
                             self.trading_client.current_data)
    @api_method
    def set_max_leverage(self, max_leverage=None):
        """
        Set a limit on the maximum leverage of the algorithm.
        """
        control = MaxLeverage(max_leverage)
        self.register_account_control(control)
    ####################
    # Trading Controls #
    ####################

    def register_trading_control(self, control):
        """
        Register a new TradingControl to be checked prior to order calls.
        """
        if self.initialized:
            # Controls must be set up in initialize(); adding them later
            # would leave earlier orders unchecked.
            raise RegisterTradingControlPostInit()
        self.trading_controls.append(control)
    @api_method
    def set_max_position_size(self,
                              sid=None,
                              max_shares=None,
                              max_notional=None):
        """
        Set a limit on the number of shares and/or dollar value held for the
        given sid. Limits are treated as absolute values and are enforced at
        the time that the algo attempts to place an order for sid. This means
        that it's possible to end up with more than the max number of shares
        due to splits/dividends, and more than the max notional due to price
        improvement.

        If an algorithm attempts to place an order that would result in
        increasing the absolute value of shares/dollar value exceeding one of
        these limits, raise a TradingControlException.
        """
        control = MaxPositionSize(asset=sid,
                                  max_shares=max_shares,
                                  max_notional=max_notional)
        self.register_trading_control(control)
    @api_method
    def set_max_order_size(self, sid=None, max_shares=None, max_notional=None):
        """
        Set a limit on the number of shares and/or dollar value of any single
        order placed for sid. Limits are treated as absolute values and are
        enforced at the time that the algo attempts to place an order for sid.

        If an algorithm attempts to place an order that would result in
        exceeding one of these limits, raise a TradingControlException.
        """
        control = MaxOrderSize(asset=sid,
                               max_shares=max_shares,
                               max_notional=max_notional)
        self.register_trading_control(control)
    @api_method
    def set_max_order_count(self, max_count):
        """
        Set a limit on the number of orders that can be placed within the
        given time interval.
        """
        control = MaxOrderCount(max_count)
        self.register_trading_control(control)
    @api_method
    def set_do_not_order_list(self, restricted_list):
        """
        Set a restriction on which sids can be ordered.
        """
        control = RestrictedListOrder(restricted_list)
        self.register_trading_control(control)
    @api_method
    def set_long_only(self):
        """
        Set a rule specifying that this algorithm cannot take short positions.
        """
        self.register_trading_control(LongOnly())
    ###########
    # FFC API #
    ###########
    @api_method
    @require_not_initialized(AddTermPostInit())
    def add_factor(self, factor, name):
        """Register a named FFC factor; names must be unique."""
        if name in self._factors:
            raise ValueError("Name %r is already a factor!" % name)
        self._factors[name] = factor
    @api_method
    @require_not_initialized(AddTermPostInit())
    def add_filter(self, filter):
        """Register an FFC filter under an auto-generated anonymous name."""
        # NOTE(review): the parameter shadows the builtin ``filter``; renaming
        # it would break keyword callers, so it is kept as-is.
        name = "anon_filter_%d" % len(self._filters)
        self._filters[name] = filter
# Note: add_classifier is not yet implemented since you can't do anything
# useful with classifiers yet.
def _all_terms(self):
# Merge all three dicts.
return dict(
chain.from_iterable(
iteritems(terms)
for terms in (self._filters, self._factors, self._classifiers)
)
)
    def compute_factor_matrix(self, start_date):
        """
        Compute a factor matrix starting at start_date.

        Returns a (factor_matrix, end_date) pair, where end_date is capped at
        roughly one trading year (252 days) past start_date or the end of the
        simulation, whichever comes first.
        """
        days = self.trading_environment.trading_days
        start_date_loc = days.get_loc(start_date)
        sim_end = self.sim_params.last_close.normalize()
        # Chunk the computation: at most 252 trading days per matrix.
        end_loc = min(start_date_loc + 252, days.get_loc(sim_end))
        end_date = days[end_loc]
        return self.engine.factor_matrix(
            self._all_terms(),
            start_date,
            end_date,
        ), end_date
    def current_universe(self):
        """Return the set of assets currently in the algorithm's universe."""
        return self._current_universe
    @classmethod
    def all_api_methods(cls):
        """
        Return a list of all the TradingAlgorithm API methods.
        """
        # API methods are tagged with ``is_api_method`` by the @api_method
        # decorator.
        return [
            fn for fn in itervalues(vars(cls))
            if getattr(fn, 'is_api_method', False)
        ]
| apache-2.0 |
huzq/scikit-learn | sklearn/decomposition/_incremental_pca.py | 2 | 14223 | """Incremental Principal Components Analysis."""
# Author: Kyle Kastner <kastnerkyle@gmail.com>
# Giorgio Patrini
# License: BSD 3 clause
import numpy as np
from scipy import linalg, sparse
from ._base import _BasePCA
from ..utils import check_array, gen_batches
from ..utils.extmath import svd_flip, _incremental_mean_and_var
from ..utils.validation import _deprecate_positional_args
class IncrementalPCA(_BasePCA):
"""Incremental principal components analysis (IPCA).
Linear dimensionality reduction using Singular Value Decomposition of
the data, keeping only the most significant singular vectors to
project the data to a lower dimensional space. The input data is centered
but not scaled for each feature before applying the SVD.
Depending on the size of the input data, this algorithm can be much more
memory efficient than a PCA, and allows sparse input.
This algorithm has constant memory complexity, on the order
of ``batch_size * n_features``, enabling use of np.memmap files without
loading the entire file into memory. For sparse matrices, the input
is converted to dense in batches (in order to be able to subtract the
mean) which avoids storing the entire dense matrix at any one time.
The computational overhead of each SVD is
``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
remain in memory at a time. There will be ``n_samples / batch_size`` SVD
computations to get the principal components, versus 1 large SVD of
complexity ``O(n_samples * n_features ** 2)`` for PCA.
Read more in the :ref:`User Guide <IncrementalPCA>`.
.. versionadded:: 0.16
Parameters
----------
n_components : int or None, (default=None)
        Number of components to keep. If ``n_components`` is ``None``,
then ``n_components`` is set to ``min(n_samples, n_features)``.
whiten : bool, optional
When True (False by default) the ``components_`` vectors are divided
by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making data respect some hard-wired assumptions.
copy : bool, (default=True)
If False, X will be overwritten. ``copy=False`` can be used to
save memory but is unsafe for general use.
batch_size : int or None, (default=None)
The number of samples to use for each batch. Only used when calling
``fit``. If ``batch_size`` is ``None``, then ``batch_size``
is inferred from the data and set to ``5 * n_features``, to provide a
balance between approximation accuracy and memory consumption.
Attributes
----------
components_ : array, shape (n_components, n_features)
Components with maximum variance.
explained_variance_ : array, shape (n_components,)
Variance explained by each of the selected components.
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If all components are stored, the sum of explained variances is equal
to 1.0.
singular_values_ : array, shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
mean_ : array, shape (n_features,)
Per-feature empirical mean, aggregate over calls to ``partial_fit``.
var_ : array, shape (n_features,)
Per-feature empirical variance, aggregate over calls to
``partial_fit``.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf.
n_components_ : int
The estimated number of components. Relevant when
``n_components=None``.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
batch_size_ : int
Inferred batch size from ``batch_size``.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import IncrementalPCA
>>> from scipy import sparse
>>> X, _ = load_digits(return_X_y=True)
>>> transformer = IncrementalPCA(n_components=7, batch_size=200)
>>> # either partially fit on smaller batches of data
>>> transformer.partial_fit(X[:100, :])
IncrementalPCA(batch_size=200, n_components=7)
>>> # or let the fit function itself divide the data into batches
>>> X_sparse = sparse.csr_matrix(X)
>>> X_transformed = transformer.fit_transform(X_sparse)
>>> X_transformed.shape
(1797, 7)
Notes
-----
Implements the incremental PCA model from:
*D. Ross, J. Lim, R. Lin, M. Yang, Incremental Learning for Robust Visual
Tracking, International Journal of Computer Vision, Volume 77, Issue 1-3,
pp. 125-141, May 2008.*
See https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf
This model is an extension of the Sequential Karhunen-Loeve Transform from:
*A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve Basis Extraction and
its Application to Images, IEEE Transactions on Image Processing, Volume 9,
Number 8, pp. 1371-1374, August 2000.*
See https://www.cs.technion.ac.il/~mic/doc/skl-ip.pdf
We have specifically abstained from an optimization used by authors of both
papers, a QR decomposition used in specific situations to reduce the
algorithmic complexity of the SVD. The source for this technique is
*Matrix Computations, Third Edition, G. Holub and C. Van Loan, Chapter 5,
section 5.4.4, pp 252-253.*. This technique has been omitted because it is
advantageous only when decomposing a matrix with ``n_samples`` (rows)
>= 5/3 * ``n_features`` (columns), and hurts the readability of the
implemented algorithm. This would be a good opportunity for future
optimization, if it is deemed necessary.
References
----------
D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust Visual
Tracking, International Journal of Computer Vision, Volume 77,
Issue 1-3, pp. 125-141, May 2008.
G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
Section 5.4.4, pp. 252-253.
See also
--------
PCA
KernelPCA
SparsePCA
TruncatedSVD
"""
    @_deprecate_positional_args
    def __init__(self, n_components=None, *, whiten=False, copy=True,
                 batch_size=None):
        # Parameters are documented in the class docstring; they are stored
        # untouched here and validated/consumed in fit/partial_fit.
        self.n_components = n_components
        self.whiten = whiten
        self.copy = copy
        self.batch_size = batch_size
    def fit(self, X, y=None):
        """Fit the model with X, using minibatches of size batch_size.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        y : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Reset all fitted state so repeated fit() calls start fresh.
        self.components_ = None
        self.n_samples_seen_ = 0
        self.mean_ = .0
        self.var_ = .0
        self.singular_values_ = None
        self.explained_variance_ = None
        self.explained_variance_ratio_ = None
        self.noise_variance_ = None
        X = self._validate_data(X, accept_sparse=['csr', 'csc', 'lil'],
                                copy=self.copy, dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        # Default batch size balances accuracy against memory (see docstring).
        if self.batch_size is None:
            self.batch_size_ = 5 * n_features
        else:
            self.batch_size_ = self.batch_size
        for batch in gen_batches(n_samples, self.batch_size_,
                                 min_batch_size=self.n_components or 0):
            X_batch = X[batch]
            # Sparse input is densified one batch at a time so the mean can
            # be subtracted without materializing the full dense matrix.
            if sparse.issparse(X_batch):
                X_batch = X_batch.toarray()
            self.partial_fit(X_batch, check_input=False)
        return self
    def partial_fit(self, X, y=None, check_input=True):
        """Incremental fit with X. All of X is processed as a single batch.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.
        check_input : bool
            Run check_array on X.
        y : Ignored

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        if check_input:
            if sparse.issparse(X):
                raise TypeError(
                    "IncrementalPCA.partial_fit does not support "
                    "sparse input. Either convert data to dense "
                    "or use IncrementalPCA.fit to do so in batches.")
            X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if not hasattr(self, 'components_'):
            self.components_ = None
        # Resolve the effective number of components for this batch.
        if self.n_components is None:
            if self.components_ is None:
                self.n_components_ = min(n_samples, n_features)
            else:
                self.n_components_ = self.components_.shape[0]
        elif not 1 <= self.n_components <= n_features:
            raise ValueError("n_components=%r invalid for n_features=%d, need "
                             "more rows than columns for IncrementalPCA "
                             "processing" % (self.n_components, n_features))
        elif not self.n_components <= n_samples:
            raise ValueError("n_components=%r must be less or equal to "
                             "the batch number of samples "
                             "%d." % (self.n_components, n_samples))
        else:
            self.n_components_ = self.n_components
        if (self.components_ is not None) and (self.components_.shape[0] !=
                                               self.n_components_):
            raise ValueError("Number of input features has changed from %i "
                             "to %i between calls to partial_fit! Try "
                             "setting n_components to a fixed value." %
                             (self.components_.shape[0], self.n_components_))
        # This is the first partial_fit
        if not hasattr(self, 'n_samples_seen_'):
            self.n_samples_seen_ = 0
            self.mean_ = .0
            self.var_ = .0
        # Update stats - they are 0 if this is the first step
        col_mean, col_var, n_total_samples = \
            _incremental_mean_and_var(
                X, last_mean=self.mean_, last_variance=self.var_,
                last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]))
        n_total_samples = n_total_samples[0]
        # Whitening
        if self.n_samples_seen_ == 0:
            # If it is the first step, simply whiten X
            X -= col_mean
        else:
            col_batch_mean = np.mean(X, axis=0)
            X -= col_batch_mean
            # Build matrix of combined previous basis and new data
            mean_correction = \
                np.sqrt((self.n_samples_seen_ * n_samples) /
                        n_total_samples) * (self.mean_ - col_batch_mean)
            X = np.vstack((self.singular_values_.reshape((-1, 1)) *
                          self.components_, X, mean_correction))
        # SVD of the combined matrix; svd_flip fixes sign ambiguity so
        # results are deterministic across calls.
        U, S, Vt = linalg.svd(X, full_matrices=False)
        U, Vt = svd_flip(U, Vt, u_based_decision=False)
        explained_variance = S ** 2 / (n_total_samples - 1)
        explained_variance_ratio = S ** 2 / np.sum(col_var * n_total_samples)
        # Commit updated state, truncated to n_components_.
        self.n_samples_seen_ = n_total_samples
        self.components_ = Vt[:self.n_components_]
        self.singular_values_ = S[:self.n_components_]
        self.mean_ = col_mean
        self.var_ = col_var
        self.explained_variance_ = explained_variance[:self.n_components_]
        self.explained_variance_ratio_ = \
            explained_variance_ratio[:self.n_components_]
        # Remaining variance is attributed to noise (Tipping & Bishop 1999).
        if self.n_components_ < n_features:
            self.noise_variance_ = \
                explained_variance[self.n_components_:].mean()
        else:
            self.noise_variance_ = 0.
        return self
    def transform(self, X):
        """Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set, using minibatches of size batch_size if X is
        sparse.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)

        Examples
        --------
        >>> import numpy as np
        >>> from sklearn.decomposition import IncrementalPCA
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2],
        ...               [1, 1], [2, 1], [3, 2]])
        >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
        >>> ipca.fit(X)
        IncrementalPCA(batch_size=3, n_components=2)
        >>> ipca.transform(X) # doctest: +SKIP
        """
        if sparse.issparse(X):
            # Densify batch-by-batch to bound peak memory for sparse input.
            n_samples = X.shape[0]
            output = []
            for batch in gen_batches(n_samples, self.batch_size_,
                                     min_batch_size=self.n_components or 0):
                output.append(super().transform(X[batch].toarray()))
            return np.vstack(output)
        else:
            return super().transform(X)
| bsd-3-clause |
maheshakya/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/tests/test_axes.py | 1 | 20378 | import numpy as np
from numpy import ma
import matplotlib
from matplotlib.testing.decorators import image_comparison, knownfailureif
import matplotlib.pyplot as plt
@image_comparison(baseline_images=['formatter_ticker_001',
                                   'formatter_ticker_002',
                                   'formatter_ticker_003',
                                   'formatter_ticker_004',
                                   'formatter_ticker_005',
                                   ])
def test_formatter_ticker():
    """Check that user-set axis labels survive the unit-aware auto labeler."""
    import matplotlib.testing.jpl_units as units
    units.register()
    # This essentially test to see if user specified labels get overwritten
    # by the auto labeler functionality of the axes.
    xdata = [ x*units.sec for x in range(10) ]
    ydata1 = [ (1.5*y - 0.5)*units.km for y in range(10) ]
    ydata2 = [ (1.75*y - 1.0)*units.km for y in range(10) ]
    fig = plt.figure()
    ax = plt.subplot( 111 )
    ax.set_xlabel( "x-label 001" )
    fig = plt.figure()
    ax = plt.subplot( 111 )
    ax.set_xlabel( "x-label 001" )
    ax.plot( xdata, ydata1, color='blue', xunits="sec" )
    fig = plt.figure()
    ax = plt.subplot( 111 )
    ax.set_xlabel( "x-label 001" )
    ax.plot( xdata, ydata1, color='blue', xunits="sec" )
    ax.set_xlabel( "x-label 003" )
    fig = plt.figure()
    ax = plt.subplot( 111 )
    ax.plot( xdata, ydata1, color='blue', xunits="sec" )
    ax.plot( xdata, ydata2, color='green', xunits="hour" )
    ax.set_xlabel( "x-label 004" )
    # See SF bug 2846058
    # https://sourceforge.net/tracker/?func=detail&aid=2846058&group_id=80706&atid=560720
    fig = plt.figure()
    ax = plt.subplot( 111 )
    ax.plot( xdata, ydata1, color='blue', xunits="sec" )
    ax.plot( xdata, ydata2, color='green', xunits="hour" )
    ax.set_xlabel( "x-label 005" )
    ax.autoscale_view()
@image_comparison(baseline_images=['offset_points'])
def test_basic_annotate():
    """Annotate a point using 'offset points' text coordinates."""
    # Setup some data
    t = np.arange( 0.0, 5.0, 0.01 )
    s = np.cos( 2.0*np.pi * t )
    # Offset Points
    fig = plt.figure()
    ax = fig.add_subplot( 111, autoscale_on=False, xlim=(-1,5), ylim=(-3,5) )
    line, = ax.plot( t, s, lw=3, color='purple' )
    ax.annotate( 'local max', xy=(3, 1), xycoords='data',
                 xytext=(3, 3), textcoords='offset points' )
@image_comparison(baseline_images=['polar_axes'])
def test_polar_annotations():
    """Annotate on polar axes mixing data and figure-fraction coordinates."""
    # you can specify the xypoint and the xytext in different
    # positions and coordinate systems, and optionally turn on a
    # connecting line and mark the point with a marker.  Annotations
    # work on polar axes too.  In the example below, the xy point is
    # in native coordinates (xycoords defaults to 'data').  For a
    # polar axes, this is in (theta, radius) space.  The text in this
    # example is placed in the fractional figure coordinate system.
    # Text keyword args like horizontal and vertical alignment are
    # respected
    # Setup some data
    r = np.arange(0.0, 1.0, 0.001 )
    theta = 2.0 * 2.0 * np.pi * r
    fig = plt.figure()
    ax = fig.add_subplot( 111, polar=True )
    line, = ax.plot( theta, r, color='#ee8d18', lw=3 )
    ind = 800
    thisr, thistheta = r[ind], theta[ind]
    ax.plot([thistheta], [thisr], 'o')
    ax.annotate('a polar annotation',
                xy=(thistheta, thisr),  # theta, radius
                xytext=(0.05, 0.05),    # fraction, fraction
                textcoords='figure fraction',
                arrowprops=dict(facecolor='black', shrink=0.05),
                horizontalalignment='left',
                verticalalignment='baseline',
                )
#--------------------------------------------------------------------
@image_comparison(baseline_images=['polar_coords'])
def test_polar_coord_annotations():
    """Use polar (theta, radius) coordinates to annotate a cartesian axes."""
    # You can also use polar notation on a catesian axes.  Here the
    # native coordinate system ('data') is cartesian, so you need to
    # specify the xycoords and textcoords as 'polar' if you want to
    # use (theta, radius)
    from matplotlib.patches import Ellipse
    el = Ellipse((0,0), 10, 20, facecolor='r', alpha=0.5)
    fig = plt.figure()
    ax = fig.add_subplot( 111, aspect='equal' )
    ax.add_artist( el )
    el.set_clip_box( ax.bbox )
    ax.annotate('the top',
                xy=(np.pi/2., 10.),      # theta, radius
                xytext=(np.pi/3, 20.),   # theta, radius
                xycoords='polar',
                textcoords='polar',
                arrowprops=dict(facecolor='black', shrink=0.05),
                horizontalalignment='left',
                verticalalignment='baseline',
                clip_on=True,  # clip to the axes bounding box
                )
    ax.set_xlim( -20, 20 )
    ax.set_ylim( -20, 20 )
@image_comparison(baseline_images=['fill_units'])
def test_fill_units():
    """Exercise fill() with unit-carrying data in all four combinations."""
    from datetime import datetime
    import matplotlib.testing.jpl_units as units
    units.register()
    # generate some data
    t = units.Epoch( "ET", dt=datetime(2009, 4, 27) )
    value = 10.0 * units.deg
    day = units.Duration( "ET", 24.0 * 60.0 * 60.0 )
    fig = plt.figure()
    # Top-Left: plain floats on both axes
    ax1 = fig.add_subplot( 221 )
    ax1.plot( [t], [value], yunits='deg', color='red' )
    ax1.fill( [733525.0, 733525.0, 733526.0, 733526.0],
              [0.0, 0.0, 90.0, 0.0], 'b' )
    # Top-Right: Epoch x-values, float y-values
    ax2 = fig.add_subplot( 222 )
    ax2.plot( [t], [value], yunits='deg', color='red' )
    ax2.fill( [t, t, t+day, t+day],
              [0.0, 0.0, 90.0, 0.0], 'b' )
    # Bottom-Left: float x-values, unit y-values
    ax3 = fig.add_subplot( 223 )
    ax3.plot( [t], [value], yunits='deg', color='red' )
    ax3.fill( [733525.0, 733525.0, 733526.0, 733526.0],
              [0*units.deg, 0*units.deg, 90*units.deg, 0*units.deg], 'b' )
    # Bottom-Right: units on both axes
    ax4 = fig.add_subplot( 224 )
    ax4.plot( [t], [value], yunits='deg', color='red' )
    ax4.fill( [t, t, t+day, t+day],
              [0*units.deg, 0*units.deg, 90*units.deg, 0*units.deg],
              facecolor="blue" )
    fig.autofmt_xdate()
@image_comparison(baseline_images=['single_point'])
def test_single_point():
    """Plotting a single point must not blow up autoscaling."""
    fig = plt.figure()
    plt.subplot( 211 )
    plt.plot( [0], [0], 'o' )
    plt.subplot( 212 )
    plt.plot( [1], [1], 'o' )
@image_comparison(baseline_images=['single_date'])
def test_single_date():
    """Plot a single date value both via plot_date and plain plot."""
    time1=[ 721964.0 ]
    data1=[ -65.54 ]
    fig = plt.figure()
    plt.subplot( 211 )
    plt.plot_date( time1, data1, 'o', color='r' )
    plt.subplot( 212 )
    plt.plot( time1, data1, 'o', color='r' )
@image_comparison(baseline_images=['shaped_data'])
def test_shaped_data():
    """Check plot() handling of (1, N) / (N, 1) shaped and 2-D data."""
    xdata = np.array([[ 0.53295185, 0.23052951, 0.19057629, 0.66724975, 0.96577916,
                        0.73136095, 0.60823287, 0.017921 , 0.29744742, 0.27164665],
                      [ 0.2798012 , 0.25814229, 0.02818193, 0.12966456, 0.57446277,
                        0.58167607, 0.71028245, 0.69112737, 0.89923072, 0.99072476],
                      [ 0.81218578, 0.80464528, 0.76071809, 0.85616314, 0.12757994,
                        0.94324936, 0.73078663, 0.09658102, 0.60703967, 0.77664978],
                      [ 0.28332265, 0.81479711, 0.86985333, 0.43797066, 0.32540082,
                        0.43819229, 0.92230363, 0.49414252, 0.68168256, 0.05922372],
                      [ 0.10721335, 0.93904142, 0.79163075, 0.73232848, 0.90283839,
                        0.68408046, 0.25502302, 0.95976614, 0.59214115, 0.13663711],
                      [ 0.28087456, 0.33127607, 0.15530412, 0.76558121, 0.83389773,
                        0.03735974, 0.98717738, 0.71432229, 0.54881366, 0.86893953],
                      [ 0.77995937, 0.995556 , 0.29688434, 0.15646162, 0.051848 ,
                        0.37161935, 0.12998491, 0.09377296, 0.36882507, 0.36583435],
                      [ 0.37851836, 0.05315792, 0.63144617, 0.25003433, 0.69586032,
                        0.11393988, 0.92362096, 0.88045438, 0.93530252, 0.68275072],
                      [ 0.86486596, 0.83236675, 0.82960664, 0.5779663 , 0.25724233,
                        0.84841095, 0.90862812, 0.64414887, 0.3565272 , 0.71026066],
                      [ 0.01383268, 0.3406093 , 0.76084285, 0.70800694, 0.87634056,
                        0.08213693, 0.54655021, 0.98123181, 0.44080053, 0.86815815]])
    # Row vector (1, 10) and column vector (10, 1) should both plot.
    y1 = np.arange( 10 )
    y1.shape = 1, 10
    y2 = np.arange( 10 )
    y2.shape = 10, 1
    fig = plt.figure()
    plt.subplot( 411 )
    plt.plot( y1 )
    plt.subplot( 412 )
    plt.plot( y2 )
    plt.subplot( 413 )
    # Passing a tuple of arrays is invalid and must raise.
    from nose.tools import assert_raises
    assert_raises(ValueError,plt.plot, (y1,y2))
    plt.subplot( 414 )
    plt.plot( xdata[:,1], xdata[1,:], 'o' )
@image_comparison(baseline_images=['const_xy'])
def test_const_xy():
    """Plot constant-x, constant-y, and constant-both data."""
    fig = plt.figure()
    plt.subplot( 311 )
    plt.plot( np.arange(10), np.ones( (10,) ) )
    plt.subplot( 312 )
    plt.plot( np.ones( (10,) ), np.arange(10) )
    plt.subplot( 313 )
    plt.plot( np.ones( (10,) ), np.ones( (10,) ), 'o' )
@image_comparison(baseline_images=['polar_wrap_180',
                                   'polar_wrap_360',
                                   ])
def test_polar_wrap():
    """Check polar line segments that cross the +/-180 and 0/360 degree seams."""
    D2R = np.pi / 180.0  # degrees-to-radians conversion factor
    fig = plt.figure()
    #NOTE: resolution=1 really should be the default
    plt.subplot( 111, polar=True, resolution=1 )
    plt.polar( [179*D2R, -179*D2R], [0.2, 0.1], "b.-" )  # crosses the 180 seam
    plt.polar( [179*D2R, 181*D2R], [0.2, 0.1], "g.-" )
    plt.rgrids( [0.05, 0.1, 0.15, 0.2, 0.25, 0.3] )
    fig = plt.figure()
    #NOTE: resolution=1 really should be the default
    plt.subplot( 111, polar=True, resolution=1 )
    plt.polar( [2*D2R, -2*D2R], [0.2, 0.1], "b.-" )      # crosses the 0 seam
    plt.polar( [2*D2R, 358*D2R], [0.2, 0.1], "g.-" )
    plt.polar( [358*D2R, 2*D2R], [0.2, 0.1], "r.-" )
    plt.rgrids( [0.05, 0.1, 0.15, 0.2, 0.25, 0.3] )
@image_comparison(baseline_images=['polar_units', 'polar_units_2'])
def test_polar_units():
    """Polar plots with JPL unit-ful angle (deg) and radius (km) values."""
    import matplotlib.testing.jpl_units as units
    from nose.tools import assert_true
    units.register()
    pi = np.pi
    deg = units.UnitDbl( 1.0, "deg" )
    km = units.UnitDbl( 1.0, "km" )
    # Equivalent angles as plain radians (x1) and as unit-ful degrees (x2).
    x1 = [ pi/6.0, pi/4.0, pi/3.0, pi/2.0 ]
    x2 = [ 30.0*deg, 45.0*deg, 60.0*deg, 90.0*deg ]
    y1 = [ 1.0, 2.0, 3.0, 4.0]
    y2 = [ 4.0, 3.0, 2.0, 1.0 ]
    fig = plt.figure()
    plt.polar( x2, y1, color = "blue" )
    # polar( x2, y1, color = "red", xunits="rad" )
    # polar( x2, y2, color = "green" )
    fig = plt.figure()
    # make sure runits and theta units work
    y1 = [ y*km for y in y1 ]
    plt.polar( x2, y1, color = "blue", thetaunits="rad", runits="km" )
    # The theta axis formatter must be the unit-aware one after conversion.
    assert_true( isinstance(plt.gca().get_xaxis().get_major_formatter(), units.UnitDblFormatter) )
@image_comparison(baseline_images=['polar_rmin'])
def test_polar_rmin():
    """Polar plot clipped to a nonzero inner radius (rmin) and rmax."""
    r = np.arange(0, 3.0, 0.01)
    theta = 2*np.pi*r  # Archimedean spiral
    fig = plt.figure()
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
    ax.plot(theta, r)
    ax.set_rmax(2.0)
    ax.set_rmin(0.5)
@image_comparison(baseline_images=['polar_theta_position'])
def test_polar_theta_position():
    """Polar plot with theta zero relocated to NW and clockwise direction."""
    r = np.arange(0, 3.0, 0.01)
    theta = 2*np.pi*r  # Archimedean spiral
    fig = plt.figure()
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
    ax.plot(theta, r)
    ax.set_theta_zero_location("NW")
    ax.set_theta_direction('clockwise')
@image_comparison(baseline_images=['axvspan_epoch'])
def test_axvspan_epoch():
    """Vertical span whose limits are JPL Epoch (unit-ful) values."""
    from datetime import datetime
    import matplotlib.testing.jpl_units as units
    units.register()
    # generate some data: a one-day span plus a one-day Duration for padding
    t0 = units.Epoch( "ET", dt=datetime(2009, 1, 20) )
    tf = units.Epoch( "ET", dt=datetime(2009, 1, 21) )
    dt = units.Duration( "ET", units.day.convert( "sec" ) )
    fig = plt.figure()
    plt.axvspan( t0, tf, facecolor="blue", alpha=0.25 )
    ax = plt.gca()
    ax.set_xlim( t0 - 5.0*dt, tf + 5.0*dt )
@image_comparison(baseline_images=['axhspan_epoch'])
def test_axhspan_epoch():
    """Horizontal span whose limits are JPL Epoch (unit-ful) values."""
    from datetime import datetime
    import matplotlib.testing.jpl_units as units
    units.register()
    # generate some data: a one-day span plus a one-day Duration for padding
    t0 = units.Epoch( "ET", dt=datetime(2009, 1, 20) )
    tf = units.Epoch( "ET", dt=datetime(2009, 1, 21) )
    dt = units.Duration( "ET", units.day.convert( "sec" ) )
    fig = plt.figure()
    plt.axhspan( t0, tf, facecolor="blue", alpha=0.25 )
    ax = plt.gca()
    ax.set_ylim( t0 - 5.0*dt, tf + 5.0*dt )
@image_comparison(baseline_images=['hexbin_extent'])
def test_hexbin_extent():
    """hexbin must honor an explicit extent=[xmin, xmax, ymin, ymax]."""
    # this test exposes sf bug 2856228
    fig = plt.figure()
    ax = fig.add_subplot(111)
    data = np.arange(2000.)/2000.
    data.shape = 2, 1000  # two rows of 1000 points -> x and y
    x, y = data
    ax.hexbin(x, y, extent=[.1, .3, .6, .7])
@image_comparison(baseline_images=['nonfinite_limits'])
def test_nonfinite_limits():
    """Axis limit computation must ignore non-finite data (-inf and NaN)."""
    x = np.arange(0., np.e, 0.01)
    olderr = np.seterr(divide='ignore')  # silence divide-by-zero warning from log(0)
    try:
        y = np.log(x)  # y[0] is -inf
    finally:
        np.seterr(**olderr)
    # Bug fix: use floor division -- len(x) / 2 is a float under Python 3
    # and is not a valid index (same integer result under Python 2).
    x[len(x) // 2] = np.nan
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, y)
@image_comparison(baseline_images=['imshow'])
def test_imshow():
    """Render a radially-varying NxN image with imshow."""
    # Create a NxN image
    N = 100
    (x, y) = np.indices((N, N))
    # Bug fix: use floor division -- N / 2 is a float under Python 3 and
    # cannot be subtracted in place from the integer index arrays.
    x -= N // 2
    y -= N // 2
    r = np.sqrt(x**2 + y**2 - x*y)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.imshow(r)
@image_comparison(baseline_images=['imshow_clip'], tol=1e-2)
def test_imshow_clip():
    """Clip an imshow image by a path extracted from a contour.

    As originally reported by Gellule Xg <gellule.xg@free.fr>.
    """
    # Create a NxN image
    N = 100
    (x, y) = np.indices((N, N))
    # Bug fix: use floor division -- N / 2 is a float under Python 3 and
    # cannot be subtracted in place from the integer index arrays.
    x -= N // 2
    y -= N // 2
    r = np.sqrt(x**2 + y**2 - x*y)
    # Create a contour plot at N/4 and extract both the clip path and
    # transform.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    c = ax.contour(r, [N/4])
    x = c.collections[0]
    clipPath = x.get_paths()[0]
    clipTransform = x.get_transform()
    from matplotlib.transforms import TransformedPath
    clip_path = TransformedPath(clipPath, clipTransform)
    # Plot the image clipped by the contour
    ax.imshow(r, clip_path=clip_path)
@image_comparison(baseline_images=['polycollection_joinstyle'])
def test_polycollection_joinstyle():
    """A very wide polygon edge should render with the default join style."""
    # Bug #2890979 reported by Matthew West
    from matplotlib import collections as mcoll
    fig = plt.figure()
    ax = fig.add_subplot(111)
    verts = np.array([[1,1], [1,2], [2,2], [2,1]])  # unit square
    c = mcoll.PolyCollection([verts], linewidths = 40)
    ax.add_collection(c)
    ax.set_xbound(0, 3)
    ax.set_ybound(0, 3)
    ax.set_xticks([])
    ax.set_yticks([])
@image_comparison(baseline_images=['fill_between_interpolate'], tol=1e-2)
def test_fill_between_interpolate():
    """fill_between with interpolate=True on plain and masked arrays."""
    x = np.arange(0.0, 2, 0.02)
    y1 = np.sin(2*np.pi*x)
    y2 = 1.2*np.sin(4*np.pi*x)
    fig = plt.figure()
    ax = fig.add_subplot(211)
    ax.plot(x, y1, x, y2, color='black')
    # Green where y2 dominates, red where y1 does; interpolate the crossings.
    ax.fill_between(x, y1, y2, where=y2>=y1, facecolor='green', interpolate=True)
    ax.fill_between(x, y1, y2, where=y2<=y1, facecolor='red', interpolate=True)
    # Test support for masked arrays.
    y2 = np.ma.masked_greater(y2, 1.0)
    ax1 = fig.add_subplot(212, sharex=ax)
    ax1.plot(x, y1, x, y2, color='black')
    ax1.fill_between(x, y1, y2, where=y2>=y1, facecolor='green', interpolate=True)
    ax1.fill_between(x, y1, y2, where=y2<=y1, facecolor='red', interpolate=True)
@image_comparison(baseline_images=['symlog'])
def test_symlog():
    """Symlog y-scale on data spanning zero and several decades."""
    x = np.array([0,1,2,4,6,9,12,24])
    y = np.array([1000000, 500000, 100000, 100, 5, 0, 0, 0])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, y)
    ax.set_yscale('symlog')
    # Bug fix: the original wrote ``ax.set_xscale=('linear')``, which rebinds
    # the method to a tuple instead of calling it.
    ax.set_xscale('linear')
    ax.set_ylim(-1,10000000)
@image_comparison(baseline_images=['symlog2'])
def test_symlog2():
    """Identity line under symlog x-scales with decreasing linear thresholds."""
    # Numbers from -50 to 50, with 0.001 as step (original comment wrongly
    # said 0.1).
    x = np.arange(-50,50, 0.001)
    fig = plt.figure()
    # One row per linear-threshold value, from widest to narrowest; replaces
    # five copy-pasted panels with a loop (identical subplot layout 511..515).
    for row, linthresh in enumerate([20.0, 2.0, 1.0, 0.1, 0.01], start=1):
        ax = fig.add_subplot(5, 1, row)
        # Plots a simple linear function 'f(x) = x'
        ax.plot(x, x)
        ax.set_xscale('symlog', linthreshx=linthresh)
        ax.grid(True)
    # Zoom in on the last (smallest threshold) panel only.
    ax.set_ylim(-0.1, 0.1)
@image_comparison(baseline_images=['pcolormesh'], tol=0.02)
def test_pcolormesh():
    """pcolormesh with thin edges, thick edges, and gouraud shading."""
    n = 12
    x = np.linspace(-1.5,1.5,n)
    y = np.linspace(-1.5,1.5,n*2)
    X,Y = np.meshgrid(x,y);
    # Non-rectilinear coordinate mesh.
    Qx = np.cos(Y) - np.cos(X)
    Qz = np.sin(Y) + np.sin(X)
    Qx = (Qx + 1.1)
    Z = np.sqrt(X**2 + Y**2)/5;
    Z = (Z - Z.min()) / (Z.max() - Z.min())
    # The color array can include masked values:
    # NOTE(review): Zm is computed but never plotted below -- presumably a
    # leftover from a masked-values variant of this test; confirm.
    Zm = ma.masked_where(np.fabs(Qz) < 0.5*np.amax(Qz), Z)
    fig = plt.figure()
    ax = fig.add_subplot(131)
    ax.pcolormesh(Qx,Qz,Z, lw=0.5, edgecolors='k')
    ax.set_title('lw=0.5')
    ax.set_xticks([])
    ax.set_yticks([])
    ax = fig.add_subplot(132)
    ax.pcolormesh(Qx,Qz,Z, lw=3, edgecolors='k')
    ax.set_title('lw=3')
    ax.set_xticks([])
    ax.set_yticks([])
    ax = fig.add_subplot(133)
    ax.pcolormesh(Qx,Qz,Z, shading="gouraud")
    ax.set_title('gouraud')
    ax.set_xticks([])
    ax.set_yticks([])
@image_comparison(baseline_images=['canonical'])
def test_canonical():
    """Minimal smoke test: a bare line plot of [1, 2, 3]."""
    fig, ax = plt.subplots()
    ax.plot([1,2,3])
@image_comparison(baseline_images=['arc_ellipse'])
def test_arc_ellipse():
    """Compare patches.Arc against a filled polygonal ellipse outline."""
    from matplotlib import patches
    xcenter, ycenter = 0.38, 0.52
    width, height = 1e-1, 3e-1
    angle = -30
    # Sample the ellipse boundary at 1-degree steps...
    theta = np.arange(0.0, 360.0, 1.0)*np.pi/180.0
    x = width/2. * np.cos(theta)
    y = height/2. * np.sin(theta)
    # ...then rotate it by `angle` degrees about the origin.
    rtheta = angle*np.pi/180.
    R = np.array([
        [np.cos(rtheta), -np.sin(rtheta)],
        [np.sin(rtheta), np.cos(rtheta)],
        ])
    x, y = np.dot(R, np.array([x, y]))
    x += xcenter
    y += ycenter
    fig = plt.figure()
    ax = fig.add_subplot(211, aspect='auto')
    ax.fill(x, y, alpha=0.2, facecolor='yellow', edgecolor='yellow', linewidth=1, zorder=1)
    e1 = patches.Arc((xcenter, ycenter), width, height,
                     angle=angle, linewidth=2, fill=False, zorder=2)
    ax.add_patch(e1)
    ax = fig.add_subplot(212, aspect='equal')
    ax.fill(x, y, alpha=0.2, facecolor='green', edgecolor='green', zorder=1)
    e2 = patches.Arc((xcenter, ycenter), width, height,
                     angle=angle, linewidth=2, fill=False, zorder=2)
    ax.add_patch(e2)
@image_comparison(baseline_images=['units_strings'])
def test_units_strings():
    """Plotting sequences of strings must not recurse in the unit registry."""
    # Make sure passing in sequences of strings doesn't cause the unit
    # conversion registry to recurse infinitely
    Id = ['50', '100', '150', '200', '250']
    pout = ['0', '7.4', '11.4', '14.2', '16.3']
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(Id, pout)
@image_comparison(baseline_images=['markevery'])
def test_markevery():
    """Marker-only plots with the various forms of `markevery`."""
    x = np.linspace(0, 10, 100)
    y = np.sin(x) * np.sqrt(x/10 + 0.5)
    # check marker only plot
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, y, 'o', label='default')
    ax.plot(x, y, 'd', markevery=None, label='mark all')
    ax.plot(x, y, 's', markevery=10, label='mark every 10')
    # NOTE(review): markevery=(5, 20) means "start at index 5, then every
    # 20th point"; the legend text does not match that. The label is baked
    # into the baseline image, so confirm before changing either.
    ax.plot(x, y, '+', markevery=(5, 20), label='mark every 5 starting at 10')
    ax.legend()
@image_comparison(baseline_images=['markevery_line'])
def test_markevery_line():
    """Line-plus-marker plots with the various forms of `markevery`."""
    x = np.linspace(0, 10, 100)
    y = np.sin(x) * np.sqrt(x/10 + 0.5)
    # check line/marker combos
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, y, '-o', label='default')
    ax.plot(x, y, '-d', markevery=None, label='mark all')
    ax.plot(x, y, '-s', markevery=10, label='mark every 10')
    # NOTE(review): label text vs markevery=(5, 20) mismatch -- see
    # test_markevery above; confirm against the baseline image.
    ax.plot(x, y, '-+', markevery=(5, 20), label='mark every 5 starting at 10')
    ax.legend()
if __name__=='__main__':
    # Allow running this test module directly through the nose runner.
    import nose
    nose.runmodule(argv=['-s','--with-doctest'], exit=False)
| gpl-2.0 |
hitszxp/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity

# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
    from mpl_toolkits.basemap import Basemap
    basemap = True
except ImportError:
    basemap = False

# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']

Xtrain = np.vstack([data['train']['dd lat'],
                    data['train']['dd long']]).T
# Binary species label: 1 for "micro..." (Microryzomys) records, 0 otherwise.
ytrain = np.array([d.decode('ascii').startswith('micro')
                   for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180.  # Convert lat/long to radians

# Set up the data grid for the contour plot (subsampled 5x for speed).
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()  # -9999 marks ocean cells

# Grid coordinates (lat, lon) in radians, land cells only.
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.

# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)

for i in range(2):
    plt.subplot(1, 2, i + 1)

    # construct a kernel density estimate of the distribution
    print(" - computing KDE in spherical coordinates")
    kde = KernelDensity(bandwidth=0.04, metric='haversine',
                        kernel='gaussian', algorithm='ball_tree')
    kde.fit(Xtrain[ytrain == i])

    # evaluate only on the land: -9999 indicates ocean
    Z = -9999 + np.zeros(land_mask.shape[0])
    Z[land_mask] = np.exp(kde.score_samples(xy))
    Z = Z.reshape(X.shape)

    # plot contours of the density
    levels = np.linspace(0, Z.max(), 25)
    plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)

    if basemap:
        print(" - plot coastlines using basemap")
        m = Basemap(projection='cyl', llcrnrlat=Y.min(),
                    urcrnrlat=Y.max(), llcrnrlon=X.min(),
                    urcrnrlon=X.max(), resolution='c')
        m.drawcoastlines()
        m.drawcountries()
    else:
        print(" - plot coastlines from coverage")
        # Fall back to the -9999 land/ocean boundary of the coverage grid.
        plt.contour(X, Y, land_reference,
                    levels=[-9999], colors="k",
                    linestyles="solid")
    plt.xticks([])
    plt.yticks([])

    plt.title(species_names[i])

plt.show()
| bsd-3-clause |
liam2/larray | setup.py | 1 | 1809 | from __future__ import print_function
import os
from setuptools import setup, find_packages
def readlocal(fname):
    """Return the text of *fname*, resolved relative to this setup.py.

    Bug fix: the original left the file handle open; a context manager
    guarantees it is closed even if reading fails.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
# Distribution metadata consumed by setup() below.
DISTNAME = 'larray'
VERSION = '0.32.1'
AUTHOR = 'Gaetan de Menten, Geert Bryon, Johan Duyck, Alix Damman'
AUTHOR_EMAIL = 'gdementen@gmail.com'
DESCRIPTION = "N-D labeled arrays in Python"
LONG_DESCRIPTION = readlocal("README.rst")
SETUP_REQUIRES = []
# - pandas >= 0.20.0 is required since commit 01669f2024a7bffe47cceec0a0fd845f71b6f7cc
# (issue 702 : fixed bug when writing metadata using HDF format)
INSTALL_REQUIRES = ['numpy >= 1.13', 'pandas >= 0.20.0']
TESTS_REQUIRE = ['pytest', 'pytest-pep8']

LICENSE = 'GPLv3'
URL = 'https://github.com/larray-project/larray'

# Trove classifiers: project status, license, and supported Python versions.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
    'Operating System :: OS Independent',
    'Intended Audience :: Science/Research',
    'Intended Audience :: Developers',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Topic :: Scientific/Engineering',
    'Topic :: Software Development :: Libraries',
]

setup(
    name=DISTNAME,
    version=VERSION,
    license=LICENSE,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    classifiers=CLASSIFIERS,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    setup_requires=SETUP_REQUIRES,
    install_requires=INSTALL_REQUIRES,
    tests_require=TESTS_REQUIRE,
    url=URL,
    packages=find_packages(),
    include_package_data=True,
)
| gpl-3.0 |
fengzhyuan/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
    """Barycenter weights reconstruct each point from its nearest neighbors."""
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])
    # With a single neighbor, each weight is exactly 1 on that neighbor.
    A = barycenter_kneighbors_graph(X, 1)
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1., 0.],
         [1., 0., 0.],
         [0., 1., 0.]])
    A = barycenter_kneighbors_graph(X, 2)
    # check that columns sum to one
    assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
    # Reconstruction from two neighbors should be reasonably close.
    pred = np.dot(A.toarray(), X)
    assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
    """LLE on a flat 2-D grid: the embedding should preserve reconstruction."""
    # note: ARPACK is numerically unstable, so this test will fail for
    # some random seeds. We choose 2 because the tests pass.
    rng = np.random.RandomState(2)
    tol = 0.1
    # grid of equidistant points in 2D, n_components = n_dim
    X = np.array(list(product(range(5), repeat=2)))
    X = X + 1e-10 * rng.uniform(size=X.shape)  # tiny jitter avoids degeneracy
    n_components = 2
    clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
                                          n_components=n_components,
                                          random_state=rng)
    # NOTE(review): tol is assigned twice; this second assignment is redundant.
    tol = 0.1
    N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
    reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
    assert_less(reconstruction_error, tol)
    for solver in eigen_solvers:
        clf.set_params(eigen_solver=solver)
        clf.fit(X)
        assert_true(clf.embedding_.shape[1] == n_components)
        reconstruction_error = linalg.norm(
            np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
        assert_less(reconstruction_error, tol)
        # The stored reconstruction error should agree with the recomputed one.
        assert_almost_equal(clf.reconstruction_error_,
                            reconstruction_error, decimal=1)
    # re-embed a noisy version of X using the transform method
    noise = rng.randn(*X.shape) / 100
    X_reembedded = clf.transform(X + noise)
    assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
    """All four LLE methods on a gently curved 2-D manifold in 3-D."""
    rng = np.random.RandomState(0)
    # similar test on a slightly more complex manifold
    X = np.array(list(product(np.arange(18), repeat=2)))
    X = np.c_[X, X[:, 0] ** 2 / 18]  # add a quadratic third coordinate
    X = X + 1e-10 * rng.uniform(size=X.shape)
    n_components = 2
    for method in ["standard", "hessian", "modified", "ltsa"]:
        clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
                                              n_components=n_components,
                                              method=method, random_state=0)
        # "standard" is more accurate; the variants get a looser tolerance.
        tol = 1.5 if method == "standard" else 3
        N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
        reconstruction_error = linalg.norm(np.dot(N, X) - X)
        assert_less(reconstruction_error, tol)
        for solver in eigen_solvers:
            clf.set_params(eigen_solver=solver)
            clf.fit(X)
            assert_true(clf.embedding_.shape[1] == n_components)
            reconstruction_error = linalg.norm(
                np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
            details = ("solver: %s, method: %s" % (solver, method))
            assert_less(reconstruction_error, tol, msg=details)
            # Stored error must be within relative tolerance of recomputed one.
            assert_less(np.abs(clf.reconstruction_error_ -
                               reconstruction_error),
                        tol * reconstruction_error, msg=details)
def test_pipeline():
    """LocallyLinearEmbedding must compose with a classifier in a Pipeline."""
    # check that LocallyLinearEmbedding works fine as a Pipeline
    # only checks that no error is raised.
    # TODO check that it actually does something useful
    from sklearn import pipeline, datasets
    X, y = datasets.make_blobs(random_state=0)
    clf = pipeline.Pipeline(
        [('filter', manifold.LocallyLinearEmbedding(random_state=0)),
         ('clf', neighbors.KNeighborsClassifier())])
    clf.fit(X, y)
    assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
    """A rank-deficient input must make locally_linear_embedding raise."""
    from nose.tools import assert_raises
    M = np.ones((10, 3))  # all-ones rows: singular weight matrix
    f = ignore_warnings
    assert_raises(ValueError, f(manifold.locally_linear_embedding),
                  M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
cosmoharrigan/pylearn2 | pylearn2/train_extensions/plots.py | 34 | 9617 | """
Plot monitoring extensions while training.
"""
__authors__ = "Laurent Dinh"
__copyright__ = "Copyright 2014, Universite de Montreal"
__credits__ = ["Laurent Dinh"]
__license__ = "3-clause BSD"
__maintainer__ = "Laurent Dinh"
__email__ = "dinhlaur@iro"
import logging
import os
import os.path
import stat
import numpy
np = numpy
from pylearn2.train_extensions import TrainExtension
from theano.compat.six.moves import xrange
from pylearn2.utils import as_floatX, wraps
if os.getenv('DISPLAY') is None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import warnings
log = logging.getLogger(__name__)
def make_readable(fn):
    """
    Make a file readable by all.

    Practical when the plot is in your public_html.

    Parameters
    ----------
    fn : str
        Filename you wish to make public readable.
    """
    # Combine the user/group/other read bits into a single mask...
    read_bits = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
    # ...and OR it into the file's current permission bits.
    current_mode = os.stat(fn).st_mode
    os.chmod(fn, current_mode | read_bits)
def get_best_layout(n_plots):
    """
    Find the best basic layout for a given number of plots.

    Minimize the half-perimeter of an integer rectangle whose area is at
    least ``n_plots``.

    Parameters
    ----------
    n_plots : int
        The number of plots to display

    Returns
    -------
    n_rows : int
        Number of rows in the layout
    n_cols : int
        Number of columns in the layout
    """
    assert n_plots > 0
    # Start from a single row spanning every plot.
    best_rows = 1
    best_cols = int(np.ceil(n_plots * 1. / best_rows))
    best_half_perimeter = best_cols + 1
    # Squarer layouts can only have up to about sqrt(n_plots) rows.
    max_rows = int(np.round(np.sqrt(n_plots)))
    for rows in range(1, max_rows + 1):
        cols = int(np.ceil(n_plots * 1. / rows))
        # Keep the layout whenever it does not increase the half-perimeter.
        if best_half_perimeter >= cols + rows:
            best_rows = rows
            best_cols = int(np.ceil(n_plots * 1. / best_rows))
            best_half_perimeter = best_rows + best_cols
    return best_rows, best_cols
def create_colors(n_colors):
    """
    Create an array of n_colors distinct colors.

    Parameters
    ----------
    n_colors : int
        The number of colors to create

    Returns
    -------
    colors_rgb : np.array
        An array of shape (n_colors, 3) in RGB format
    """
    # Spread hues evenly over [0, 1); full saturation, value damped to .75
    # so the colors stay readable on white.
    hues = as_floatX(np.arange(n_colors))
    hues *= 1. / n_colors
    hsv = np.ones((n_colors, 3))
    hsv[:, 2] *= .75
    hsv[:, 0] = hues
    # hsv_to_rgb expects an image-shaped (1, n, 3) array; strip the extra
    # axis from the result before returning.
    rgb = matplotlib.colors.hsv_to_rgb(hsv.reshape((1,) + hsv.shape))
    return rgb[0]
class Plotter(object):
    """
    Base class for plotting.

    Parameters
    ----------
    freq : int, optional
        The number of epochs before producing plot.
        Default is None (set by the PlotManager).
    """
    def __init__(self, freq=None):
        self.freq = freq
        self.filenames = []

    def setup(self, model, dataset, algorithm):
        """
        Prepare this plotter before training starts.

        Parameters
        ----------
        model : pylearn2.models.Model
            The model trained
        dataset : pylearn2.datasets.Dataset
            The dataset on which the model is trained
        algorithm : pylearn2.training_algorithms.TrainingAlgorithm
            The algorithm the model is trained with
        """
        raise NotImplementedError(str(type(self)) + " does not implement setup.")

    def plot(self):
        """
        Draw and save the desired figure. Called by the PlotManager
        every `freq` epochs.
        """
        raise NotImplementedError(str(type(self)) + " does not implement plot.")

    def set_permissions(self, public):
        """
        Make the produced files readable by everyone.

        Parameters
        ----------
        public : bool
            If public is True, then the associated files are
            readable by everyone.
        """
        if not public:
            return
        for produced_file in self.filenames:
            make_readable(produced_file)
class Plots(Plotter):
    """
    Plot different monitors.

    Parameters
    ----------
    channel_names : list of str
        List of monitor channels to plot
    save_path : str
        Filename of the plot file
    share : float, optional
        The percentage of epochs shown. Default is .8 (80%)
    per_second : bool, optional
        Set if the x-axis is in seconds, in epochs otherwise.
        Default is False.
    kwargs : dict
        Passed on to the superclass.
    """
    def __init__(self, channel_names,
                 save_path, share=.8,
                 per_second=False,
                 ** kwargs):
        super(Plots, self).__init__(** kwargs)
        if not save_path.endswith('.png'):
            save_path += '.png'
        self.save_path = save_path
        self.filenames = [self.save_path]
        self.channel_names = channel_names
        self.n_colors = len(self.channel_names)
        # One distinct color per channel.
        self.colors_rgb = create_colors(self.n_colors)
        self.share = share
        self.per_second = per_second

    @wraps(Plotter.setup)
    def setup(self, model, dataset, algorithm):
        self.model = model

    @wraps(Plotter.plot)
    def plot(self):
        monitor = self.model.monitor
        channels = monitor.channels
        channel_names = self.channel_names
        # Accumulate the plots: one row per channel, one column per epoch.
        plots = np.array(channels[channel_names[0]].val_record)
        plots = plots.reshape((1, plots.shape[0]))
        plots = plots.repeat(self.n_colors, axis=0)
        for i, channel_name in enumerate(channel_names[1:]):
            plots[i+1] = np.array(channels[channel_name].val_record)
        # Keep the relevant part: only the last `share` fraction of epochs.
        n_min = plots.shape[1]
        n_min -= int(np.ceil(plots.shape[1] * self.share))
        plots = plots[:, n_min:]
        # Get the x axis
        x = np.arange(plots.shape[1])
        x += n_min
        # Put in seconds if needed: cumulative training time at each epoch.
        if self.per_second:
            seconds = channels['training_seconds_this_epoch'].val_record
            seconds = np.array(seconds)
            seconds = seconds.cumsum()
            x = seconds[x]
        # Plot the quantities, one semi-transparent curve per channel.
        plt.figure()
        for i in xrange(self.n_colors):
            plt.plot(x, plots[i], color=self.colors_rgb[i],
                     alpha=.5)
        plt.legend(self.channel_names)
        plt.xlim(x[0], x[-1])
        plt.ylim(plots.min(), plots.max())
        plt.axis('on')
        plt.savefig(self.save_path)
        plt.close()
class PlotManager(TrainExtension):
    """
    Class to manage the Plotter classes.

    Parameters
    ----------
    plots : list of pylearn2.train_extensions.Plotter
        List of plots to make during training
    freq : int
        The default number of epochs before producing plot.
    public : bool
        Whether the files are made public or not. Default is true.
    html_path : str
        The path where the HTML page is saved. The associated files should be
        in the same folder. Default is None, then there is no HTML page.
    """
    def __init__(self, plots, freq, public=True, html_path=None):
        self.plots = plots
        self.freq = freq
        # Set a default freq for plotters that did not specify their own.
        for plot in self.plots:
            if plot.freq is None:
                plot.freq = self.freq
        self.public = public
        self.html_path = html_path
        self.filenames = []
        self.count = 0  # number of monitoring epochs seen so far

    @wraps(TrainExtension.setup)
    def setup(self, model, dataset, algorithm):
        for plot in self.plots:
            plot.setup(model, dataset, algorithm)
            for filename in plot.filenames:
                # Warn when saving into NFS-backed home directories on the
                # umontreal cluster (heavy plot writes can hurt the NFS).
                warn = ("/home/www-etud/" in filename)
                warn |= (os.environ['HOME'] in filename)
                warn &= ('umontreal' in os.environ['HOSTNAME'])
                if warn:
                    warnings.warn('YOU MIGHT RUIN THE NFS'
                                  'BY SAVING IN THIS PATH !')
                self.filenames.append(filename)
        if self.html_path is not None:
            # Build a minimal HTML page embedding every produced image.
            header = ('<?xml version="1.0" encoding="UTF-8"?>\n'
                      '<html xmlns="http://www.w3.org/1999/xhtml"'
                      'xml:lang="en">\n'
                      '\t<body>\n')
            footer = ('\t</body>\n'
                      '</html>')
            body = ''
            for filename in self.filenames:
                basename = os.path.basename(filename)
                body += '<img src = "' + basename + '"><br/>\n'
            with open(self.html_path, 'w') as f:
                f.write(header + body + footer)
                f.close()
            if self.public:
                make_readable(self.html_path)

    @wraps(TrainExtension.on_monitor)
    def on_monitor(self, model, dataset, algorithm):
        self.count += 1
        for plot in self.plots:
            if self.count % plot.freq == 0:
                # A failing plotter must not abort training: warn and go on.
                try:
                    plot.plot()
                    plot.set_permissions(self.public)
                except Exception as e:
                    warnings.warn(str(plot) + ' has failed.\n'
                                  + str(e))
| bsd-3-clause |
untom/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
    @abstractmethod
    def _build_graph(self):
        """Build the graph (transition) matrix; concrete subclasses override."""
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.

    gamma : float
        Parameter for rbf kernel

    n_neighbors : integer > 0
        Parameter for knn kernel

    alpha : float
        Clamping factor

    max_iter : float
        Change maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelPropagation(...)

    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf

    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise
    """
    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample

        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        # Column sums used to normalize the affinity matrix.
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            # NOTE(review): np.diag(np.array(normalizer)) on the (1, n)
            # matrix returned by a sparse .sum(axis=0) yields a length-1
            # array, so every stored value is divided by a single scalar
            # rather than by its column sum — looks inconsistent with the
            # dense branch below; confirm.
            affinity_matrix.data /= np.diag(np.array(normalizer))
        else:
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning

    This model is similar to the basic Label Propagation algorithm,
    but uses affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.

    gamma : float
        parameter for rbf kernel

    n_neighbors : integer > 0
        parameter for knn kernel

    alpha : float
        clamping factor

    max_iter : float
        maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelSpreading(...)

    References
    ----------
    Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219

    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning
    """
    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
                 max_iter=30, tol=1e-3):
        # this one has different base parameters (alpha defaults to 0.2,
        # i.e. soft clamping, instead of 1)
        super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
                                             n_neighbors=n_neighbors,
                                             alpha=alpha, max_iter=max_iter,
                                             tol=tol)

    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # compute affinity matrix (or gram matrix)
        if self.kernel == 'knn':
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        laplacian = graph_laplacian(affinity_matrix, normed=True)
        # Flip the sign so propagation uses -L (zero diagonal below).
        laplacian = -laplacian
        if sparse.isspmatrix(laplacian):
            # assumes graph_laplacian returned a COO matrix (has
            # .row/.col attributes) — confirm for other sparse formats
            diag_mask = (laplacian.row == laplacian.col)
            laplacian.data[diag_mask] = 0.0
        else:
            laplacian.flat[::n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| bsd-3-clause |
McDermott-Group/LabRAD | LabRAD/Measurements/General/waveform.py | 1 | 22660 | # Copyright (C) 2015 Samuel Owen, Ivan Pechenezhskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module could be used to create the waveforms that are used to
populate the DAC boars. See the __main__ section of this file for
examples.
"""
import collections
import itertools
import warnings
import numpy as np
import scipy.signal as ss
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cbook
warnings.filterwarnings("ignore", category=matplotlib.cbook.mplDeprecation)
from win32api import SetConsoleCtrlHandler
import labrad.units as units
def _flatten(iterable):
    """
    De-nest a list of _WavePulses for convenience.

    Input:
        iterable: an iterable object.
    Output:
        list: de-nested list of _WavePulses.
    """
    # collections.Iterable was removed from the top-level namespace in
    # Python 3.10 (moved to collections.abc); fall back gracefully.
    iterable_type = getattr(collections, 'abc', collections).Iterable
    remainder = iter(iterable)
    while True:
        # PEP 479 (Python >= 3.7): a StopIteration escaping a generator
        # becomes a RuntimeError, so terminate with an explicit return.
        try:
            first = next(remainder)
        except StopIteration:
            return
        if (isinstance(first, iterable_type) and
                not isinstance(first, _WavePulse)):
            # Push the nested iterable's items back onto the stream.
            remainder = itertools.chain(first, remainder)
        else:
            yield first
class _WavePulse():
    """
    Base pulse class that contains shared methods.

    Time stamps are integer nanosecond ticks and pulse ranges are
    inclusive: a pulse occupies [start, end] and its duration is
    end - start + 1 samples.
    """
    def _ns(self, time):
        """
        Convert time to nanoseconds. Return an integer without any
        units attached.

        Input:
            time: physical or numerical (in ns) time value.
        Output:
            time: numerical time value in ns.
        """
        if isinstance(time, units.Value):
            time = time['ns']
        # Round to the nearest integer nanosecond tick.
        return int(np.round(time))

    def _init_times(self, start=None, duration=None, end=None):
        """
        Define the pulse start, end, and duration attributes.

        Any two of the three parameters are sufficient; if all three
        are given they must satisfy duration == end - start + 1.

        Inputs:
            start: start time of the pulse.
            duration: duration of the pulse.
            end: end time of the pulse.
        Output:
            None.
        """
        if [start, duration, end].count(None) > 1:
            raise ValueError("A pair of time parameters is required " +
                    "to define a pulse. These possible time " +
                    "parameters are 'start', 'duration', and 'end'.")

        if start is not None:
            self.start = self._ns(start)
        if duration is not None:
            self.duration = self._ns(duration)
        if end is not None:
            self.end = self._ns(end)

        # Derive the missing parameter (inclusive-range convention).
        if start is None:
            self.start = self.end - self.duration + 1
        if duration is None:
            self.duration = self.end - self.start + 1
        if end is None:
            self.end = self.start + self.duration - 1

        # end == start - 1 is permitted: it denotes a zero-length pulse.
        if self.start > self.end + 1:
            raise ValueError("The pulse ends before it starts: " +
                    "the pulse starts at " + str(self.start) + " ns " +
                    "and ends at " + str(self.end) + " ns.")
        if self.end - self.start + 1 != self.duration:
            raise ValueError("Inconsistent time parameters: the pulse" +
                    " starts at " + str(self.start) + " ns, its " +
                    "duration is " + str(self.duration) + " ns, while" +
                    " the pulse is expected to end at " +
                    str(self.end) + " ns.")

    def _amplitude(self, amplitude):
        """
        Process the amplitude (strip units from the amplitude value).

        Input:
            amplitude: amplitude of the pulse.
        Output:
            amplitude: unitless amplitude of the pulse.
        """
        if isinstance(amplitude, units.Value):
            # Strip whatever unit the value carries.
            return amplitude[units.Unit(amplitude)]
        else:
            return float(amplitude)

    def _harmonic(self, frequency, phase):
        """
        Process the pulse frequency and phase.

        Inputs:
            frequency: frequency of the harmonic pulse.
            phase: phase of the harmonic pulse.
        Outputs:
            frequency: frequency of the harmonic pulse (in GHz if a
                unitful value was given).
            phase: phase of the harmonic pulse (in radians if a
                unitful value was given).
        """
        if isinstance(frequency, units.Value):
            frequency = frequency['GHz']
        else:
            frequency = float(frequency)
        if isinstance(phase, units.Value):
            phase = phase['rad']
        else:
            phase = float(phase)
        return frequency, phase

    def _check_pulse(self):
        """
        Check whether the pulse amplitudes are in -1.0 to 1.0 range.

        Input:
            None.
        Output:
            None.
        """
        if any(abs(self.pulse) > 1):
            raise ValueError('The pulse amplitude should not exceed 1.')

    def after(self, time=0):
        """
        Time point after the pulse.

        Input:
            time: time delay after this pulse in ns.
        Output:
            time: absolute time.
        """
        return self.end + 1 + self._ns(time)

    def before(self, time=0):
        """
        Time point before the pulse.

        Input:
            time: time delay before this pulse in ns.
        Output:
            time: absolute time.
        """
        return self.start - 1 - self._ns(time)
class DC(_WavePulse):
    """
    Constant (dc) pulse.

    Inputs:
        amplitude: amplitude of the dc pulse.
        start: starting time of the dc pulse.
        duration: length of the dc pulse.
        end: ending time of the dc pulse.
    """
    def __init__(self, amplitude=0, start=None, duration=None, end=None):
        self._init_times(start, duration, end)
        level = self._amplitude(amplitude)
        # One sample per nanosecond, all at the same level.
        self.pulse = level * np.ones(self.duration)
        self._check_pulse()
class Sine(_WavePulse):
    """
    Sine pulse.

    Inputs:
        amplitude: amplitude of the sine pulse (default: 0).
        frequency: frequency of the sine pulse (default: 0 Hz).
        phase: phase of the sine pulse (default: 0 rad).
        offset: constant dc offset of the sine pulse (default: 0).
        start: starting time of the sine pulse.
        duration: length of the sine pulse.
        end: ending time of the sine pulse.
        phase_ref: point in time that should have the specified
            phase (default: start pulse time).
    """
    def __init__(self, amplitude=0, frequency=0, phase=0, offset=0,
                 start=None, duration=None, end=None, phase_ref=None):
        self._init_times(start, duration, end)
        amp = self._amplitude(amplitude)
        freq, ph = self._harmonic(frequency, phase)
        dc_offset = self._amplitude(offset)
        # Time axis, shifted when a phase reference point is given so
        # that the requested phase occurs at phase_ref.
        if phase_ref is None:
            t_first = 0
        else:
            t_first = self.start - self._ns(phase_ref)
        t = t_first + np.arange(self.duration)
        self.pulse = dc_offset + amp * np.sin(2 * np.pi * freq * t + ph)
        self._check_pulse()
class Cosine(_WavePulse):
    """
    Cosine pulse.

    Inputs:
        amplitude: amplitude of the cosine pulse (default: 0).
        frequency: frequency of the cosine pulse (default: 0 Hz).
        phase: phase of the cosine pulse (default: 0 rad).
        offset: constant dc offset of the cosine pulse (default: 0).
        start: starting time of the cosine pulse.
        duration: length of the cosine pulse.
        end: ending time of the cosine pulse.
        phase_ref: point in time that should have the specified
            phase (default: start pulse time).
    """
    def __init__(self, amplitude=0, frequency=0, phase=0, offset=0,
                 start=None, duration=None, end=None, phase_ref=None):
        self._init_times(start, duration, end)
        amp = self._amplitude(amplitude)
        freq, ph = self._harmonic(frequency, phase)
        dc_offset = self._amplitude(offset)
        # Time axis, shifted when a phase reference point is given so
        # that the requested phase occurs at phase_ref.
        if phase_ref is None:
            t_first = 0
        else:
            t_first = self.start - self._ns(phase_ref)
        t = t_first + np.arange(self.duration)
        self.pulse = dc_offset + amp * np.cos(2 * np.pi * freq * t + ph)
        self._check_pulse()
class Gaussian(_WavePulse):
    """
    Gaussian window pulse. The pulse is truncated at about 1 per 2^14
    level since the DACs have 14-bit resolution.

    Inputs:
        amplitude: amplitude of the gaussian pulse.
        start: starting time of the gaussian pulse.
        duration: length of the gaussian pulse.
        end: ending time of the gaussian pulse.
    """
    def __init__(self, amplitude=0, start=None, duration=None, end=None):
        self._init_times(start, duration, end)
        amplitude = self._amplitude(amplitude)
        # Width chosen so the window edges sit at ~2^-14 of the peak:
        # exp(-(duration - 1)**2 / (8 * sigma**2)) = 2**-14.
        sigma = (float(self.duration) - 1) / np.sqrt(112 * np.log(2))
        # scipy.signal.gaussian was moved to scipy.signal.windows.gaussian
        # and removed from the scipy.signal namespace in SciPy 1.13;
        # prefer the new location and fall back for old SciPy versions.
        try:
            gaussian_window = ss.windows.gaussian
        except AttributeError:
            gaussian_window = ss.gaussian
        self.pulse = amplitude * gaussian_window(self.duration, sigma)
        self._check_pulse()
class FromArray(_WavePulse):
    """
    Generate a pulse from an array of samples. The start or end times
    can be arbitrary, and the duration is derived automatically from
    the length of the array.

    Inputs:
        pulse_data: sequence (list, tuple, or numpy array) containing
            the pulse data in 1 ns chunks (default: empty).
        start: starting time of the pulse.
        end: ending time of the pulse.
    """
    def __init__(self, pulse_data=None, start=None, end=None):
        # Avoid the mutable default argument anti-pattern; None stands
        # for "no samples" and preserves the old default behavior.
        if pulse_data is None:
            pulse_data = []
        # Accept any sequence (the original only converted lists, so a
        # tuple would have broken the amplitude check downstream).
        pulse_data = np.asarray(pulse_data)
        duration = len(pulse_data)
        self._init_times(start, duration, end)
        self.pulse = pulse_data
        self._check_pulse()
class Waveform():
    """
    Create a waveform from pulses.

    The start of one pulse is expected to be one unit
    (i.e. one nanosecond) after the end of the previous pulse
    (i.e. pulse2.end - pulse1.start >= 1). Therefore, to make pulse B
    start immediately after another pulse A initialize B.start to
    (A.end + 1), or simply assign A.after() to B.start.

    Inputs:
        label: waveform label string.
        args: arbitrarily long set of _WavePulses to create the waveform
            from. To create a _WavePulse use one of the "public"
            classes such as DC, Sine, Cosine, etc.
    """
    def __init__(self, label='None', *args):
        if not isinstance(label, str):
            raise ValueError('Invalid waveform label.')
        self.label = label

        args = list(_flatten(args))
        pulses = [arg for arg in args if isinstance(arg, _WavePulse)]

        if len(pulses) > 0:
            # Sort based on the start times (replaces the original
            # hand-rolled bubble sort; list.sort is O(n log n)).
            pulses.sort(key=lambda pulse: pulse.start)
            # Ensure there are no overlaps between consecutive pulses.
            for prev, nxt in zip(pulses[:-1], pulses[1:]):
                if prev.end > nxt.start:
                    raise ValueError("There are overlaps between " +
                            "the waveform pulses.")
            # Concatenate the pulses, zero-filling the gaps between them.
            pulses_filled = []
            for prev, nxt in zip(pulses[:-1], pulses[1:]):
                pulses_filled.append(prev.pulse)
                gap = nxt.start - prev.end
                if gap > 1:
                    pulses_filled.append(np.zeros(gap - 1))
            pulses_filled.append(pulses[-1].pulse)
            self.pulses = np.hstack(pulses_filled)
            self.start = pulses[0].start
            self.end = pulses[-1].end
        else:
            # No pulses given: fall back to a single zero sample.
            # (The original crashed here with an IndexError because it
            # read pulses[0].start outside this branch.)
            self.pulses = np.array([0])
            self.start = 0
            self.end = 0
        self.duration = self.end - self.start + 1
def ECLDuringPulses(*args, **kwargs):
    """
    Return _WavePulses that make ECL outputs go high during a set of
    specified _WavePulses.

    Inputs:
        args: set (or list) of _WavePulses during which an ECL pulse
            should be generated.
        pad_length: time before and after the pulses (default: 8 ns).
    Output:
        ECL: list of ECL _WavePulses.
    """
    if 'pad_length' in kwargs:
        pad_length = kwargs['pad_length']
        if isinstance(pad_length, units.Value):
            pad_length = pad_length['ns']
        # Catch only conversion failures; the original bare `except:`
        # would also have swallowed KeyboardInterrupt and SystemExit.
        try:
            pad_length = int(np.round(pad_length))
        except (TypeError, ValueError):
            raise Exception("Invalid ECL pad length value.")
    else:
        pad_length = 8

    args = list(_flatten(args))
    pulses = [arg for arg in args if isinstance(arg, _WavePulse)]

    ECL = []
    for pulse in pulses:
        # The ECL marker goes high pad_length ns before each pulse and
        # stays high until pad_length ns after it.
        ECL.append(DC(amplitude = 1,
                      start = pulse.before(pad_length),
                      end = pulse.after(pad_length)))
    return ECL
def Harmonic(amplitude=0, frequency=0, phase=0,
             cosine_offset=0, sine_offset=0,
             start=None, duration=None, end=None, phase_ref=None):
    """
    Return a matched pair of cosine and sine pulses (e.g. the I and Q
    components of a harmonic signal).

    Inputs:
        amplitude: amplitude of the pulses (default: 0).
        frequency: frequency of the pulses (default: 0 Hz).
        phase: phase of the pulses (default: 0 rad).
        cosine_offset: constant dc offset of the cosine pulse
            (default: 0).
        sine_offset: constant dc offset of the sine pulse
            (default: 0).
        start: starting time of the pulses.
        duration: length of the pulses.
        end: ending time of the pulses.
        phase_ref: point in time that should have the specified
            phase (default: start pulse time).
    Outputs:
        cosine: Cosine pulse object.
        sine: Sine pulse object.
    """
    cosine = Cosine(amplitude, frequency, phase, cosine_offset,
                    start, duration, end, phase_ref)
    sine = Sine(amplitude, frequency, phase, sine_offset,
                start, duration, end, phase_ref)
    return cosine, sine
def wfs_dict(*args, **kwargs):
    """
    Return a waveform dictionary with the waveform labels as the keys.

    Align the waveforms using the waveform starting time. Ensure that
    the waveforms are of an equal length. The waveforms are zero-padded
    at the start and the end to ensure that they are not shorter than
    the minimum allowed length.

    Inputs:
        *args: arbitrarily long set of the Waveforms (instances of class
            Waveform).
        **kwargs:
            min_length: minimum allowed length of the final waveform.
                Short waveforms are padded with zeros at the end
                to increase their length (default: 20).
            start_zeros: number of zeros to add to the start of each
                waveform (default: 4).
            end_zeros: number of zeros to add to the end of each
                waveform (default: 4). Actual number of zeros added may
                be higher if the waveform length does not satisfy
                the min_length requirement.
    Outputs:
        waveforms: dictionary with the processed waveforms.
        offset: difference between the corresponding index values
            of the waveform numpy ndarrays and the time values that
            specify the start and end times for the waveforms:
            offset = ndarray_index - assigned_time_value, i.e.
            ndarray_index = assigned_time_value + offset.
    """
    defaults = {'min_length': 20, 'start_zeros': 4, 'end_zeros': 4}
    for key in kwargs:
        if isinstance(kwargs[key], units.Value):
            kwargs[key] = kwargs[key]['ns']
        # Catch only conversion failures; the original bare `except:`
        # would also have swallowed KeyboardInterrupt and SystemExit.
        try:
            kwargs[key] = int(np.round(kwargs[key]))
        except (TypeError, ValueError):
            raise Exception("Invalid parameter '%s' value." % key)
    defaults.update(kwargs)
    min_len = defaults['min_length']
    start, end = defaults['start_zeros'], defaults['end_zeros']

    wfs = [arg for arg in args if isinstance(arg, Waveform)]
    # Align the waveforms on a common time origin.
    if wfs:
        start_offset = min([wf.start for wf in wfs])
        for wf in wfs:
            wf.pulses = np.hstack([np.zeros(wf.start - start_offset),
                                   wf.pulses])
    else:
        start_offset = 0
    # Create an empty waveform 'None' (always present in the output).
    wfs.append(Waveform('None', DC(start=start_offset, duration=1)))

    # Ensure that the waveforms are long enough and of an equal length.
    max_len = max([wf.pulses.size for wf in wfs]) + start + end
    total_len = max(min_len, max_len)
    for wf in wfs:
        fin = max(total_len - start - wf.pulses.size, end)
        wf.pulses = np.hstack([np.zeros(start), wf.pulses, np.zeros(fin)])

    return {wf.label: wf.pulses for wf in wfs}, start - start_offset
def check_wfs(waveforms):
    """
    Check that all waveforms have the same length.

    Raises an Exception if two waveforms differ in length. An empty
    dictionary is trivially consistent (the original implementation
    crashed with an IndexError on `lengths[0]` in that case).

    Input:
        waveforms: dictionary with the processed waveforms.
    Output:
        None.
    """
    lengths = [waveforms[wf].size for wf in waveforms]
    if lengths and lengths.count(lengths[0]) != len(lengths):
        raise Exception('The waveforms have different lengths.')
def _close_figure(self, signal=None):
    """
    Close the waveform figure (matplotlib figure number 2, the one
    opened by plot_wfs).

    Registered as a win32 console control handler in the __main__
    section, which is why it takes handler-style arguments; both are
    ignored.

    Input:
        None.
    Output:
        None.
    """
    plt.close(2)
def plot_wfs(waveforms, wf_labels, wf_colors=['r', 'g', 'm', 'b', 'k', 'c']):
    """
    Plot waveforms in figure 2.

    Input:
        waveforms: dictionary with the processed waveforms.
        wf_labels: waveform labels to plot.
        wf_colors: colors for waveform colorcoding.
    Output:
        None.
    """
    # NOTE(review): list('rgb') splits a string into single characters,
    # which happens to work for one-letter matplotlib color codes but
    # would mangle multi-character entries — presumably intentional.
    if not isinstance(wf_colors, list):
        wf_colors = list(wf_colors)
    if not isinstance(wf_labels, list):
        wf_labels = list(wf_labels)
    # Time axis in ns, one sample per nanosecond.
    time = waveforms[wf_labels[0]].size
    time = np.linspace(0, time - 1, time)
    plt.figure(2)
    plt.ioff()
    plt.clf()
    for idx, wf in enumerate(wf_labels):
        # Cycle through the six predefined colors.
        plt.plot(time, waveforms[wf], wf_colors[idx % 6],
                 label=wf_labels[idx])
    plt.xlim(time[0], time[-1])
    plt.legend()
    plt.xlabel('Time [ns]')
    plt.ylabel('Waveforms')
    plt.draw()
    # Brief pause so the figure refreshes in interactive use.
    plt.pause(0.05)
if __name__ == "__main__":
"""
Tests and examples. Add your test/example!
"""
# Explicitly close the waveform figure when the terminal is closed.
SetConsoleCtrlHandler(_close_figure, True)
# Cosine pulse with amplitude of 1 and frequency of 0.25 GHz
# starting at t = 2 ns and ending at t = 8 ns.
pulseA1 = Cosine(amplitude=1, frequency=0.25, start=2, end=8)
# Sine pulse with amplitude of 0.5 and frequency of 0.25 GHz
# starting at the start of pulseA1 and ending at the end of pulseA1.
pulseB1 = Sine(amplitude=0.5, frequency=0.25,
start=pulseA1.start, end=pulseA1.end)
# DC pulse with amplitude of -1 starting after the end of pulseA1.
# The pulse duration is 10 ns.
pulseB2 = DC(amplitude=-1, start=pulseA1.after(), duration=10)
# Combine the two pulses into one waveform. The waveform class
# automatically puts the wave pulses in the correct order.
waveformB = Waveform('B', pulseB1, pulseB2)
# Specifying the start, duration and end times at the same time will
# work only if these parameters are consistent, i.e. if the equation
# self.duration = self.end - self.start + 1 is satisfied.
pulseA2 = DC(start=pulseB2.start, duration=10, end=pulseB2.end)
try:
# Inconsistent specifications.
pulseA2 = DC(start=pulseB2.after(-1), duration=12, end=pulseB2.end)
except ValueError:
print('The inconsistent time error has been correctly caught.')
try:
# Amplitude should not exceed 1.
pulseA2 = Sine(amplitude=1, frequency=.25, offset=.1,
start=pulseB2.after(-1), duration=12)
except ValueError:
print('The amplitude error has been correctly caught.')
# Sine pulse with amplitude of 1 and frequency of 0.1 GHz
# starting 2 ns after pulseB1 and ending at the same time as
# pulseB2.
pulseA2 = Sine(amplitude=1, phase=np.pi/2, frequency=0.1,
start=pulseB1.after(2), end=pulseB2.end)
# Combine the two pulses into one waveform. The waveform class
# automatically puts the wave pulses in the correct order.
waveformA = Waveform('A', pulseA1, pulseA2)
# Create a waveform dictionary with the waveform labels as the keys.
# The waveforms will be aligned based on their start times. They
# will be zero-padded to ensure equal length that is longer than
# a minimum length, which is 20 in this example.
wfs, time_offset = wfs_dict(waveformA, waveformB, min_length=20)
print(wfs)
check_wfs(wfs)
print('Time offset = %d ns.' %time_offset)
# Gaussian pulse with amplitude of 1 starting at t = 0 ns and
# ending at t = 14 ns (duration is equal to 15 ns).
pulseC = Gaussian(amplitude=1, start=0, duration=15, end=14)
waveformC = Waveform('C', pulseC)
wfs, time_offset = wfs_dict(waveformA, waveformB, waveformC,
min_length=100)
print(wfs)
check_wfs(wfs)
print('Time offset = %d ns.' %time_offset)
# Create an in-phase and quadrature components of a harmonic pulse.
I, Q = Harmonic(amplitude=0.25, frequency=0.05, start=0,
duration=150)
wfs, time_offset = wfs_dict(Waveform('I', I), Waveform('Q', Q))
print(wfs)
check_wfs(wfs)
print('Time offset = %d ns.' %time_offset)
# Plot the waveforms for inspection.
plot_wfs(wfs, ['I', 'Q'], ['r', 'b'])
# Some animation.
for x in range(100):
# Create an in-phase and quadrature components of a harmonic
# pulse.
I, Q = Harmonic(amplitude=0.25, frequency=0.03, phase= x / 20,
start=0, duration=150)
wfs, time_offset = wfs_dict(Waveform('I', I), Waveform('Q', Q))
# Plot the waveforms for inspection.
plot_wfs(wfs, ['I', 'Q'], ['r', 'b']) | gpl-2.0 |
sebchalmers/DynOPFlow | DynOPFlow.py | 1 | 55808 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 20:18:08 2012
@author:
Sebastien Gros
Assistant Professor
Department of Signals and Systems
Chalmers University of Technology
SE-412 96 Göteborg, SWEDEN
grosse@chalmers.se
Python/casADi Module:
NMPC for Dynamic Optimal Power Flow and Power Dispatch
Requires the installation of the open-source Python module casADi together with the NLP solver ipopt
Required version of CasADi: v1.7.x
"""
from casadi import *
from casadi.tools import *
import math as math
import numpy as np
import matplotlib.pyplot as plt
import random as rand
#from ODE import *
plt.rcParams['text.usetex'] = False
#Fixed constants
#rho_air = 1.2
#rho_water = 1e3
#gravity = 9.81
def assertList(var):
    """Return *var* unchanged if it is already a list; otherwise wrap
    it in a single-element list."""
    return var if isinstance(var, list) else [var]
class Plant:
    """
    A power plant (generator, storage unit or load) to be attached to a
    bus of a power grid.

    Inputs:
        Inputs: additional (user-declared) input names.
        States: state names (for plants with dynamics).
        ExtParameters: external parameter names.
        R: plant resistance parameter — TODO confirm meaning.
        Directionality: 'Mono' (single non-negative 'Power' input) or
            'Bi' (separate non-negative 'Pcharge'/'Pdischarge' inputs,
            e.g. for storage).
        Load: True if the plant is a load (gets 'ActivePower' and
            'ReactivePower' inputs instead).
        Bus: index of the bus the plant is attached to.
        label: plant name.
    """
    def __init__(self, Inputs = [], States = [], ExtParameters = [], R = 0., Directionality = 'Mono', Load = False, Bus = [], label = []): # set of reasons (strings) why the Dae cannot be modified (add new x/z/u/p/output)
        # _frozen becomes True once the plant is added to a grid; the
        # setter methods below then refuse further modifications.
        self._frozen = False
        self.Bus = Bus
        self.label = label
        self.Directionality = Directionality
        self._Load = Load
        self.R = R

        #Plant Default Input structure (list ready to be embedded in a struct_sym)
        InputList = [entry("CurrentReal"),
                     entry("CurrentImag")]

        #Structure for INPUTS of various power plants
        if (Load == True):
            InputList.append(entry("ActivePower"))
            InputList.append(entry("ReactivePower"))
        elif (Directionality == 'Bi'):
            InputList.append(entry("Pcharge"))
            InputList.append(entry("Pdischarge"))
        elif (Directionality == 'Mono'):
            InputList.append(entry("Power"))
        else:
            print "Illegal option, ignored"
            return

        # Append the user-declared inputs to the default ones.
        if (len(Inputs) > 0):
            self._additionalInputs = Inputs #Keep the list for plotting purposes
            for key in assertList(Inputs):
                InputList.append(entry(key))

        # States declared by the user
        #if (len(States) > 0): ################ INTRODUCE THIS ######## !!!!
        StateList = []
        for key in assertList(States):
            StateList.append(entry(key))

        # External parameters declared by the user (the attribute only
        # exists if at least one parameter was declared; see _BuildFunc).
        if (len(ExtParameters) > 0):
            ExtParamList = []
            for key in assertList(ExtParameters):
                ExtParamList.append(entry(key))
            self.ExtParameters = struct_ssym(ExtParamList)

        # Symbolic structures for the states and inputs (InputsPrev is
        # a second copy used for input-rate expressions).
        self.States = struct_ssym(StateList)
        self.Inputs = struct_ssym(InputList)
        self.InputsPrev = struct_ssym(InputList)

        #Structure for plant bounds
        Bound = [entry('Inputs', struct = self.Inputs)]
        if (len(self.States.keys()) > 0):
            Bound.append(entry('States', struct = self.States))
        Bound = struct_ssym(Bound)

        self.LB = Bound(-inf)
        self.UB = Bound( inf)

        # Power variables are non-negative by construction.
        if (Directionality == 'Mono') and (Load == False):
            self.LB['Inputs','Power'] = 0.
        elif (Directionality == 'Bi') and (Load == False):
            self.LB['Inputs','Pcharge'] = 0.
            self.LB['Inputs','Pdischarge'] = 0.
        else:
            self.LB['Inputs','ActivePower'] = 0.

    def setDynamics(self, RHS = [], dt = 1., nstep = 10):
        """
        Attach continuous-time dynamics dX/dt = RHS(X, U) to the plant
        and build a discrete-time shooting function over one sampling
        period dt using nstep steps of fixed-step RK4 integration.
        """
        if (self._frozen == True):
            print "Plant already added to the grid, call ignored"
            return

        print "Right-Hand side: ", RHS

        if isinstance(RHS,list):
            RHS = veccat(RHS)

        X = self.States
        U = self.Inputs

        # RK4 step length: the sampling period is split into nstep steps.
        dtRK4 = dt/float(nstep)

        fimplicit = SXFunction(daeIn(x=X,p=U),daeOut(ode=RHS))
        fimplicit.init()

        # Classic RK4 stage evaluations.
        [k1] = daeOut(fimplicit.eval(daeIn( x=X,p=U )),"ode")
        [k2] = daeOut(fimplicit.eval(daeIn( x=X+0.5*dtRK4*k1,p=U )),"ode")
        [k3] = daeOut(fimplicit.eval(daeIn( x=X+0.5*dtRK4*k2,p=U )),"ode")
        [k4] = daeOut(fimplicit.eval(daeIn( x=X+dtRK4*k3,p=U )),"ode")

        rk4_step = SXFunction([X,U],[X + (1./6)*dtRK4*(k1 + 2*k2 + 2*k3 + k4)])
        rk4_step.init()

        #CONSTRUCT SHOOTING
        # -----------------------------------------------------------------------------
        # Chain nstep RK4 steps (with the input held constant) to map the
        # state across one full sampling period.
        out = X
        for i in range(0,nstep):
            [out] = rk4_step.eval([out,U])
        Shoot = SXFunction([X,U],[out])
        #Shoot = SXFunction([X,U],[X+dt*RHS]) #Invoke a 1st order Euler...
        Shoot.init()

        self._Shoot = Shoot

    def _BuildFunc(self, Expr, Terminal):
        """
        Wrap the symbolic expression Expr into an SXFunction.

        Stage (Terminal == False) functions take the inputs, the
        previous inputs and — if the plant has any — the states;
        terminal functions take the states only. The external
        parameters are appended when the plant declared any.
        """
        X = self.States
        U = self.Inputs
        Uprev = self.InputsPrev

        if Terminal == False:
            listFuncInput = [U, Uprev]
            if (X.size > 0):
                listFuncInput.append(X)
        else:
            listFuncInput = [X]

        # self.ExtParameters only exists if the user declared parameters.
        if hasattr(self,'ExtParameters'):
            listFuncInput.append(self.ExtParameters)

        Func = SXFunction(listFuncInput,[Expr])
        Func.init()

        return Func

    def setConstraints(self, Const, Terminal = False):
        """
        Attach stage (Terminal == False) or terminal (Terminal == True)
        inequality constraints, given as a symbolic expression or a
        list thereof.
        """
        if (self._frozen == True):
            print "Plant already added to the grid, call ignored"
            return

        if not(isinstance(Const,list)):
            Const = [Const]

        #ConstFunc = self._BuildFunc(veccat(Const), Terminal)
        if (Terminal == False):
            self._StageConst = self._BuildFunc(veccat(Const), Terminal)
        elif (Terminal == True):
            self._TerminalConst = self._BuildFunc(veccat(Const), Terminal)

    def setCost(self, Cost, Terminal = False):
        """
        Attach a stage (Terminal == False) or terminal (Terminal == True)
        cost, given as a symbolic expression.
        """
        if (self._frozen == True):
            print "Plant already added to the grid, call ignored"
            return

        #CostFunc = self._BuildFunc(Cost, Terminal)
        if (Terminal == False):
            self._StageCost = self._BuildFunc(Cost, Terminal)
        elif (Terminal == True):
            self._TerminalCost = self._BuildFunc(Cost, Terminal)
class PowerGrid:
"""
Generates:
- Power FLow equations and Power plants dynamics
- OPF solver
- optimal grid control solver
"""
def __init__(self, NBus = 0,Graph = []):
self.NBus = NBus
self.Graph = Graph
self.PlantList = []
self._hasStates = False
self.PowerFlowBounds = {'Vmin' : 0,
'Vmax' : inf,
'LineCurrentMax' : inf }
#CONSTRUCT POWER FLOW
    def Flow(self, OPFSolver = False):
        """
        Build the symbolic power-flow machinery for the grid.

        Constructs the bus and line admittance matrices from self.Graph,
        wraps bus powers, bus/line currents and squared voltage magnitudes
        into initialized SXFunctions stored on self, and -- when OPFSolver
        is True -- additionally assembles an Ipopt-based static OPF solver
        stored in self.OPF (otherwise self.OPF is set to True as a marker).

        Parameters:
        OPFSolver -- when True, also build the static OPF NLP solver.
        """
        NBus = self.NBus
        NLine = np.size( self.Graph ,axis = 0)
        print "Constructing Power Flow Equations, #Bus =",NBus, ", #Line =",NLine
        #CONSTRUCT THE POWER FLOW EQUATIONS
        Graph = self.Graph
        # Bus admittance matrix: Inodal_injection = Y*V (Nodal current injection)
        # Each edge is (bus_from, bus_to, impedance); admittance = 1/impedance.
        Y = np.array([ [ 0.0*1j for i in range(NBus) ] for j in range(NBus) ])
        for k in range(NLine):
            Y[Graph[k][0],Graph[k][0]] += 1/Graph[k][2]
            Y[Graph[k][1],Graph[k][1]] += 1/Graph[k][2]
            Y[Graph[k][0],Graph[k][1]] -= 1/Graph[k][2]
            Y[Graph[k][1],Graph[k][0]] -= 1/Graph[k][2]
        # Line admittance matrix (directed): Iline = L*V
        L = np.array([ [ 0.0*1j for i in range(NBus) ] for j in range(NLine) ])
        for k in range(NLine):
            L[k,Graph[k][0]] = 1/Graph[k][2]
            L[k,Graph[k][1]] = -1/Graph[k][2]
        ######## BUILD POWER FLOW EQUATIONS (Results in Function: Bus voltage, Bus power -> Residual, to be satisfied at every time stage)
        #Bus Voltages (real and complex parts)
        BusVoltages = struct_ssym([entry("Real",repeat = NBus),
                                   entry("Imag",repeat = NBus)])
        #Bus currents (for current limitation) I = Y*V
        # Complex product expanded into real/imaginary parts (casadi is real-valued).
        BusCurrentsReal = mul(np.real(Y),BusVoltages["Real",veccat]) - mul(np.imag(Y),BusVoltages["Imag",veccat])
        BusCurrentsImag = mul(np.real(Y),BusVoltages["Imag",veccat]) + mul(np.imag(Y),BusVoltages["Real",veccat])
        BusCurrents2 = BusCurrentsReal*BusCurrentsReal + BusCurrentsImag*BusCurrentsImag
        #Bus voltage modules square (for voltage limitation)
        BusVoltages2 = BusVoltages["Real",veccat]*BusVoltages["Real",veccat] + BusVoltages["Imag",veccat]*BusVoltages["Imag",veccat]
        #Line currents
        LineCurrentsReal = mul(np.real(L),BusVoltages["Real",veccat]) - mul(np.imag(L),BusVoltages["Imag",veccat])
        LineCurrentsImag = mul(np.real(L),BusVoltages["Imag",veccat]) + mul(np.imag(L),BusVoltages["Real",veccat])
        LineCurrents2 = LineCurrentsReal*LineCurrentsReal + LineCurrentsImag*LineCurrentsImag
        #Build complex power injections at the bus
        # S = V * conj(I): real part is active power, imaginary part reactive.
        SReal = BusCurrentsReal*BusVoltages["Real",veccat] + BusCurrentsImag*BusVoltages["Imag",veccat]
        SImag = BusCurrentsReal*BusVoltages["Imag",veccat] - BusCurrentsImag*BusVoltages["Real",veccat]
        #Create functions for Current and Voltages**2
        self.BusActivePowerFunc = SXFunction([BusVoltages],[SReal])
        self.BusReactivePowerFunc = SXFunction([BusVoltages],[SImag])
        self.BusCurrentsRealFunc = SXFunction([BusVoltages],[BusCurrentsReal])
        self.BusCurrentsImagFunc = SXFunction([BusVoltages],[BusCurrentsImag])
        self.LineCurrents2Func = SXFunction([BusVoltages],[LineCurrents2])
        self.BusVoltages2Func = SXFunction([BusVoltages],[BusVoltages2])
        self.BusVoltages = BusVoltages
        self.BusActivePowerFunc.init()
        self.BusReactivePowerFunc.init()
        self.BusCurrentsRealFunc.init()
        self.BusCurrentsImagFunc.init()
        self.LineCurrents2Func.init()
        self.BusVoltages2Func.init()
        # Marker meaning "Flow() has run"; replaced by the solver object below
        # when OPFSolver is True. OPFSolve() checks for this.
        self.OPF = True
        #CONSTRUCT OPF SOLVER IS ASKED IN THE FLOW FUNCTION OPTIONS
        if (OPFSolver == True):
            print "Construct OPF Solver"
            Power = struct_ssym([
                                  entry('Active',   repeat = NBus),
                                  entry('Reactive', repeat = NBus)
                                ])
            V = struct_msym([
                              entry('BusPower',    struct = Power),
                              entry('BusVoltages', struct = BusVoltages)
                            ])
            [BusActivePower]   = self.BusActivePowerFunc.call([V['BusVoltages']])
            [BusReactivePower] = self.BusReactivePowerFunc.call([V['BusVoltages']])
            [LineCurrents2]    = self.LineCurrents2Func.call([V['BusVoltages']])
            [BusVoltages2]     = self.BusVoltages2Func.call([V['BusVoltages']])
            # Power balance: injected power must equal the decision variables
            ActivePowerBalance   = BusActivePower   - V['BusPower','Active', veccat]
            ReactivePowerBalance = BusReactivePower - V['BusPower','Reactive',veccat]
            g = struct_MX([
                            entry('ActivePower',   expr = ActivePowerBalance),
                            entry('ReactivePower', expr = ReactivePowerBalance),
                            entry('LineCurrents2', expr = LineCurrents2),
                            entry('BusVoltages2',  expr = BusVoltages2)
                          ])
            # Objective: total ohmic grid losses sum(R*|I_line|**2)
            Cost = 0
            for line in range(NLine):
                Cost += np.real(Graph[line][2])*LineCurrents2[line]
            nl = MXFunction(nlpIn(x=V),nlpOut(f=Cost,g=g))
            nl.init()
            # set-up solver
            solver = IpoptSolver(nl)
            #solver.setOption("print_level",0)
            solver.setOption("expand",True)
            solver.setOption("parametric",False)
            solver.setOption("generate_hessian",True)
            solver.setOption("max_iter",1000)
            solver.setOption("tol",1e-6)
            solver.setOption("linear_solver","ma27")
            solver.init()
            # Pre-build sensitivity functions (Hessian of the Lagrangian,
            # constraint Jacobian, cost gradient) for later analysis.
            Hessian = solver.hessLag()
            Hessian.init()
            Jacobian = solver.jacG()
            Jacobian.init()
            JacCost = solver.gradF()
            JacCost.init()
            self.VOPF        = V
            self.gOPF        = g
            self.OPF         = solver
            self._HessOPF    = Hessian
            self._JacOPF     = Jacobian
            self._JacCostOPF = JacCost
def OPFSolve(self, Grid = []):
lbV = self.VOPF(-inf)
ubV = self.VOPF( inf)
lbg = self.gOPF()
ubg = self.gOPF()
if not(hasattr(self,'OPF')):
#Check that .Flow() has been called
print "You must call .Flow(OPFSolver = True) to setup OPF before calling .OPFSolve()"
return []
if (self.OPF == True):
#Check that a solver exists
print "You must call .Flow(OPFSolver = True) to setup OPF before calling .OPFSolve()"
return []
#Set initial guess
init = self.VOPF()
init['BusVoltages','Real',veccat] = 1.
#Set the bounds (default values if not defined)
lbV = self.VOPF(-inf)
ubV = self.VOPF( inf)
lbg = self.gOPF()
ubg = self.gOPF()
#Ascertain the completness of the PowerFlowBounds dictionary, complete if necessary
if not('Vmin' in self.PowerFlowBounds.keys()):
self.PowerFlowBounds['Vmin'] = 0
print "Min Bus Voltage not provided, default value assigned (0)"
if not('Vmax' in self.PowerFlowBounds.keys()):
self.PowerFlowBounds['Vmax'] = inf
print "Max Bus Voltage not provided, default value assigned (inf)"
if not('LineCurrentMax' in self.PowerFlowBounds.keys()):
self.PowerFlowBounds['LineCurrentMax'] = inf
print "Max Line Current not provided, default value assigned (inf)"
ubg['LineCurrents2'] = np.array(self.PowerFlowBounds['LineCurrentMax'])**2
lbg['BusVoltages2'] = np.array(self.PowerFlowBounds['Vmin'])**2
ubg['BusVoltages2'] = np.array(self.PowerFlowBounds['Vmax'])**2
#Assign Network operational conditions
for entry in range(np.size(Grid)):
if (Grid[entry]['Property'] == 'slack'):
print "Bus", Grid[entry]['Bus'],"is slack"
lbg['BusVoltages2',Grid[entry]['Bus']] = Grid[entry]['V']**2
ubg['BusVoltages2',Grid[entry]['Bus']] = Grid[entry]['V']**2
lbV['BusVoltages','Imag',Grid[entry]['Bus']] = 0.0
ubV['BusVoltages','Imag',Grid[entry]['Bus']] = 0.0
elif (Grid[entry]['Property'] == 'PV'):
print "Bus", Grid[entry]['Bus'],"is PV"
lbg['BusVoltages2',Grid[entry]['Bus']] = Grid[entry]['V']**2
ubg['BusVoltages2',Grid[entry]['Bus']] = Grid[entry]['V']**2
lbV['BusPower','Active',Grid[entry]['Bus']] = Grid[entry]['P']
ubV['BusPower','Active',Grid[entry]['Bus']] = Grid[entry]['P']
elif (Grid[entry]['Property'] == 'PQ'):
print "Bus", Grid[entry]['Bus'],"is PQ"
lbV['BusPower','Active',Grid[entry]['Bus']] = Grid[entry]['P']
ubV['BusPower','Active',Grid[entry]['Bus']] = Grid[entry]['P']
lbV['BusPower','Reactive',Grid[entry]['Bus']] = Grid[entry]['Q']
ubV['BusPower','Reactive',Grid[entry]['Bus']] = Grid[entry]['Q']
self.OPF.setInput( lbV, "lbx")
self.OPF.setInput( ubV, "ubx")
self.OPF.setInput(init, "x0" )
self.OPF.setInput( lbg, "lbg")
self.OPF.setInput( ubg, "ubg")
self.OPF.solve()
self.lbg = lbg
self.ubg = ubg
self.lbV = lbV
self.ubV = ubV
return self.VOPF(self.OPF.output('x'))
########### POWER DISPACTH PROBLEM ##########
def addPlant(self, plant):
if isinstance(plant,list): #Treat list of plants
for plant_k in plant:
self.addPlant(plant_k)
else:
if (plant._frozen == True):
print "Plant already added to the grid, call ignored"
return
self.PlantList.append(plant)
plant._frozen = True
if hasattr(plant,'_Shoot'):
self._hasStates = True
    def _VariableConstructor(self, N):
        """
        Build the symbolic decision-variable structures for a horizon of N stages.

        Returns (V, u0, x0, EP, ExtParameters):
        V             -- full decision vector: bus voltages and plant inputs at
                         each of the N stages, plus N+1 state nodes when any
                         plant has dynamics (multiple-shooting endpoints).
        u0, x0        -- structures for the initial inputs / states.
        EP            -- parameter bundle passed to the NLP (u0 + ExtParameters).
        ExtParameters -- per-plant user-specified parameters.
        """
        ###### CONSTRUCT DECISION VARIABLES OF LENGTH N #######
        List = []
        for plant in self.PlantList:
            List.append(entry(plant.label, struct = plant.Inputs))
        Inputs = struct_ssym(List)
        List = []
        for plant in self.PlantList:
            # Only plants with dynamics contribute state variables.
            if (len(plant.States.keys()) > 0):
                List.append(entry(plant.label, struct = plant.States))
        States = struct_ssym(List)
        #Structures to manipulate initial conditions and inputs
        u0 = struct_msym(Inputs)
        x0 = struct_msym(States)
        #User-specified additional parameters
        EPList = []
        for plant in self.PlantList:
            if hasattr(plant,'ExtParameters'):
                EPList.append(entry(plant.label, struct = plant.ExtParameters))
        ExtParameters = struct_msym(EPList)
        EP = struct_msym([
                          entry('u0', struct = u0),
                          entry('ExtParameters', struct = ExtParameters)
                         ])
        Vlist = []
        Vlist.append(entry("BusVoltages", repeat = N, struct = self.BusVoltages))
        if (self._hasStates == True):
            # N+1 state nodes for N stages: shooting intervals share endpoints.
            Vlist.append(entry("States", repeat = N+1, struct = States))
        Vlist.append(entry("Inputs", repeat = N, struct = Inputs))
        V = struct_msym(Vlist)
        return V, u0, x0, EP, ExtParameters
    def _CostConstructor(self, V, EP, Nstage, GridLoss):
        """
        Constructor for the Cost function, handy to build the cost for different V:s

        Assembles the Lagrange (stage) cost -- optional ohmic grid losses plus
        each plant's stage cost -- and the plants' terminal costs. Returns the
        total cost expression (normalized by Nstage) together with initialized
        MXFunctions evaluating the Lagrange and terminal parts separately.
        """
        Cost_Lagrange = 0
        #Grid loss
        if (GridLoss == True):
            NLine = len( self.Graph )
            for k in range(Nstage):
                # Grid losses: sum of R*|I_line|**2 over all lines
                [LineCurrents2_k] = self.LineCurrents2Func.call([V['BusVoltages',k]])
                for line in range(NLine):
                    Cost_Lagrange += np.real(self.Graph[line][2])*LineCurrents2_k[line]
        #Plants Lagrange cost
        for plant in self.PlantList:
            for k in range(Nstage):
                if (hasattr(plant,'_StageCost')):
                    # Input list must match the signature built in _BuildFunc:
                    # [u_k, u_{k-1}, (x_k), (external parameters)].
                    # At k == 0 the "previous input" comes from the parameter u0.
                    CostInputList = [V['Inputs',k,plant.label]]
                    if (k==0):
                        CostInputList.append( EP['u0',plant.label] )
                    else:
                        CostInputList.append( V['Inputs',k-1,plant.label])
                    if (plant.States.size > 0):
                        CostInputList.append(V['States',k,plant.label])
                    if hasattr(plant,'ExtParameters'):
                        CostInputList.append(EP['ExtParameters',plant.label])
                    [Cost_k] = plant._StageCost.call(CostInputList)
                    Cost_Lagrange += Cost_k
        #Plants Terminal cost (evaluated on the final state node)
        Cost_Terminal = 0
        for plant in self.PlantList:
            if (hasattr(plant,'_TerminalCost')):
                CostInputList = [V['States',-1,plant.label]]
                if hasattr(plant,'ExtParameters'):
                    CostInputList.append(EP['ExtParameters',plant.label])
                [Cost_k] = plant._TerminalCost.call(CostInputList)
                Cost_Terminal += Cost_k
        # Normalize by the horizon length so costs are comparable across horizons.
        Cost = (Cost_Lagrange+Cost_Terminal)/Nstage
        LagrangeCostFunc = MXFunction([V,EP],[Cost_Lagrange])
        LagrangeCostFunc.init()
        TerminalCostFunc = MXFunction([V,EP],[Cost_Terminal])
        TerminalCostFunc.init()
        return Cost, LagrangeCostFunc, TerminalCostFunc
def Dispatch(self, Horizon = 24, Simulation = 0, GridLoss = True):
"""
Constructs the power dispatch problem, default Horizon length (if argument Horizon is not provided) is 24 time units
"""
if (self.OPF == False):
Power.Flow(self)
print "Construct Dynamic OPF"
#THorizon = self.THorizon
Nstage = Horizon#self.TimeSetup['Horizon']
NBus = self.NBus
NLine = len( self.Graph )
TransferBus = []
BusProperties = []
for Bus in range(NBus):
Busk = 'transfer'
BuskProperties = []
for plant in self.PlantList:
if ( plant.Bus == Bus):
Busk = 'open'
BuskProperties.append(plant.label)
if (BuskProperties == []):
TransferBus.append(Bus)
BusProperties.append({Bus: BuskProperties})
################### CONSTRUCT VARIABLES ########################
V, u0, x0, EP, ExtParameters = self._VariableConstructor(Nstage)
#Structure for storing NMPC solutions if Nsim provided
if (Simulation > 0):
Vstore,_,_,_,_ = self._VariableConstructor(Simulation)
self.Vstore = Vstore()
############################### BUILD COST AND CONSTRAINTS ###############################
Cost, LagrangeCostFunc, TerminalCostFunc = self._CostConstructor(V, EP, Nstage, GridLoss)
if (Simulation > 0):
_, self.LagrangeCost, self.TerminalCost = self._CostConstructor(Vstore, EP, Simulation, GridLoss)
else:
self.LagrangeCost = LagrangeCostFunc
self.TerminalCost = TerminalCostFunc
# OPF constraints
CurrentBalance = []
LineCurrents2 = []
BusVoltages2 = []
# Generic constraints
PeriodicConst = []
EquConst = []
IneqConst = []
# Thermal constraints
ThermalConst = []
ThermalConstExt = []
######### BUILD COST & CONSTRAINTS #######
for k in range(Nstage): #k is reserved for time instant throughout the code
### CONSTRUCT POWER FLOW
#Construct (Bus Voltages)**2 and (Line current)**2 for bounding module
[LineCurrents2_k] = self.LineCurrents2Func.call([V['BusVoltages',k]])
[BusVoltages2_k] = self.BusVoltages2Func.call([V['BusVoltages',k]])
LineCurrents2.append(LineCurrents2_k)
BusVoltages2.append(BusVoltages2_k)
#Compute Bus Injection Currents
[CurrentsBalanceReal] = self.BusCurrentsRealFunc.call([V['BusVoltages',k]])
[CurrentsBalanceImag] = self.BusCurrentsImagFunc.call([V['BusVoltages',k]])
for plant in self.PlantList:
#Bus Voltage for the selected plant/load
BusVoltageReal = V['BusVoltages',k,'Real'][plant.Bus]
BusVoltageImag = V['BusVoltages',k,'Imag'][plant.Bus]
#Plant Current of the selected plant/load
PlantCurrentReal = V['Inputs',k,plant.label,'CurrentReal']
PlantCurrentImag = V['Inputs',k,plant.label,'CurrentImag']
# Balance the participating currents of the various plants and loads with
# the current injection @ the corresponding buses
CurrentsBalanceReal[plant.Bus] -= PlantCurrentReal
CurrentsBalanceImag[plant.Bus] -= PlantCurrentImag
# Re{V.iplant*} -> "Participating Active Power" // Im{V.iplant*} -> "Participating Reactive Power"
ParticipatingActivePower = BusVoltageReal*PlantCurrentReal + BusVoltageImag*PlantCurrentImag
ParticipatingReactivePower = BusVoltageImag*PlantCurrentReal - BusVoltageReal*PlantCurrentImag
# Plant participating current squared, i.e. |i|**2
PlantCurrent2 = PlantCurrentReal*PlantCurrentReal + PlantCurrentImag*PlantCurrentImag
if (plant._Load == True):
#Load fixing: [Active, Reactive] = Consumed Active / Reactive power
EquConst.append(ParticipatingActivePower - V['Inputs',k,plant.label,'ActivePower'])
EquConst.append(ParticipatingReactivePower - V['Inputs',k,plant.label,'ReactivePower'])
else:
if (plant.Directionality == 'Mono'):
PlantPower = V['Inputs',k,plant.label,'Power']
else:
PlantPower = V['Inputs',k,plant.label,'Pdischarge'] - V['Inputs',k,plant.label,'Pcharge']
#Compute balance between Pmech and participating power for each plant
# ParticipatingPower + R*|iplant|**2 - PlantPower = 0
EquConst.append(ParticipatingActivePower + plant.R*PlantCurrent2 - PlantPower)
CurrentBalance.append(CurrentsBalanceReal)
CurrentBalance.append(CurrentsBalanceImag)
### CONSTRUCT DYNAMIC CONSTRAINTS
for plant in self.PlantList:
if hasattr(plant,'_Shoot'):
[Xp] = plant._Shoot.call([V['States',k,plant.label],V['Inputs',k,plant.label]])
EquConst.append(Xp-V['States',k+1,plant.label])
#A bit ugly...
if hasattr(plant,'_StageConst'):
#print "Plant", plant.label, "has stage inequality constraints"
ConstInputList = [V['Inputs',k,plant.label]]
if (k==0):
ConstInputList.append( EP['u0',plant.label] )
else:
ConstInputList.append( V['Inputs',k-1,plant.label])
if (plant.States.size > 0):
ConstInputList.append(V['States',k,plant.label])
[Const_k] = plant._StageConst.call(ConstInputList)
IneqConst.append(Const_k)
### END OF STAGE CONSTRAINTS
for plant in self.PlantList:
if (hasattr(plant,'_TerminalConst')):
#print "Plant", plant.label, "has terminal inequality constraints"
[Const_k] = plant._TerminalConst.call([V['States',-1,plant.label]])
IneqConst.append(Const_k)
#### PERIODIC CONSTRAINTS
#PeriodicConst.append(V['States',-1,'Storage','Energy'] - V['States',0,'Storage','Energy'])
#PeriodicConst.append(V['States',-1,'Hydro','WaterHeight'] - V['States',0,'Hydro','WaterHeight'])
#PeriodicConst.append(V['Inputs',-1,'Thermal','Power'] - V['Inputs',0,'Thermal','Power'])
######## END CONSTRAINTS BUILDING ######
g = struct_MX([
entry("CurrentBalance", expr = CurrentBalance),
entry("BusVoltages2", expr = BusVoltages2),
entry("LineCurrents2", expr = LineCurrents2),
#entry('Periodic', expr = PeriodicConst),
entry('EquConst', expr = veccat(EquConst)),
entry('IneqConst', expr = veccat(IneqConst))
])
nl = MXFunction(nlpIn(x=V,p=EP),nlpOut(f=Cost,g=g))
nl.init()
# set-up solver
solver = IpoptSolver(nl)
solver.setOption("expand",True)
solver.setOption("print_level",0)
solver.setOption("parametric",True)
solver.setOption("hessian_approximation","exact")
solver.setOption("max_iter",2000)
solver.setOption("tol",1e-6)
solver.setOption("linear_solver","ma27")
solver.init()
Hessian = solver.hessLag()
Hessian.init()
Jacobian = solver.jacG()
Jacobian.init()
JacCost = solver.gradF()
JacCost.init()
self._HessOptDispatch = Hessian
self._JacOptDispatch = Jacobian
self._JacCostOptDispatch = JacCost
self.u0 = u0
self.x0 = x0
self.ExtParameters = ExtParameters
self._EP = EP
self.VOptDispatch = V
self.OptDispatch = solver
self.gOptDispatch = g
self.Properties = BusProperties
print self.Properties
############## SOLVER CONSTRUCTED ##############
#BUILD FIRST INITIAL GUESS
def init(self, x0 = [], u0 = []):
init = self.VOptDispatch()
NBus = self.NBus
NLine = len( self.Graph )
for plant in self.PlantList:
if hasattr(plant,'_Shoot'):
init['States',:,plant.label] = 0.5*(plant.LB['States'] + plant.UB['States'])
for index in range(init.size):
if not(init.cat[index] < inf):
init.cat[index] = 0.
for bus in range(NBus):
init['BusVoltages',:,'Real',bus] = 0.5*(self.PowerFlowBounds['Vmin'][bus]+self.PowerFlowBounds['Vmax'][bus])
init['BusVoltages',:,'Imag',bus] = 0.0
init['Inputs',:,...,'CurrentReal'] = 1.0
init['Inputs',:,...,'CurrentImag'] = 1.0
return init
def Profiles(self, N = 0):
"""
CREATE A STRUCTURE FOR HANDLING THE PROFILES OF THE POWER GRID:
If no argument passed, the profiles have the horizon length, if argument N is assigned, the profiles have the length of NSample
"""
if (N == 0):
if hasattr(self,'VOptDispatch'):
Nstage = len(self.VOptDispatch['Inputs'])
Nprofile = Nstage + 1
else:
print "Profile Error: cannot resolve the length of profile. Specify a horizon (N = ...) or call .Dispatch first"
return
else:
if hasattr(self,'VOptDispatch'):
Nstage = len(self.VOptDispatch['Inputs'])
Nprofile = N + Nstage + 1
else:
Nprofile = N + 1
self.Nprofile = Nprofile
VProfile,_,_,_,_ = self._VariableConstructor(self.Nprofile)
self.LBProfiles = VProfile()
self.UBProfiles = VProfile()
for plant in self.PlantList:
self.LBProfiles['Inputs',:,plant.label] = plant.LB['Inputs']
self.UBProfiles['Inputs',:,plant.label] = plant.UB['Inputs']
if hasattr(plant,'_Shoot'):
self.LBProfiles['States',:,plant.label] = plant.LB['States']
self.UBProfiles['States',:,plant.label] = plant.UB['States']
return Nprofile
#ASSIGN PROFILES & SOLVE
    def DYNSolve(self, x0 = [], u0 = 0., ExtParameters = [], init = [], time = 0, Periodic = False):
        """
        Assign bounds and initial conditions, then solve the dispatch NLP.

        x0            -- current states (embedded as equality bounds on stage 0).
        u0            -- previous inputs, passed as parameter to the NLP.
        ExtParameters -- per-plant external parameters (optional).
        init          -- primal initial guess.
        time          -- offset into the stored bound profiles (receding horizon).
        Periodic      -- currently unused; the periodic-constraint handling
                         below is commented out.

        Returns (v_opt, success) where success is 1 on 'Solve_Succeeded'.
        """
        lbV = self.VOptDispatch(-inf)
        ubV = self.VOptDispatch( inf)
        lbg = self.gOptDispatch()
        ubg = self.gOptDispatch()
        # Inequality constraints are formulated as g(x) <= 0; equalities keep
        # the default 0 == 0 bounds from the fresh structures above.
        ubg["IneqConst"] = 0.
        lbg["IneqConst"] = -inf
        NBus = self.NBus
        NLine = len( self.Graph )
        ####### SETUP THE BOUNDS #########
        # Slice the stored profiles from 'time' on (receding horizon window).
        for plant in self.PlantList:
            lbV['Inputs',:,plant.label] = self.LBProfiles['Inputs',time:,plant.label]
            ubV['Inputs',:,plant.label] = self.UBProfiles['Inputs',time:,plant.label]
            if hasattr(plant,'_Shoot'):
                lbV['States',:,plant.label] = self.LBProfiles['States',time:,plant.label]
                ubV['States',:,plant.label] = self.UBProfiles['States',time:,plant.label]
        #Power flow limitations (bounds on the SQUARED magnitudes)
        lbg["BusVoltages2"] = np.array(self.PowerFlowBounds['Vmin'])**2
        ubg["BusVoltages2"] = np.array(self.PowerFlowBounds['Vmax'])**2
        ubg["LineCurrents2"] = np.array(self.PowerFlowBounds['LineCurrentMax'])**2
        #Introduce additional bounds on all current and voltages (taken from Power flow limitation)
        # Bus voltages
        for bus in range(NBus):
            ubV['BusVoltages',:,'Real',bus] = self.PowerFlowBounds['Vmax'][bus]
            ubV['BusVoltages',:,'Imag',bus] = self.PowerFlowBounds['Vmax'][bus]
            lbV['BusVoltages',:,'Real',bus] = -self.PowerFlowBounds['Vmax'][bus]
            lbV['BusVoltages',:,'Imag',bus] = -self.PowerFlowBounds['Vmax'][bus]
        # Bus 0 is the angle reference: its imaginary part is pinned to zero.
        ubV["BusVoltages",:,"Imag",0] = 0.
        lbV["BusVoltages",:,"Imag",0] = 0.
        ###### EMBBED INITIAL CONDITIONS #######
        if (self._hasStates == True):
            print "Initial Condition embedding"
            lbV['States',0] = x0
            ubV['States',0] = x0
        ###### PERIODIC CONSTRAINTS (IF REQUIRED) #######
        #if (Periodic == False):
        #    lbg['Periodic'] = -inf
        #    ubg['Periodic'] = inf
        EP = self._EP()
        if not(ExtParameters == []):
            EP['ExtParameters'] = ExtParameters
        EP['u0'] = u0
        self.OptDispatch.setInput(lbV, "lbx")
        self.OptDispatch.setInput(ubV, "ubx")
        self.OptDispatch.setInput(init, "x0" )
        self.OptDispatch.setInput(lbg, "lbg")
        self.OptDispatch.setInput(ubg, "ubg")
        self.OptDispatch.setInput(EP, "p")
        self.OptDispatch.solve()
        # Keep the bounds and parameters around for later inspection.
        self.lbV = lbV
        self.ubV = ubV
        self.ubg = ubg
        self.lbg = lbg
        self.ep = EP
        v_opt = self.VOptDispatch(self.OptDispatch.output("x"))
        success = int(self.OptDispatch.getStat('return_status') == 'Solve_Succeeded')
        return v_opt, success
def Shift(self, Sol):
SolShifted = self.VOptDispatch()
for key in Sol.keys():
Nelements = len(Sol[key])
IndexTime = [k for k in range( 1,Nelements ) ]
IndexTimePlus = [k for k in range( Nelements-1 ) ]
SolShifted[key,IndexTimePlus] = Sol[key,IndexTime]
SolShifted[key,-1] = Sol[key,-1]
return self.VOptDispatch(SolShifted)
def Simulate(self, Sol, x0, u0):
#To be replaced by a genuine simulation in the future...
x0plus = self.x0(Sol['States',1])
u0plus = self.u0(Sol['Inputs',0])
return x0plus, u0plus
def NMPCSimulation(self, x0 = [], u0 = [], ExtParameters = [], init = [], Simulation = 0):
##### NMPC Loop #####
NMPC = {'time': 0, 'success' : [], 'Traj' : []}
Vstore = self.Vstore
while (NMPC['time'] < Simulation):
Sol, stats = self.DYNSolve(x0 = x0, u0 = u0, ExtParameters = ExtParameters, time = NMPC['time'], init = init)
NMPC['success'].append(stats)
NMPC['Traj'].append(Sol)
Vstore[...,NMPC['time']] = Sol[...,0]
init = self.Shift(Sol)
x0, u0 = self.Simulate(Sol,x0, u0)
NMPC['time'] += 1
EP = self._EP()
EP['u0'] = u0
self.LagrangeCost.setInput(Vstore,0)
self.LagrangeCost.setInput(EP,1)
self.LagrangeCost.evaluate()
NMPC['LagrangeCost'] = self.LagrangeCost.output()
return Vstore, NMPC
#Extract results
def ExtractInfo(self, v_opt, BusPower = True, PlantPower = True, TotalPower = True):
self.SolutionInfo = {}
Nstage = len(v_opt['Inputs'])
NBus = self.NBus
NLine = len( self.Graph )
#DEFAULT EXTRACTION
#Bus voltages (module and angles), Line Currents
self.SolutionInfo['BusVoltagesModule'] = np.concatenate([np.array(np.sqrt(v_opt["BusVoltages",k,"Real",veccat]*v_opt["BusVoltages",k,"Real",veccat] + v_opt["BusVoltages",k,"Imag",veccat]*v_opt["BusVoltages",k,"Imag",veccat])).T for k in range(Nstage)],axis=0)
self.SolutionInfo['BusVoltagesAngle'] = np.concatenate([np.array([180*math.atan2(v_opt["BusVoltages",k,"Imag",bus],v_opt["BusVoltages",k,"Real",bus])/pi for bus in range(NBus)]).reshape(NBus,1) for k in range(Nstage)], axis = 1).T
LineCurrents_opt = []
for k in range(Nstage):
self.LineCurrents2Func.setInput(v_opt["BusVoltages",k])
self.LineCurrents2Func.evaluate()
LineCurrents_opt.append(sqrt(self.LineCurrents2Func.output()))
self.SolutionInfo['LineCurrentsModule'] = np.concatenate(LineCurrents_opt,axis=1)
#### Total Powers
if (TotalPower == True):
TotalPower = {}
TotalPower['Load'] = 0
for plant in [plant for plant in self.PlantList if (plant._Load == True)]:
TotalPower['Load'] -= np.array(v_opt['Inputs',:,plant.label,'ActivePower'])
TotalPower['Injected'] = 0.
for plant in [plant for plant in self.PlantList if not(plant._Load == True)]:
if (plant.Directionality == 'Mono'):
TotalPower['Injected'] += np.array(v_opt['Inputs',:,plant.label,'Power'])
else:
TotalPower['Injected'] += np.array(v_opt['Inputs',:,plant.label,'Pdischarge']) - np.array(v_opt['Inputs',:,plant.label,'Pcharge'])
self.SolutionInfo['TotalPower'] = TotalPower
#Construct implicit values in the network
if (BusPower == True):
BusActivePower = []
BusReactivePower = []
BusCurrentModule = []
BusCurrentAngle = []
for k in range(Nstage):
self.BusActivePowerFunc.setInput( v_opt['BusVoltages',k],0)
self.BusReactivePowerFunc.setInput(v_opt['BusVoltages',k],0)
self.BusCurrentsRealFunc.setInput( v_opt['BusVoltages',k],0)
self.BusCurrentsImagFunc.setInput( v_opt['BusVoltages',k],0)
self.BusActivePowerFunc.evaluate()
self.BusReactivePowerFunc.evaluate()
self.BusCurrentsRealFunc.evaluate()
self.BusCurrentsImagFunc.evaluate()
BusCurrentReal_k = np.array(self.BusCurrentsRealFunc.output())
BusCurrentImag_k = np.array(self.BusCurrentsImagFunc.output())
BusActivePower.append( np.array(self.BusActivePowerFunc.output()).T)
BusReactivePower.append( np.array(self.BusReactivePowerFunc.output()).T)
BusCurrentModule.append( sqrt(BusCurrentReal_k**2 + BusCurrentImag_k**2).T )
BusCurrentAngle.append( np.array([180*math.atan2(BusCurrentImag_k[bus],BusCurrentReal_k[bus])/pi for bus in range(NBus) ]).reshape(1,6) )
self.SolutionInfo['BusActivePower'] = np.concatenate( BusActivePower, axis=0)
self.SolutionInfo['BusReactivePower'] = np.concatenate( BusReactivePower, axis=0)
self.SolutionInfo['BusCurrentModule'] = np.concatenate( BusCurrentModule, axis=0)
self.SolutionInfo['BusCurrentAngle'] = np.concatenate( BusCurrentAngle, axis=0)
if (PlantPower == True):
PlantActivePowerDictionary = {}
PlantReactivePowerDictionary = {}
CosPhiDictionary = {}
for plant in self.PlantList:
#Data = self.Plants[key]
#Nplant = np.size(Data,axis = 0)
PlantActivePower = []
PlantReactivePower = []
CosPhi = []
TanPhi = []
Bus = plant.Bus
CurrentReal = veccat(v_opt['Inputs',:,plant.label,'CurrentReal'])
CurrentImag = veccat(v_opt['Inputs',:,plant.label,'CurrentImag'])
BusVoltageReal = veccat(v_opt['BusVoltages',:,'Real',Bus])
BusVoltageImag = veccat(v_opt['BusVoltages',:,'Imag',Bus])
PlantActivePower_plant = np.array(BusVoltageReal*CurrentReal + BusVoltageImag*CurrentImag)
PlantReactivePower_plant = np.array(BusVoltageImag*CurrentReal - BusVoltageReal*CurrentImag)
PlantApparentPower_plant = sqrt(PlantReactivePower_plant**2 + PlantActivePower_plant**2)
PlantActivePower.append(PlantActivePower_plant)
PlantReactivePower.append(PlantReactivePower_plant)
CosPhi.append(PlantActivePower_plant/PlantApparentPower_plant)
PlantActivePowerDictionary[plant.label] = PlantActivePower
PlantReactivePowerDictionary[plant.label] = PlantReactivePower
CosPhiDictionary[plant.label] = CosPhi
self.SolutionInfo['PlantActivePower'] = PlantActivePowerDictionary
self.SolutionInfo['PlantReactivePower'] = PlantReactivePowerDictionary
self.SolutionInfo['PlantCosPhi'] = CosPhiDictionary
#### RESULT PLOTTING #####
    def DYNSolvePlot(self, v_opt, NMPC = False, dt = 1, Path = [], LW = 1, Show = True):
        """
        Plot a dispatch/NMPC solution previously processed by ExtractInfo.

        v_opt -- structured solution to plot.
        NMPC  -- unused in the body below (kept for call-site compatibility).
        dt    -- time step used to build the time axes.
        Path  -- when non-empty, figures are saved as EPS under this path.
        LW    -- line width for the plots.
        Show  -- show the figures (True) or close them all (False).

        Returns the list of saved figure names.
        NOTE(review): relies on self.SolutionInfo -- call ExtractInfo first.
        """
        SavedFigs = []
        # Helper: save the current figure as Path/Name.eps and record the name.
        def SaveFig(Path,Name):
            if not(Path == []):
                SavedFigs.append(Name)
                plt.tight_layout()
                plt.savefig(Path+'/'+Name+'.eps',format='eps', facecolor='w', edgecolor='k',bbox_inches='tight')
                #plt.close()
            return SavedFigs
        if (len(v_opt['States']) < 2):
            print "Plotting warning: no time sequence available, run with .Nsim > 1. Plotting not proceeding."
            return
        #Nstage = self.TimeSetup['Horizon']
        NBus = self.NBus
        NLine = len( self.Graph )
        ##### Prepares time grids #####
        # Separate axes: states have one more node than inputs.
        #dt = self.TimeSetup['dt']
        time = {}
        for key in ['States','Inputs']:
            time[key] = np.array([k*dt for k in range(len(v_opt[key]))]).T
        # construct a list of the plants (excluding the loads)
        PlantList = [plant for plant in self.PlantList if not(plant._Load == True)]
        # Square subplot layouts sized to the number of plants.
        SizeSubplt = np.ceil(sqrt(len(PlantList)))
        SizeSubpltAll = np.ceil(sqrt(len(self.PlantList)))
        ##### plot plot plot #####
        # Figure 1: grid-level quantities (voltages, powers, currents).
        plt.figure(1)
        plt.subplot(2,3,1)
        plt.hold('on')
        for k in range(NBus):
            plt.step(time['Inputs'],self.SolutionInfo['BusVoltagesModule'][:,k],where = 'post', label = str(k), linewidth = LW)
        plt.ylabel('kV')
        #plt.xlabel('time (s)')
        plt.title("Voltages, |.|")
        plt.grid()
        plt.subplot(2,3,2)
        plt.hold('on')
        for k in range(NBus):
            plt.step(time['Inputs'],self.SolutionInfo['BusVoltagesAngle'][:,k],where = 'post', label = str(k), linewidth = LW)
        plt.ylabel('deg')
        #plt.xlabel('time (s)')
        plt.title("Voltage, angle")
        plt.grid()
        plt.subplot(2,3,5)
        plt.hold('on')
        for k in range(NBus):
            plt.step(time['Inputs'],1e-3*self.SolutionInfo['BusActivePower'][:,k],where = 'post', label = str(k), linewidth = LW)
        plt.ylabel('GW')
        plt.xlabel('time (s)')
        plt.title("Active power")
        plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
        plt.grid()
        plt.subplot(2,3,4)
        plt.hold('on')
        for k in range(NBus):
            plt.step(time['Inputs'],1e-3*self.SolutionInfo['BusReactivePower'][:,k],where = 'post', label = str(k), linewidth = LW)
        plt.ylabel('GW')
        plt.xlabel('time (s)')
        plt.title("Reactive power")
        plt.grid()
        plt.subplot(2,3,3)
        plt.hold('on')
        for k in range(NBus):
            plt.step(time['Inputs'],self.SolutionInfo['BusCurrentModule'][:,k],where = 'post', label = str(k), linewidth = LW)
        plt.ylabel('kA')
        plt.xlabel('time (s)')
        plt.title('Current, |.|')
        #plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
        plt.grid()
        SaveFig(Path,'Grid')
        # Figure 2: per-line current magnitudes.
        plt.figure(2)
        for k in range(NLine):
            plt.step(time['Inputs'],self.SolutionInfo['LineCurrentsModule'][k,:],where = 'post', label = str(self.Graph[k][0])+'-'+str(self.Graph[k][1]), linewidth = LW)
        plt.xlabel('time (s)')
        plt.ylabel('kA')
        plt.title("Lines current |.|")
        plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
        plt.grid()
        SaveFig(Path,'Lines')
        # Figure 7: per-plant active/reactive power (one subplot per plant).
        plt.figure(7)
        fig = 1
        for plant in self.PlantList:
            plt.subplot(SizeSubpltAll,SizeSubpltAll,fig)
            plt.step(time['Inputs'],1e-3*self.SolutionInfo['PlantActivePower'][plant.label][0], color = 'k', label = 'Act. Power', linewidth = LW)
            plt.step(time['Inputs'],1e-3*self.SolutionInfo['PlantReactivePower'][plant.label][0], color = 'r', label = 'React. Power', linewidth = LW)
            fig += 1
            plt.ylabel('GW')
            plt.xlabel('time (s)')
            plt.title(str(plant.label)+', bus '+str(plant.Bus))
            plt.grid()
        plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
        SaveFig(Path,'BusPower')
        # Figure 8: net power of every plant/load on a single axis.
        plt.figure(8)
        plt.hold('on')
        for plant in self.PlantList:
            Power = []
            if plant._Load == True:
                Power = -np.array(v_opt['Inputs',:,plant.label,'ActivePower'])
            elif plant.Directionality == 'Bi':
                Power = np.array(v_opt['Inputs',:,plant.label,'Pdischarge']) - np.array(v_opt['Inputs',:,plant.label,'Pcharge'])
            elif plant.Directionality == 'Mono':
                Power = v_opt['Inputs',:,plant.label,'Power']
            else:
                print "Warning: plant power unidentified, not plotting"
            if len(Power)>0:
                plt.step(time['Inputs'],1e-3*np.array(Power), label = plant.label,where = 'post', linewidth = LW)
        plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
        plt.xlabel('time (s)')
        plt.ylabel('GW')
        plt.title('Plant power')
        plt.grid()
        SaveFig(Path,'PlantPower')
        # Figure 9: transmission efficiency = consumed / injected power.
        plt.figure(9)
        plt.step(time['Inputs'],100*self.SolutionInfo['TotalPower']['Load']/self.SolutionInfo['TotalPower']['Injected'], label = plant.label,where = 'post', color = 'k', linewidth = LW)
        plt.xlabel('time (s)')
        plt.ylabel('%')
        plt.title('Transmission efficiency')
        plt.grid()
        SaveFig(Path,'GridEfficiency')
        # Figure 10: one subplot per plant state.
        plt.figure(10)
        fig = 0
        Nsubplot = 0
        # NOTE(review): UnitDic only covers the keys 'h', 'W_error' and 'E';
        # a plant with a differently named state would raise KeyError below.
        UnitDic = {'h': 'm', 'W_error': 'm/s','E': 'MJ'}
        for plant in PlantList:
            if hasattr(plant,'_Shoot'):
                Nsubplot += len(plant.States.keys())
        Nsubplot = ceil(sqrt(Nsubplot))
        for plant in PlantList:
            if hasattr(plant,'_Shoot'):
                for key in plant.States.keys():
                    # NOTE(review): subplot index starts at 0 here; matplotlib
                    # subplot indices are 1-based -- confirm against the
                    # matplotlib version in use.
                    plt.subplot(Nsubplot,Nsubplot,fig)
                    plt.step(time['States'],np.array(v_opt['States',:,plant.label,key]),color = 'k', linewidth = LW)
                    plt.title(plant.label+', state: '+key)
                    plt.xlabel('time (s)')
                    plt.ylabel(UnitDic[key])
                    plt.grid()
                    fig += 1
        SaveFig(Path,'PlantStates')
        #Plant Detail: one figure per plant with dynamics or extra inputs.
        fig = 11
        for plant in PlantList:
            if hasattr(plant,'_additionalInputs') or hasattr(plant,'_Shoot'):
                plt.figure(fig)
                plt.title(plant.label)
                subPltNum = 2
                if hasattr(plant,'_additionalInputs'):
                    subPltNum += len(plant._additionalInputs)
                if hasattr(plant,'_Shoot'):
                    subPltNum += len(plant.States.keys())
                subPltNum = np.ceil(sqrt(subPltNum))
                #Plot current
                plt.subplot(subPltNum,subPltNum,0)
                plt.step(time['Inputs'],np.array(v_opt['Inputs',:,plant.label,'CurrentReal']),where = 'post',label = 'Real Current')
                plt.step(time['Inputs'],np.array(v_opt['Inputs',:,plant.label,'CurrentImag']),where = 'post',label = 'Complex Current')
                plt.title('Current')
                plt.legend()
                plt.grid()
                plt.subplot(subPltNum,subPltNum,1)
                plt.title('Power')
                if plant.Directionality == 'Mono':
                    plt.step(time['Inputs'],np.array(v_opt['Inputs',:,plant.label,'Power']),where = 'post',label = 'Power')
                else:
                    plt.step(time['Inputs'],np.array(v_opt['Inputs',:,plant.label, 'Pcharge']),where = 'post',label = 'Pcharge')
                    plt.step(time['Inputs'],np.array(v_opt['Inputs',:,plant.label,'Pdischarge']),where = 'post',label = 'Pdischarge')
                plt.legend()
                plt.grid()
                subplt = 2
                if hasattr(plant,'_additionalInputs'):
                    for key in plant._additionalInputs:
                        subplt += 1
                        plt.subplot(subPltNum,subPltNum,subplt)
                        plt.step(time['Inputs'],np.array(v_opt['Inputs',:,plant.label,key]),where = 'post',label = key)
                        plt.legend()
                        plt.grid()
                if hasattr(plant,'_Shoot'):
                    for key in plant.States.keys():
                        subplt += 1
                        plt.subplot(subPltNum,subPltNum,subplt)
                        plt.plot(time['States'],np.array(v_opt['States',:,plant.label,key]),label = key)
                        plt.legend()#bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
                        plt.grid()
                fig += 1
        if (Show == True):
            plt.show()
        else:
            plt.close('all')
        return SavedFigs
def DYNSolvePlotCompare(self, v_opt, NMPC = False, label = '', marker = '', linewidth = 1):
    """
    Overlay the optimized power, storage-level and wind-speed
    trajectories in *v_opt* on figures 3, 4 and 8 so that several
    solutions can be compared on the same axes.

    Parameters
    ----------
    v_opt : structure indexable as
        v_opt['States'|'Inputs', stage, plant-type, plant-index, field]
        -- presumably a casadi symbolic structure; TODO confirm.
    NMPC : bool -- accepted but not used inside this method.
    label, marker, linewidth : matplotlib line options forwarded to the
        plot calls so each overlaid solution can be told apart.
    """
    Nstage = self.TimeSetup['Horizon']        # not used below
    NBus = self.NBus                          # not used below
    NLine = len( self.Graph )                 # not used below
    NLoad = len( self.Plants['Load'] )        # not used below
    NStorage = len( self.Plants['Storage'] )
    NWind = len( self.Plants['Wind'] )
    NThermal = len( self.Plants['Thermal'] )  # not used below
    NHydro = len( self.Plants['Hydro'] )
    dt = self.TimeSetup['dt']
    # Build one time axis per variable group from the number of entries
    # stored in v_opt (states and inputs have different lengths).
    time = {}
    for key in ['States','Inputs']:
        time[key] = np.array([k*dt for k in range(len(v_opt[key]))]).T
    # Figure 3: one subplot of injected power per plant type.
    plt.figure(3)
    fig = 1
    for key in ['Wind','Hydro','Thermal','Storage']:
        plt.subplot(2,2,fig)
        plt.hold('on')  # NOTE(review): pyplot.hold was removed in matplotlib 3.0
        plt.title(key+' Power')
        # eval('N'+key) resolves to the NWind/NHydro/... counters above.
        for plant in range(eval('N'+key)):
            if (self.Plants[key][plant]['Directionality'] == 'mono'):
                plt.step(time['Inputs'],np.array(v_opt['Inputs',:,key,plant,'Power']),where = 'post',label = label, linewidth = linewidth)
                if (key == 'Wind'):
                    # Overlay available wind power 0.5*rho*A*Cp*v^3 and
                    # the plant's rated-power limit (dashed red).
                    CPmax = self.Plants['Wind'][plant]['CPmax']
                    A = self.Plants['Wind'][plant]['A']
                    PWind = 0.5*rho_air*A*CPmax*np.array(v_opt['States',:,'Wind',plant,'WindSpeed'])**3
                    plt.step(time['States'],PWind,color='k',where = 'post')
                    plt.hlines(self.Plants['Wind'][plant]['Pmax'], time['States'][0], time['States'][-1], colors='r', linestyles='dashed')
            else:
                # Bidirectional plants: plot net power (discharge - charge).
                plt.step(time['Inputs'],np.array(v_opt['Inputs',:,key,plant,'Pdischarge'])-np.array(v_opt['Inputs',:,key,plant,'Pcharge']),where = 'post',label = label, linewidth = linewidth)
        fig += 1
    #########
    # Figure 4: storage levels and charge/discharge complementarity.
    plt.figure(4)
    plt.subplot(2,2,1)
    plt.hold('on')
    for plant in range(NHydro):
        plt.plot(time['States'],np.array(v_opt['States',:,'Hydro',plant,'WaterHeight']),label = label, marker = marker, linewidth = linewidth)
    plt.title('Water Height')
    plt.subplot(2,2,2)
    plt.hold('on')
    for plant in range(NStorage):
        plt.plot(time['States'],np.array(v_opt['States',:,'Storage',plant,'Energy']),label = label, marker = marker, linewidth = linewidth)
    plt.title('Stored Energy')
    plt.subplot(2,2,3)
    plt.hold('on')
    # sqrt(Pcharge*Pdischarge) is zero exactly when charging and
    # discharging are complementary (never simultaneous).
    for plant in range(NHydro):
        plt.step(time['Inputs'],sqrt(np.array(v_opt['Inputs',:,'Hydro',plant,'Pcharge'])*np.array(v_opt['Inputs',:,'Hydro',plant,'Pdischarge'])),where = 'post',label = label, marker = marker, linewidth = linewidth)
    plt.title('Hydro Complementarity')
    plt.subplot(2,2,4)
    plt.hold('on')
    for plant in range(NStorage):
        plt.step(time['Inputs'],sqrt(np.array(v_opt['Inputs',:,'Storage',plant,'Pcharge'])*np.array(v_opt['Inputs',:,'Storage',plant,'Pdischarge'])),where = 'post',label = label, marker = marker, linewidth = linewidth)
    plt.title('Storage Complementarity')
    plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
    # Figure 8: wind-speed state trajectories.
    plt.figure(8)
    for plant in range(NWind):
        plt.step(time['States'],veccat(v_opt['States',:,'Wind',plant,'WindSpeed']),label=label, marker = marker, linewidth = linewidth)
    plt.title('Wind Speed m/s')
| gpl-2.0 |
genialis/resolwe-bio | resolwe_bio/tools/plot_enhancers.py | 1 | 1984 | #!/usr/bin/env python3
"""Parse coordinates for the hockey-stick plot."""
import argparse
import json
import pandas as pd
from resolwe_runtime_utils import error, send_message
parser = argparse.ArgumentParser(
    description="Parse coordinates for the hockey-stick plot."
)
parser.add_argument("input_data", help="Input Data")
parser.add_argument("output", help="Output JSON file")
parser.add_argument("-c", action="store_true", help="Subtract control sample data")
args = parser.parse_args()

xlabel = "Enhancers ranked by signal"
ylabel = "Total reads per million"
labels = []

# The enhancer table starts after a commented header block (header=5
# means row 6 of the file holds the column names).
data = pd.read_csv(args.input_data, header=5, sep="\t")

# Scan the file header for the reported cutoff.  Previously a missing
# "Using cutoff" line left `cutoff` undefined and caused a NameError at
# float(cutoff) below; now it is reported as a proper process error.
cutoff = None
with open(args.input_data) as handle:
    for line in handle:
        if "Using cutoff" in line:
            cutoff = line.strip().split(" ")[3]
            break
if cutoff is None:
    send_message(error("Could not find a 'Using cutoff' line in the input file."))
    raise SystemExit(1)

if args.c:
    # Control-corrected: ranks in column 8, signal minus control.
    x_axis = data.iloc[:, 8][::-1]
    y_axis = data.iloc[:, 6] - data.iloc[:, 7]
else:
    x_axis = data.iloc[:, 7][::-1]
    y_axis = data.iloc[:, 6]

# Number of super-enhancers and genomic coordinates of each region.
n_sup_enh, rows = data[data.isSuper == 1].shape
chr_pos = data.CHROM.map(str) + ":" + data.START.map(str) + "-" + data.STOP.map(str)

if len(x_axis) != len(y_axis):
    send_message(error("Scatter plot error. len(x_axis) != len(y_axis)"))

if len(labels) > 0 and len(labels) != len(x_axis):
    send_message(error("Scatter plot error. len(labels) != len(x_axis)"))

data = {
    "points": {"x_axis": list(x_axis), "y_axis": list(y_axis), "items": labels},
    "annotations": [
        {
            # Horizontal line at the signal cutoff.
            "type": "line",
            "x1": 0,
            "y1": float(cutoff),
            "x2": max(x_axis),
            "y2": float(cutoff),
        },
        {
            # Vertical line separating super-enhancers from the rest.
            "type": "line",
            "x1": len(x_axis) - n_sup_enh,
            "y1": 0,
            "x2": len(x_axis) - n_sup_enh,
            "y2": max(y_axis),
        },
    ],
    "meta": {
        "x_label": xlabel,
        "y_label": ylabel,
        "chr_pos": list(chr_pos),
        "text": "Cutoff: {}".format(cutoff),
    },
}

with open(args.output, "w") as f:
    json.dump(data, f)
| apache-2.0 |
smharper/psipy | psipy/read_data.py | 1 | 9005 | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
class Viewer(object):
    """Interactive matplotlib viewer for pin-cell flux tally data.

    Layout: pin-cell geometry with its tally mesh on the left, energy
    spectrum top-right (log-x), azimuthal distribution bottom-right.
    Clicking any of the three axes moves the current phase-space
    selection along the corresponding coordinate and refreshes all
    plots.
    """

    def __init__(self, df):
        # Initialize data attributes.  Work on a copy so the caller's
        # dataframe is not mutated by format_dataframe().
        self.df = df.copy()
        self.energy_edges = np.array([])
        self.azim_edges = np.array([])
        # Current selection: [i_x, i_y, azimuthal angle, energy].
        self.coords = [0, 0, 0.0, 0.025]

        # Initialize the figure and the axes.
        self.fig = plt.figure()
        self.pin_ax = self.fig.add_axes((0.05, 0.05, 0.40, 0.90),
                                        aspect='equal')
        self.spec_ax = self.fig.add_axes((0.55, 0.55, 0.40, 0.40))
        self.spec_ax.set_xscale('log')
        self.azim_ax = self.fig.add_axes((0.55, 0.05, 0.40, 0.40))
        self.azim_ax.set_xlim(-np.pi, np.pi)

        # Format the data.
        self.format_dataframe()

        # Draw the pincell and register event handlers.
        self.draw_pincell()
        self.fig.canvas.mpl_connect('button_press_event', self.onclick)

        # Initialize plot actors (created lazily by update_plot).
        self.cur_square = None
        self.energy_line = None
        self.azim_line = None
        self.energy_marker = None
        self.azim_marker = None
        self.azim_ray = None

        # Draw the plots with the starting data.
        self.update_plot()

    def format_dataframe(self):
        """Strip and reshape the raw tally dataframe for plotting."""
        # Get rid of useless columns.
        del self.df['nuclide']
        del self.df['score']

        # Replace the mesh index labels (1-based -> 0-based i_x/i_y).
        self.df['i_x'] = self.df[('mesh 1', 'x')] - 1
        self.df['i_y'] = self.df[('mesh 1', 'y')] - 1
        del self.df[('mesh 1', 'x')]
        del self.df[('mesh 1', 'y')]
        del self.df[('mesh 1', 'z')]

        # Remove rows for the unused mesh bins above the diagonal.
        self.df = self.df[self.df['i_x'] >= self.df['i_y']]

        # Extract the energy and azimuthal bin edges.
        self.energy_edges = np.unique(self.df['energy low [MeV]'])
        self.energy_edges = np.concatenate((self.energy_edges,
                                            [self.df['energy high [MeV]'].max()]))
        self.azim_edges = np.unique(self.df['azimuthal low'])
        self.azim_edges = np.concatenate((self.azim_edges,
                                          [self.df['azimuthal high'].max()]))

        # Replace energy bounds with bin numbers (nearest-edge match
        # rather than exact equality, to tolerate float round-off).
        bins = self.df['energy low [MeV]']
        bins = [np.argmin(np.abs(self.energy_edges - a)) for a in bins]
        self.df['energy'] = bins
        del self.df['energy low [MeV]']
        del self.df['energy high [MeV]']

        # Replace azimuthal bounds with bin numbers.
        bins = self.df['azimuthal low']
        bins = [np.argmin(np.abs(self.azim_edges - a)) for a in bins]
        self.df['azim'] = bins
        del self.df['azimuthal low']
        del self.df['azimuthal high']

        # Convert MeV to eV.
        self.energy_edges *= 1e6

        # Double fluxes on boundary elements to account for area
        # difference (presumably boundary bins span half the area of an
        # interior bin, and corners get doubled twice -- TODO confirm).
        diagonal = self.df['i_x'] == self.df['i_y']
        bottom = self.df['i_y'] == 0
        right = self.df['i_x'] == self.df['i_x'].max()
        corners = diagonal & (bottom | right)
        for rows in (diagonal, bottom, right, corners):
            for col in ('mean', 'std. dev.'):
                self.df.loc[rows, col] *= 2

    def draw_pincell(self):
        """Draw the pin-cell materials, centerlines and tally mesh."""
        # Draw patches for the materials (moderator under clad under
        # gap under fuel, so later patches overdraw the earlier ones).
        fuel_color = np.array([255, 50, 50]) / 255.0
        gap_color = np.array([220, 220, 220]) / 255.0
        clad_color = np.array([150, 150, 150]) / 255.0
        mod_color = np.array([100, 200, 200]) / 255.0
        pitch = 0.62992 * 2.0
        mod_patch = matplotlib.patches.Rectangle((-pitch/2.0, -pitch/2.0),
                                                 pitch, pitch, facecolor=mod_color)
        clad_patch = matplotlib.patches.Circle((0.0, 0.0), radius=0.45720,
                                               facecolor=clad_color)
        gap_patch = matplotlib.patches.Circle((0.0, 0.0), radius=0.40005,
                                              facecolor=gap_color)
        fuel_patch = matplotlib.patches.Circle((0.0, 0.0), radius=0.39218,
                                               facecolor=fuel_color)
        self.pin_ax.add_patch(mod_patch)
        self.pin_ax.add_patch(clad_patch)
        self.pin_ax.add_patch(gap_patch)
        self.pin_ax.add_patch(fuel_patch)

        # Draw centerlines.
        self.pin_ax.plot((-pitch/2.0, pitch/2.0), (0.0, 0.0), c='black',
                         linestyle='-.')
        self.pin_ax.plot((0.0, 0.0), (-pitch/2.0, pitch/2.0), c='black',
                         linestyle='-.')

        # Draw mesh gridlines (10x10 mesh over the upper-right octant;
        # same spacing as the delta used in onclick/update_plot).
        delta = pitch / 2.0 / (10 - 1)
        d0 = -delta / 2.0
        self.pin_ax.plot((d0, pitch/2.0), (d0, d0), c='black')
        self.pin_ax.plot((d0, d0), (d0, d0+delta), c='black')
        for i in range(10):
            d1 = d0 + i*delta
            d2 = d1 + delta
            self.pin_ax.plot((d1, pitch/2.0), (d2, d2), c='black')
            self.pin_ax.plot((d1, d1), (d0, d2), c='black')
        self.pin_ax.set_xlim((-pitch/2.0, pitch/2.0))
        self.pin_ax.set_ylim((-pitch/2.0, pitch/2.0))

    def onclick(self, event):
        """Mouse handler: move the selection along the clicked axes."""
        if event.inaxes == self.pin_ax:
            # Index the selected bin.
            pitch = 0.62992 * 2.0
            delta = pitch / 2.0 / 9
            x = event.xdata + delta/2.0
            y = event.ydata + delta/2.0
            i_x = int(x / delta)
            i_y = int(y / delta)

            # Ignore the click if it was outside of the mesh.
            if i_x < 0 or i_y < 0 or i_y > i_x: return

            # Update the coordinates.
            self.coords[0] = i_x
            self.coords[1] = i_y

            # Update the plot.
            self.update_plot()

        elif event.inaxes == self.spec_ax:
            # Click on the spectrum picks a new energy.
            self.coords[3] = event.xdata
            self.update_plot()

        elif event.inaxes == self.azim_ax:
            # Click on the azimuthal plot picks a new angle.
            self.coords[2] = event.xdata
            self.update_plot()

    def update_plot(self):
        """Redraw all three plots for the current self.coords selection."""
        # Unpack the current phase-space coordinates.
        i_x, i_y, azim, energy = self.coords

        # Find the azimuthal and energy bin indices.
        a_bin = np.searchsorted(self.azim_edges, azim) - 1
        e_bin = np.searchsorted(self.energy_edges, energy) - 1

        # Highlight the selected spatial bin.
        pitch = 0.62992 * 2.0
        delta = pitch / 2.0 / 9
        if self.cur_square is not None: self.cur_square.remove()
        self.cur_square = matplotlib.patches.Rectangle(
            ((i_x-0.5)*delta, (i_y-0.5)*delta), delta, delta,
            facecolor='green')
        self.pin_ax.add_patch(self.cur_square)

        # Plot the energy spectrum.  The first value is repeated so the
        # step plot covers the lowest energy bin as well.
        df = self.df[(self.df['i_x'] == i_x) & (self.df['i_y'] == i_y)
                     & (self.df['azim'] == a_bin)]
        flux = np.concatenate(([df['mean'].values[0]], df['mean'].values))
        if self.energy_line is not None: self.energy_line.remove()
        self.energy_line, = self.spec_ax.step(self.energy_edges, flux,
                                              c='green')
        if self.energy_marker is not None: self.energy_marker.remove()
        self.energy_marker = self.spec_ax.axvline(energy)

        # Plot the azimuthal spectrum.
        df = self.df[(self.df['i_x'] == i_x) & (self.df['i_y'] == i_y)
                     & (self.df['energy'] == e_bin)]
        flux = np.concatenate(([df['mean'].values[0]], df['mean'].values))
        if self.azim_line is not None: self.azim_line.remove()
        self.azim_line, = self.azim_ax.step(self.azim_edges, flux, c='green')
        if self.azim_marker is not None: self.azim_marker.remove()
        self.azim_marker = self.azim_ax.axvline(azim)

        # Adjust the azimuthal spectrum yscale.
        pad = 0.05 * (flux.max() - flux.min())
        pad = max(pad, 0.1*flux.max())
        self.azim_ax.set_ylim(flux.min() - pad, flux.max() + pad)

        # Draw an azimuthal ray on the spatial plot, ending at whichever
        # cell boundary (d0: vertical, d1: horizontal) is hit first.
        x0 = i_x*delta
        y0 = i_y*delta
        x_target = pitch/2.0 if abs(azim) > np.pi / 2.0 else -pitch/2.0
        y_target = pitch/2.0 if azim < 0.0 else -pitch/2.0
        d0 = (x_target - x0) / np.cos(azim)
        d1 = (y_target - y0) / np.sin(azim)
        if d0 < d1:
            x1 = x_target
            y1 = y0 + d0*np.sin(azim)
        else:
            x1 = x0 + d1*np.cos(azim)
            y1 = y_target
        if self.azim_ray is not None: self.azim_ray.remove()
        self.azim_ray, = self.pin_ax.plot((x0, x1), (y0, y1), c='black',
                                          linewidth=2)

        # Redraw the image.
        self.fig.canvas.draw()
if __name__ == '__main__':
    # Get the path to the data directory (<repo root>/data, resolved
    # relative to this file so the script works from any CWD).
    my_path = os.path.abspath(__file__)
    my_dir = os.path.dirname(my_path)
    root_path = os.path.join(my_dir, os.pardir)
    data_path = os.path.join(root_path, 'data')

    # Pick an appropriate data file.
    data_file = 'fine.p'
    data_path = os.path.join(data_path, data_file)

    # Read the tally data and start the gui.
    with open(data_path, 'rb') as fh:
        df = pickle.load(fh)
    viewer = Viewer(df)
    plt.show()
| mit |
ioam/svn-history | topo/tests/__init__.py | 2 | 8892 | """
Unit tests for Topographica.
Use the 'run' function to run all the tests.
We use unittest and doctest to create tests. The run() function calls
tests in files in topo/tests/ that:
* have a name beginning with 'test' and ending with '.py', if the file
defines the 'suite' attribute;
* have a name beginning with 'test' and ending with '.txt'.
If Tkinter cannot be imported, files that have a name ending with
'_tk' are not imported (hence any tests that they contain are
skipped).
unittest
========
We use unittest in two different ways. The first is simply
to run a series of tests:
class TestSomething(unittest.TestCase):
def setUp(self):
...
def test_x(self):
...
def test_y(self):
...
def extra(self):
...
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSomething))
In the example above, setUp will be called, followed by test_x and
test_y (i.e. the methods setUp and test_* are called automatically);
the extra() method will not be called (unless your code calls it).
setUp does not have to exist.
The second way we use unittest is to pass a series of scenarios
through one battery of tests:
class TestSomething(unittest.TestCase):
def test_x(self):
...
def test_y(self):
...
class TestCase1(TestSomething):
def setUp(self):
...
class TestCase2(TestSomething):
def setUp(self):
...
suite = unittest.TestSuite()
cases = [TestScenario1,TestScenario2]
suite.addTests([unittest.makeSuite(case) for case in cases])
In this second example, TestScenario1.setUp will be called, followed
by test_x and test_y. After this, TestScenario2.setUp will be called,
followed again by test_x and test_y. setUp in the two TestScenarios is
therefore used to create some different data or situations to pass
through the tests.
To be run() automatically, unittest files must (a) be named test*.py, and
(b) must define the 'suite' attribute.
Additionally, unittest files should:
(a) contain the following code to allow the file to be run on its own:
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
(b) set suite.requires_display=True if the tests require a DISPLAY.
(c) call topo.tests.start_tkgui() before running tests (e.g. in
setUP()) if they require the GUI to be running
doctest
=======
$Id$
"""
__version__='$Revision$'
# CEBALERT: It might be good if tests/ were a directory at the top
# level, with a subdirectory structure mirroring that of topo/. Then
# it is more likely we'd have a separate test file for each module,
# and we could also simply name the files the same as what they are
# testing, which could make it simpler to find the right test file.
# CEBALERT: tests often affect each other. Make sure test authors are
# aware of that, and have some kind of policy. (Setting class
# attributes, sharing a sim, etc)
# CEBALERT: some of the test modules are missing code to handle running
# (i.e. running as './topographica topo/tests/testsheet.py').
import unittest,doctest,os,re,fnmatch,socket
import param
# Automatically discover all test*.py files in this directory
# Automatically discover all test*.py files in this directory
__all__ = [re.sub('\.py$','',f)
           for f in fnmatch.filter(os.listdir(__path__[0]),'test*.py')]
all_doctest = sorted(fnmatch.filter(os.listdir(__path__[0]),'test*.txt'))

# location in which to create semi-permanent test data
# (per-host subdirectory so machines sharing a filesystem don't clash)
output_path = param.normalize_path.prefix
tests_output_path = os.path.join(output_path,'tests',socket.gethostname())
if not os.path.exists(tests_output_path):
    print "Creating %s"%tests_output_path
    os.makedirs(tests_output_path)

# Skip GUI test modules/doctests when Tkinter is unavailable.
try:
    import Tkinter
except ImportError:
    tk_tests = fnmatch.filter(__all__,'*_tk')
    tk_doctests = fnmatch.filter(all_doctest,'*_tk')
    param.Parameterized().warning('no Tkinter module: skipping %s'%str(tk_tests+tk_doctests))
    for t in tk_tests:
        __all__.remove(t)
    for t in tk_doctests:
        all_doctest.remove(t)

# gmpy is considered missing both when the import fails and when the
# module object has no backing file.
try:
    import gmpy
    gmpy_imported=True
except ImportError:
    gmpy_imported=False
if gmpy_imported and gmpy.__file__ is None:
    gmpy_imported=False

if not gmpy_imported:
    import param
    param.Parameterized().warning('no gmpy module: testgmpynumber.txt skipped')
    all_doctest.remove('testgmpynumber.txt')

# CEBALERT: we need to rename these/reorganize the tests
__all__.remove('test_script')
__all__.remove('test_map_measurement')

# Audio tests need the optional scikits.audiolab package.
try:
    import scikits.audiolab
except ImportError:
    import param
    param.Parameterized().message("no scikits.audiolab: testaudio.py skipped")
    __all__.remove('testaudio')

# CEBALERT: should be using python warnings, and having unittest
# report warnings.
try:
    import matplotlib
except ImportError:
    import param
    param.Parameterized().warning("Matplotlib is not available; skipping Matplotlib tests.")
    __all__.remove('testmatplotlib')
    __all__.remove('testmatplotlib_tk')

__all__.sort()
def all_suite():
    """
    Build and return a TestSuite covering every discovered test.

    __all__:
    For each test module that defines a 'suite' attribute, add its
    tests. Only adds tests requiring a display if the DISPLAY
    environment variable is set.

    all_doctest:
    Add each doctest file to the suite.
    """
    suite = unittest.TestSuite()

    for test_name in __all__:
        # import the module (by name, hence the exec)
        exec 'import '+test_name
        test_module = locals()[test_name]
        try:
            print 'Loading suite from module %s ...' % test_name,
            new_test = getattr(test_module,'suite')
            if _check_for_display(new_test):
                print 'ok.'
                suite.addTest(new_test)
            else:
                print 'skipped: No $DISPLAY.'
        except AttributeError,err:
            # Module defines no 'suite'; report it and carry on.
            print err

    for filename in all_doctest:
        print 'Loading doctest file', filename
        suite.addTest(doctest.DocFileSuite(filename))
    return suite
# Note that this is set up so that if all the tests are run and
# there's no DISPLAY, tests requiring DISPLAY are skipped - but if a
# test is run individually via run_named() and it requires DISPLAY, an
# error will be raised.
def _check_for_display(suite):
    """
    Decide whether *suite* may run in this environment.

    A suite that does not declare a ``requires_display`` attribute can
    always run; otherwise it can run only when the DISPLAY environment
    variable is set to a non-empty value.  Note that only the presence
    of the attribute matters, not its value.
    """
    if not hasattr(suite, 'requires_display'):
        return True
    return bool(os.getenv('DISPLAY'))
def run(verbosity=1,test_modules=None):
    """
    Run tests in all test_modules; test_modules defaults to all_suite().

    E.g. to run all tests:
    ./topographica -c 'from topo.tests import run; run()'

    verbosity specifies the level of information printed during the
    tests (see unittest.TextTestRunner).

    To run only a subset of the tests, specify a list of test modules or doctest
    file names. For example:

    ./topographica -c 'from topo.tests import run, testimage, testsheet; run(test_modules=[testimage,testsheet,"testDynamicParameter.txt"])'
    """
    import types

    if not test_modules:
        run_suite = all_suite()
    else:
        assert isinstance(test_modules,list), 'test_modules argument must be a list of test modules or doctest filenames.'

        run_suite = unittest.TestSuite()

        for test_module in test_modules:
            if isinstance(test_module,types.ModuleType):
                # Module: run its suite, but unlike all_suite() a
                # missing DISPLAY here is an error, not a skip.
                if _check_for_display(test_module.suite):
                    run_suite.addTest(test_module.suite)
                else:
                    raise Exception("Cannot run test without a valid DISPLAY.")
            elif isinstance(test_module,str):
                # String: must name one of the discovered doctest files.
                if test_module in all_doctest:
                    run_suite.addTest(doctest.DocFileSuite(test_module))
                else:
                    raise ValueError, '"%s" is not an available doctest file.' % test_module
            else:
                raise ValueError, '%s is not a valid test module' % str(test_module)

    return unittest.TextTestRunner(verbosity=verbosity).run(run_suite)
# CB: if the unit tests were faster, I wouldn't keep needing this...
def run_named(name,verbosity=2):
    """
    Run the named test module.

    Convenience function to make it easy to run a single test module.

    Examples:
    ./topographica -c 'import topo.tests; topo.tests.run_named("testsnapshots.py")'
    ./topographica -c 'import topo.tests; topo.tests.run_named("testDynamicParameter.txt")'
    """
    if name.endswith('.py'):
        # Module test: import topo.tests.<name> into __main__ so the
        # resulting module object can be handed to run().
        module_name = "topo.tests."+name[0:-3]
        import __main__
        exec "import %s"%module_name in __main__.__dict__
        test_module = eval(module_name,__main__.__dict__)
    else:
        # Doctest file: run() accepts the bare filename.
        test_module = name

    run(verbosity,test_modules=[test_module])
| bsd-3-clause |
CDSFinance/zipline | tests/risk/answer_key.py | 39 | 11989 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hashlib
import os
import numpy as np
import pandas as pd
import pytz
import xlrd
import requests
from six.moves import map
def col_letter_to_index(col_letter):
    """
    Convert a spreadsheet column label to its 1-based index.

    Handles multi-letter labels in base-26 fashion, e.g. 'A' -> 1,
    'Z' -> 26, 'AA' -> 27, 'AK' -> 37.
    """
    index = 0
    for char in col_letter:
        index = index * 26 + (ord(char) - 65 + 1)
    return index
DIR = os.path.dirname(os.path.realpath(__file__))

ANSWER_KEY_CHECKSUMS_PATH = os.path.join(DIR, 'risk-answer-key-checksums')
# Known-good spreadsheet checksums, one hex MD5 per line, oldest first.
# Use a with-block so the handle is closed promptly instead of being
# leaked until garbage collection.
with open(ANSWER_KEY_CHECKSUMS_PATH, 'r') as checksum_file:
    ANSWER_KEY_CHECKSUMS = checksum_file.read().splitlines()

ANSWER_KEY_FILENAME = 'risk-answer-key.xlsx'
ANSWER_KEY_PATH = os.path.join(DIR, ANSWER_KEY_FILENAME)
ANSWER_KEY_BUCKET_NAME = 'zipline-test_data'
ANSWER_KEY_DL_TEMPLATE = """
https://s3.amazonaws.com/zipline-test-data/risk/{md5}/risk-answer-key.xlsx
""".strip()
# The latest key is defined by the last row of the checksum file.
LATEST_ANSWER_KEY_URL = ANSWER_KEY_DL_TEMPLATE.format(
    md5=ANSWER_KEY_CHECKSUMS[-1])
def answer_key_signature():
    """Return the hex MD5 digest of the local answer-key spreadsheet."""
    md5 = hashlib.md5()
    with open(ANSWER_KEY_PATH, 'rb') as f:
        # Hash incrementally in 1 KiB chunks until EOF.
        for chunk in iter(lambda: f.read(1024), b""):
            md5.update(chunk)
    return md5.hexdigest()
def ensure_latest_answer_key():
    """
    Get the latest answer key from a publicly available location.

    Logic for determining what and when to download is as such:

    - If there is no local spreadsheet file, then get the latest answer key,
    as defined by the last row in the checksum file.
    - If there is a local spreadsheet file:
    -- If the spreadsheet's checksum is in the checksum file:
    --- If the spreadsheet's checksum does not match the latest, then grab
    the latest checksum and replace the local checksum file.
    --- If the spreadsheet's checksum matches the latest, then skip download,
    and use the local spreadsheet as a cached copy.
    -- If the spreadsheet's checksum is not in the checksum file, then leave
    the local file alone, assuming that the local xls's md5 is not in the list
    due to local modifications during development.

    It is possible that md5's could collide, if that is ever case, we should
    then find an alternative naming scheme.

    The spreadsheet answer sheet is not kept in SCM, as every edit would
    increase the repo size by the file size, since it is treated as a binary.
    """
    answer_key_dl_checksum = None
    local_answer_key_exists = os.path.exists(ANSWER_KEY_PATH)
    if local_answer_key_exists:
        local_hash = answer_key_signature()

        if local_hash in ANSWER_KEY_CHECKSUMS:
            # Assume previously downloaded version.
            # Check for latest.
            if local_hash != ANSWER_KEY_CHECKSUMS[-1]:
                # More recent checksum, download
                answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
        else:
            # Assume local copy that is being developed on
            answer_key_dl_checksum = None
    else:
        answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]

    if answer_key_dl_checksum:
        # Fetch the spreadsheet for the wanted checksum and overwrite
        # the local copy.
        res = requests.get(
            ANSWER_KEY_DL_TEMPLATE.format(md5=answer_key_dl_checksum))
        with open(ANSWER_KEY_PATH, 'wb') as f:
            f.write(res.content)
# Get latest answer key on load.
ensure_latest_answer_key()
class DataIndex(object):
    """
    Coordinates for the spreadsheet, using the values as seen in the
    notebook.

    A spreadsheet GUI is 1-indexed while the python-excel libraries are
    0-indexed; the ``*_index`` properties perform that conversion.
    """

    def __init__(self, sheet_name, col, row_start, row_end,
                 value_type='float'):
        self.sheet_name = sheet_name
        self.col = col
        self.row_start = row_start
        self.row_end = row_end
        self.value_type = value_type

    @property
    def col_index(self):
        # col_letter_to_index is 1-based; xlrd expects 0-based.
        return col_letter_to_index(self.col) - 1

    @property
    def row_start_index(self):
        return self.row_start - 1

    @property
    def row_end_index(self):
        return self.row_end - 1

    def __str__(self):
        # Render like a spreadsheet range, e.g. 's_p'!H4:H254
        return "'{0}'!{1}{2}:{1}{3}".format(
            self.sheet_name, self.col, self.row_start, self.row_end)
class AnswerKey(object):
    """
    Loads the answer-key spreadsheet and exposes each named range.

    For every entry in INDEXES, an attribute of the same name is set on
    the instance: a list of parsed cell values, or (for nested dicts) a
    dict mapping period name to such a list.
    """

    # Named spreadsheet ranges keyed by metric name.  Nested dicts hold
    # one range per reporting period.
    INDEXES = {
        'RETURNS': DataIndex('Sim Period', 'D', 4, 255),

        'BENCHMARK': {
            'Dates': DataIndex('s_p', 'A', 4, 254, value_type='date'),
            'Returns': DataIndex('s_p', 'H', 4, 254)
        },

        # Below matches the inconsistent capitalization in spreadsheet
        'BENCHMARK_PERIOD_RETURNS': {
            'Monthly': DataIndex('s_p', 'R', 8, 19),
            '3-Month': DataIndex('s_p', 'S', 10, 19),
            '6-month': DataIndex('s_p', 'T', 13, 19),
            'year': DataIndex('s_p', 'U', 19, 19),
        },

        'BENCHMARK_PERIOD_VOLATILITY': {
            'Monthly': DataIndex('s_p', 'V', 8, 19),
            '3-Month': DataIndex('s_p', 'W', 10, 19),
            '6-month': DataIndex('s_p', 'X', 13, 19),
            'year': DataIndex('s_p', 'Y', 19, 19),
        },

        'ALGORITHM_PERIOD_RETURNS': {
            'Monthly': DataIndex('Sim Period', 'Z', 23, 34),
            '3-Month': DataIndex('Sim Period', 'AA', 25, 34),
            '6-month': DataIndex('Sim Period', 'AB', 28, 34),
            'year': DataIndex('Sim Period', 'AC', 34, 34),
        },

        'ALGORITHM_PERIOD_VOLATILITY': {
            'Monthly': DataIndex('Sim Period', 'AH', 23, 34),
            '3-Month': DataIndex('Sim Period', 'AI', 25, 34),
            '6-month': DataIndex('Sim Period', 'AJ', 28, 34),
            'year': DataIndex('Sim Period', 'AK', 34, 34),
        },

        'ALGORITHM_PERIOD_SHARPE': {
            'Monthly': DataIndex('Sim Period', 'AL', 23, 34),
            '3-Month': DataIndex('Sim Period', 'AM', 25, 34),
            '6-month': DataIndex('Sim Period', 'AN', 28, 34),
            'year': DataIndex('Sim Period', 'AO', 34, 34),
        },

        'ALGORITHM_PERIOD_BETA': {
            'Monthly': DataIndex('Sim Period', 'AP', 23, 34),
            '3-Month': DataIndex('Sim Period', 'AQ', 25, 34),
            '6-month': DataIndex('Sim Period', 'AR', 28, 34),
            'year': DataIndex('Sim Period', 'AS', 34, 34),
        },

        'ALGORITHM_PERIOD_ALPHA': {
            'Monthly': DataIndex('Sim Period', 'AT', 23, 34),
            '3-Month': DataIndex('Sim Period', 'AU', 25, 34),
            '6-month': DataIndex('Sim Period', 'AV', 28, 34),
            'year': DataIndex('Sim Period', 'AW', 34, 34),
        },

        'ALGORITHM_PERIOD_BENCHMARK_VARIANCE': {
            'Monthly': DataIndex('Sim Period', 'BJ', 23, 34),
            '3-Month': DataIndex('Sim Period', 'BK', 25, 34),
            '6-month': DataIndex('Sim Period', 'BL', 28, 34),
            'year': DataIndex('Sim Period', 'BM', 34, 34),
        },

        'ALGORITHM_PERIOD_COVARIANCE': {
            'Monthly': DataIndex('Sim Period', 'BF', 23, 34),
            '3-Month': DataIndex('Sim Period', 'BG', 25, 34),
            '6-month': DataIndex('Sim Period', 'BH', 28, 34),
            'year': DataIndex('Sim Period', 'BI', 34, 34),
        },

        'ALGORITHM_PERIOD_DOWNSIDE_RISK': {
            'Monthly': DataIndex('Sim Period', 'BN', 23, 34),
            '3-Month': DataIndex('Sim Period', 'BO', 25, 34),
            '6-month': DataIndex('Sim Period', 'BP', 28, 34),
            'year': DataIndex('Sim Period', 'BQ', 34, 34),
        },

        'ALGORITHM_PERIOD_SORTINO': {
            'Monthly': DataIndex('Sim Period', 'BR', 23, 34),
            '3-Month': DataIndex('Sim Period', 'BS', 25, 34),
            '6-month': DataIndex('Sim Period', 'BT', 28, 34),
            'year': DataIndex('Sim Period', 'BU', 34, 34),
        },

        'ALGORITHM_RETURN_VALUES': DataIndex(
            'Sim Cumulative', 'D', 4, 254),

        'ALGORITHM_CUMULATIVE_VOLATILITY': DataIndex(
            'Sim Cumulative', 'P', 4, 254),

        'ALGORITHM_CUMULATIVE_SHARPE': DataIndex(
            'Sim Cumulative', 'R', 4, 254),

        'CUMULATIVE_DOWNSIDE_RISK': DataIndex(
            'Sim Cumulative', 'U', 4, 254),

        'CUMULATIVE_SORTINO': DataIndex(
            'Sim Cumulative', 'V', 4, 254),

        'CUMULATIVE_INFORMATION': DataIndex(
            'Sim Cumulative', 'AA', 4, 254),

        'CUMULATIVE_BETA': DataIndex(
            'Sim Cumulative', 'AD', 4, 254),

        'CUMULATIVE_ALPHA': DataIndex(
            'Sim Cumulative', 'AE', 4, 254),

        'CUMULATIVE_MAX_DRAWDOWN': DataIndex(
            'Sim Cumulative', 'AH', 4, 254),
    }

    def __init__(self):
        # Open the workbook and cache the three sheets used by INDEXES.
        self.workbook = xlrd.open_workbook(ANSWER_KEY_PATH)

        self.sheets = {}
        self.sheets['Sim Period'] = self.workbook.sheet_by_name('Sim Period')

        self.sheets['Sim Cumulative'] = self.workbook.sheet_by_name(
            'Sim Cumulative')

        self.sheets['s_p'] = self.workbook.sheet_by_name('s_p')

        # Materialize every named range as an instance attribute.
        for name, index in self.INDEXES.items():
            if isinstance(index, dict):
                subvalues = {}
                for subkey, subindex in index.items():
                    subvalues[subkey] = self.get_values(subindex)
                setattr(self, name, subvalues)
            else:
                setattr(self, name, self.get_values(index))

    def parse_date_value(self, value):
        # datemode 0 = workbook using the 1900-based date system.
        return xlrd.xldate_as_tuple(value, 0)

    def parse_float_value(self, value):
        # Empty cells become NaN.
        return value if value != '' else np.nan

    def get_raw_values(self, data_index):
        """Return the raw cell values of *data_index*'s column range."""
        return self.sheets[data_index.sheet_name].col_values(
            data_index.col_index,
            data_index.row_start_index,
            data_index.row_end_index + 1)

    @property
    def value_type_to_value_func(self):
        # Maps a DataIndex.value_type to the matching cell parser.
        return {
            'float': self.parse_float_value,
            'date': self.parse_date_value,
        }

    def get_values(self, data_index):
        """Return the parsed values of the range given by *data_index*."""
        value_parser = self.value_type_to_value_func[data_index.value_type]
        return [value for value in
                map(value_parser, self.get_raw_values(data_index))]
# Module-level singletons built once at import time.
ANSWER_KEY = AnswerKey()

# Benchmark series keyed by UTC datetimes built from the sheet's dates.
BENCHMARK_DATES = ANSWER_KEY.BENCHMARK['Dates']
BENCHMARK_RETURNS = ANSWER_KEY.BENCHMARK['Returns']
DATES = [datetime.datetime(*x, tzinfo=pytz.UTC) for x in BENCHMARK_DATES]
BENCHMARK = pd.Series(dict(zip(DATES, BENCHMARK_RETURNS)))
ALGORITHM_RETURNS = pd.Series(
    dict(zip(DATES, ANSWER_KEY.ALGORITHM_RETURN_VALUES)))
RETURNS_DATA = pd.DataFrame({'Benchmark Returns': BENCHMARK,
                             'Algorithm Returns': ALGORITHM_RETURNS})
# Expected cumulative risk metrics, one column per metric.
RISK_CUMULATIVE = pd.DataFrame({
    'volatility': pd.Series(dict(zip(
        DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_VOLATILITY))),
    'sharpe': pd.Series(dict(zip(
        DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_SHARPE))),
    'downside_risk': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_DOWNSIDE_RISK))),
    'sortino': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_SORTINO))),
    'information': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_INFORMATION))),
    'alpha': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_ALPHA))),
    'beta': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_BETA))),
    'max_drawdown': pd.Series(dict(zip(
        DATES, ANSWER_KEY.CUMULATIVE_MAX_DRAWDOWN))),
})
| apache-2.0 |
HeraclesHX/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent sematic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
richardwolny/sms-tools | lectures/09-Sound-description/plots-code/knn.py | 25 | 1718 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import os, sys
from numpy import random
from scipy.stats import mode
def eucDist(vec1, vec2):
    """Return the Euclidean (L2) distance between two equal-length vectors.

    Accepts any array-likes; uses numpy's optimized norm instead of the
    hand-rolled sqrt-of-sum-of-squares.
    """
    return np.linalg.norm(np.asarray(vec1) - np.asarray(vec2))
n = 30
qn = 8
K = 3
class1 = np.transpose(np.array([np.random.normal(-2,2,n), np.random.normal(-2,2,n)]))
class2 = np.transpose(np.array([np.random.normal(2,2,n), np.random.normal(2,2,n)]))
query = np.transpose(np.array([np.random.normal(0,2,qn), np.random.normal(0,2,qn)]))
plt.figure(1, figsize=(9.5, 3.5))
plt.subplot(1,2,1)
plt.scatter(class1[:,0],class1[:,1], c='b', alpha=0.7, s=50, edgecolor='none')
plt.scatter(class2[:,0],class2[:,1], c='r', alpha=0.7, s=50, edgecolor='none')
plt.scatter(query[:,0],query[:,1], c='c', alpha=1, s=50)
predClass = []
for kk in range(query.shape[0]):
dist = []
for pp in range(class1.shape[0]):
euc = eucDist(query[kk,:], class1[pp,:])
dist.append([euc, 1])
for pp in range(class2.shape[0]):
euc = eucDist(query[kk,:], class2[pp,:])
dist.append([euc, 2])
dist = np.array(dist)
indSort = np.argsort(dist[:,0])
topKDist = dist[indSort[:K],1]
predClass.append(mode(topKDist)[0][0].tolist())
predClass = np.array(predClass)
indC1 = np.where(predClass==1)[0]
indC2 = np.where(predClass==2)[0]
plt.subplot(1,2,2)
plt.scatter(class1[:,0],class1[:,1], c='b', alpha=0.3, s=50, edgecolor='none')
plt.scatter(class2[:,0],class2[:,1], c='r', alpha=0.3, s=50, edgecolor='none')
plt.scatter(query[indC1,0],query[indC1,1], c='b', alpha=1, s=50)
plt.scatter(query[indC2,0],query[indC2,1], c='r', alpha=1, s=50)
plt.tight_layout()
plt.savefig('knn.png')
plt.show()
| agpl-3.0 |
yarikoptic/pystatsmodels | statsmodels/datasets/strikes/data.py | 3 | 1928 | #! /usr/bin/env python
"""U.S. Strike Duration Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
This is a subset of the data used in Kennan (1985). It was originally
published by the Bureau of Labor Statistics.
::
Kennan, J. 1985. "The duration of contract strikes in US manufacturing.
`Journal of Econometrics` 28.1, 5-28.
"""
DESCRSHORT = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production."""
DESCRLONG = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production. The data is a subset of the data originally
used by Kennan. The data here is data for the months of June only to avoid
seasonal issues."""
#suggested notes
NOTE = """
Number of observations - 62
Number of variables - 2
Variable name definitions::
duration - duration of the strike in days
iprod - unanticipated industrial production
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """
    Load the strikes data and return a Dataset class instance.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # Endogenous variable (strike duration) is column 0; cast all to float.
    return du.process_recarray(_get_data(), endog_idx=0, dtype=float)
def load_pandas():
    """
    Load the strikes data and return a Dataset class instance.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    # Same as load(), but builds pandas objects for endog/exog.
    return du.process_recarray_pandas(_get_data(), endog_idx=0, dtype=float)
def _get_data():
    """Read strikes.csv (shipped next to this module) into a record array.

    Fix: the original opened the file without ever closing it; the context
    manager guarantees the handle is released even if parsing fails.
    """
    filepath = dirname(abspath(__file__))
    with open(filepath + '/strikes.csv', 'rb') as csv_file:
        data = recfromtxt(csv_file, delimiter=",", names=True, dtype=float)
    return data
| bsd-3-clause |
yunlongliukm/chm1_scripts | GC_content_explore.py | 2 | 8819 | #!/usr/bin/env python
import pysam
import re
import matplotlib
import matplotlib.pyplot as plt
import numpy
from pbcore.io import CmpH5Reader
from pbcore.io import CmpH5Alignment
def IdentityFromCIGAR(cigar):
    """Compute alignment identity from a pysam-style CIGAR list.

    *cigar* is a list of (operation, length) pairs where op 0 = match,
    1 = insertion and 2 = deletion.  Identity is
    matches / (matches + insertions + deletions).

    Fix: returns 0.0 for an empty or degenerate CIGAR instead of raising
    ZeroDivisionError.
    """
    nMatch = sum(length for op, length in cigar if op == 0)
    nIns = sum(length for op, length in cigar if op == 1)
    nDel = sum(length for op, length in cigar if op == 2)
    denom = float(nMatch + nIns + nDel)
    if denom == 0:
        return 0.0
    return nMatch / denom
class AlignmentSummary:
    """Lightweight record summarizing one aligned subread."""

    def __init__(self, identity, length):
        # Alignment identity (fraction of matches) and aligned query length.
        self.identity, self.length = identity, length
        # ZMW / subread indices; initialized to 0, callers may overwrite.
        self.zmw = self.sub = 0
def SamToMap(samFileName, samMap):
    """Populate *samMap* with {query name: AlignmentSummary} from a SAM file.

    *samMap* is mutated in place; nothing is returned.  A later alignment
    with the same query name overwrites an earlier one.
    """
    sf = pysam.Samfile( samFileName, "r" )
    for aln in sf.fetch():
        # Intended to skip unmapped records.  NOTE(review): pysam's rname is
        # normally an integer tid, so comparing it to the string "*" may
        # never match -- confirm against the pysam version this targets.
        if (aln.rname == "*"):
            continue
        ident = IdentityFromCIGAR(aln.cigar)
        samMap[aln.qname] = AlignmentSummary(ident, aln.qlen)
def GetSubreadGC(subread):
    """Return the G+C fraction of a subread's basecall string."""
    seq = subread.basecalls()
    gc_count = seq.count('G') + seq.count('C')
    return float(gc_count) / len(seq)
def GetGC(read):
    """Return the G+C fraction of the longest subread of *read*.

    Improvements over the original: the manual arg-max loop is replaced by
    ``max`` with a key, and the basecall string is fetched once instead of
    repeatedly.  On length ties the first subread wins, matching the
    original's strict-greater-than scan.
    """
    longest = max(read.subreads, key=lambda s: len(s.basecalls()))
    seq = longest.basecalls()
    return float(seq.count('G') + seq.count('C')) / len(seq)
#dh5 = "/net/eichler/vol20/projects/pacbio/backups/incoming/130625_MYD_eee_20kb_368/D01_1/Analysis_Results/m130626_034031_42134_c100534392550000001823079711101324_s1_p0.bas.h5"
#dsam = "/net/eichler/vol20/projects/pacbio/nobackups/results/130625_MYD_eee_20kb_368/D01_1/D.sam"
dh5 = "/mnt/pacbio/D01_1/Analysis_Results/m130626_034031_42134_c100534392550000001823079711101324_s1_p0.bas.h5"
dsam = "/mnt/pacbio_analysis/D01_1/D.sam"
from pbcore.io import BasH5Reader
dReader = BasH5Reader(dh5)
#
# key:
# rs read score
# rl read length
# mi mapped identity
# ml mapped length
# m indices of mapped reads
# um indices of unmapped reads
# s mapped subreads
# us unmapped subreads
class Count:
    """Accumulator for per-subread statistics, keyed by short field names.

    Field key:
        rs  read score            rl  read length
        mi  mapped identity       ml  mapped length
        m   indices of mapped reads     um  indices of unmapped reads
        s   mapped subread objects      us  unmapped subread objects
    """

    def __init__(self):
        self.fields = ["rs", "rl", "mi", "ml", "m", "um", "s", "us"]
        # One independent growable list per field.
        self.data = dict((name, []) for name in self.fields)
        self.npdata = {}

    def ToNumpy(self):
        """Snapshot every accumulated list into a numpy array in npdata."""
        self.npdata = dict(
            (name, numpy.array(self.data[name])) for name in self.fields)
def StoreMapped(fileNames, alnMap, stats):
    """Accumulate per-subread statistics from one or more bas.h5 files.

    For every subread of every sequencing ZMW: append the read score and
    subread length to *stats* (a Count), then record whether the subread's
    name appears in *alnMap* (mapped) or not (unmapped), together with its
    mapped length and identity (0 for unmapped) and the subread object
    itself.  *stats* is mutated in place; nothing is returned.
    """
    for fileName in fileNames:
        reader = BasH5Reader(fileName)
        for zmw in reader.sequencingZmws:
            for s in reader[zmw].subreads:
                stats.data["rs"].append(reader[zmw].readScore)
                stats.data["rl"].append(s.readEnd - s.readStart)
                if (s.readName in alnMap):
                    # Position of this subread in the flat rs/rl lists, so
                    # "m"/"um" can later index npdata["rs"] etc.
                    stats.data["m"].append(len(stats.data["rs"]) - 1 )
                    stats.data["ml"].append(alnMap[s.readName].length)
                    stats.data["mi"].append(alnMap[s.readName].identity)
                    stats.data["s"].append(s)
                else:
                    stats.data["um"].append(len(stats.data["rs"]) - 1 )
                    stats.data["ml"].append(0)
                    stats.data["mi"].append(0)
                    stats.data["us"].append(s)
dfn = ["/mnt/pacbio/D01_1/Analysis_Results/m130626_034031_42134_c100534392550000001823079711101324_s1_p0.bas.h5"]
dsam = "/mnt/pacbio_analysis/D01_1/D.sam"
dcmp = "/mnt/pacbio_analysis/D01_1/D.cmp.h5"
gfn = ["/mnt/pacbio/G01_1/Analysis_Results/m130626_103730_42134_c100534392550000001823079711101327_s1_p0.bas.h5","/mnt/pacbio/G01_1/Analysis_Results/m130626_103730_42134_c100534392550000001823079711101327_s2_p0.bas.h5"]
gsam = "/mnt/pacbio_analysis/G01_1/G.sam"
hfn = ["/mnt/pacbio/H01_1/Analysis_Results/m130626_125440_42134_c100534382550000001823079711101330_s1_p0.bas.h5","/mnt/pacbio/H01_1/Analysis_Results/m130626_125440_42134_c100534382550000001823079711101330_s2_p0.bas.h5"]
hsam = "/mnt/pacbio_analysis/H01_1/H.sam"
ffn = ["/mnt/pacbio/F01_1/Analysis_Results/m130626_081902_42134_c100534392550000001823079711101326_s1_p0.bas.h5","/mnt/pacbio/F01_1/Analysis_Results/m130626_081902_42134_c100534392550000001823079711101326_s2_p0.bas.h5"]
fsam = "/mnt/pacbio_analysis/F01_1/F.sam"
dStats = Count()
dh5Files = [dh5]
dSamMap = {}
SamToMap(dsam, dSamMap)
StoreMapped(dfn, dSamMap, dStats)
dStats.ToNumpy()
fStats = Count()
fSamMap = {}
SamToMap(fsam, fSamMap)
StoreMapped(ffn, fSamMap, fStats)
fStats.ToNumpy()
gStats = Count()
gSamMap = {}
SamToMap(gsam, gSamMap)
StoreMapped(gfn, gSamMap, gStats)
gStats.ToNumpy()
hStats = Count()
hSamMap = {}
SamToMap(hsam, hSamMap)
StoreMapped(hfn, hSamMap, hStats)
hStats.ToNumpy()
def ArrayHist(array, nbins=30):
    """Histogram *array* into *nbins* bins; return (left bin edges, counts)."""
    counts, edges = numpy.histogram(array, bins=nbins)
    return (edges[0:-1], counts)
def StatsHist(stats, dataset="rs", which="m", minValue=None, nbins=30):
    """Histogram one statistics field restricted to an index subset.

    Parameters
    ----------
    stats : Count
        Statistics container; its ``npdata`` arrays are read.
    dataset : str
        Field to histogram (e.g. "rs" read score, "rl" read length).
    which : str
        Field holding the integer indices used to select entries
        ("m" mapped, "um" unmapped).
    minValue : float, optional
        If given, values <= minValue are dropped before histogramming.
    nbins : int
        Number of histogram bins.  Generalizes the previously hard-coded
        30 (kept as the default), matching ArrayHist's parameter.

    Returns
    -------
    (left_edges, counts), same convention as ArrayHist.
    """
    d = stats.npdata[dataset][stats.npdata[which]]
    if (minValue is not None):
        d = d[d > minValue]
    h = numpy.histogram(d, bins=nbins)
    return (h[1][0:-1], h[0])
dh = StatsHist(dStats, dataset="rs", which="m", minValue = 0.25)
fh = StatsHist(fStats, dataset="rs", which="m", minValue = 0.25)
duh =StatsHist(dStats, dataset="rs", which="um", minValue = 0.25)
fuh =StatsHist(fStats, dataset="rs", which="um", minValue = 0.25)
ax = plt.axes
plt.scatter(dh[0], dh[1], axes=ax)
plt.scatter(fh[0], fh[1], axes=ax, color="red")
plt.scatter(duh[0], duh[1], axes=ax, color="LightBlue")
plt.scatter(fuh[0], fuh[1], axes=ax, color="pink")
plt.show()
dCmpR = CmpH5Reader(dcmp)
mgc = numpy.array([GetSubreadGC(sr) for sr in gStats.npdata["s"]])
umgc = numpy.array([GetSubreadGC(sr) for sr in gStats.npdata["us"]])
dmgc = numpy.array([GetSubreadGC(sr) for sr in dStats.npdata["s"]])
dumgc = numpy.array([GetSubreadGC(sr) for sr in dStats.npdata["us"]])
hmgc = numpy.array([GetSubreadGC(sr) for sr in hStats.npdata["s"]])
humgc = numpy.array([GetSubreadGC(sr) for sr in hStats.npdata["us"]]
)
def GetLengths(subreads):
    """Return a numpy array with the basecall length of each subread."""
    lengths = [len(sr.basecalls()) for sr in subreads]
    return numpy.array(lengths)
def IMean(array, indices):
    """Return the mean of ``array[indices]``.

    Bug fix: the original called ``np.mean``, but this module imports the
    package as ``numpy`` with no ``np`` alias, so every call raised
    NameError.
    """
    return numpy.mean(array[indices])
def LimitIndices(array, minValue=0, maxValue=10000000):
    """Boolean mask of entries strictly inside (minValue, maxValue)."""
    return (array > minValue) & (array < maxValue)
hl = GetLengths(hStats.npdata["s"])
hul = GetLengths(hStats.npdata["us"])
dl = GetLengths(dStats.npdata["s"])
dul = GetLengths(dStats.npdata["us"])
gmgch = ArrayHist(mgc)
gumgch = ArrayHist(umgc)
dmgch = ArrayHist(dmgc)
dumgch = ArrayHist(umgc)
ax1 = plt.subplot(121)
ax1.scatter(dl, dmgc, color="DarkRed", alpha=0.10)
ax1.scatter(hl, hmgc, color="DarkBlue", alpha=0.10)
ax2 = plt.subplot(122)
ax2.scatter(dul, dumgc, color="HotPink", alpha=0.10)
ax2.scatter(hul, humgc, color="DodgerBlue", alpha=0.10)
plt.show()
hi = LimitIndices(hmgc, 0.1, 0.7)
hui = LimitIndices(humgc, 0.1, 0.7)
di = LimitIndices(dmgc, 0.1, 0.7)
dui = LimitIndices(dumgc, 0.1, 0.7)
hi = hl > 1000
hui = hul > 1000
di =dl > 1000
dui = dul > 1000
ax1 = plt.subplot(121)
ax1.scatter(dl[di], dmgc[di], color="DarkRed", alpha=0.10)
ax1.scatter(hl[hi], hmgc[hi], color="DarkBlue", alpha=0.10)
ax2 = plt.subplot(122)
ax2.scatter(dul[dui], dumgc[dui], color="HotPink", alpha=0.10)
ax2.scatter(hul[hui], humgc[hui], color="DodgerBlue", alpha=0.10)
plt.show()
print numpy.mean(hmgc[hi])
print numpy.mean(humgc[hui])
print numpy.mean(dmgc[di])
print numpy.mean(dumgc[dui])
def GetGCContentByLength(lens, gc, nBins=100):
    """Bin GC fractions by read length; return per-bin mean and std.

    Parameters
    ----------
    lens : sequence of int
        Read lengths (same order as *gc*).
    gc : sequence of float
        GC fraction for each read.
    nBins : int
        Number of equal-width length bins spanning [0, max(lens)].

    Returns
    -------
    (x, means, sds) where x[i] is the left edge of bin i (a list) and
    means/sds are numpy arrays of the mean / standard deviation of the GC
    values falling in each bin (0 for empty bins).

    Bug fix: the original referenced ``np.max`` / ``np.mean`` / ``np.std``
    / ``np.array``, but this module imports the package as ``numpy`` with
    no ``np`` alias, so every call raised NameError.
    """
    maxLength = numpy.max(lens)
    binSize = maxLength / nBins
    gcBins = [[] for _ in range(nBins)]
    for i in range(len(lens)):
        # Clamp the top length into the last bin.
        binIndex = min(int(lens[i] / binSize), nBins - 1)
        gcBins[binIndex].append(gc[i])
    means = numpy.array(
        [numpy.mean(b) if len(b) > 0 else 0 for b in gcBins])
    sds = numpy.array(
        [numpy.std(b) if len(b) > 0 else 0 for b in gcBins])
    x = [binSize * i for i in range(nBins)]
    return (x, means, sds)
(dx,dm,ds) = GetGCContentByLength(dl, dmgc)
(dux,dum,dus) = GetGCContentByLength(dul, dumgc)
(hx,hm,hs) = GetGCContentByLength(hl, hmgc)
(hux,hum,hus) = GetGCContentByLength(hul, humgc)
fig = plt.figure(figsize=(12,6))
ax1 = plt.subplot(121)
ax1.errorbar(hx,hm,yerr=hs, ecolor="DodgerBlue", color="blue")
ax1.errorbar(dx,dm,yerr=ds, ecolor="HotPink", color="red")
ax1.legend(("Dra1", "control"))
ax1.set(title="GC content of mapped reads by length")
ax1.axis([-1000,20000,0.1,0.7])
ax2 = plt.subplot(122)
ax2.errorbar(hux,hum,yerr=hus, ecolor="DodgerBlue", color="blue")
ax2.errorbar(dux,dum,yerr=dus, ecolor="HotPink", color="red")
ax2.set(title="GC content of unmapped reads by length")
ax2.axis([-1000,20000,0.1,0.7])
ax2.legend(("Dra1", "control"))
plt.show()
| mit |
ogrisel/scipy | scipy/special/c_misc/struve_convergence.py | 76 | 3725 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except:
from sympy import mpmath
def err_metric(a, b, atol=1e-290):
    """Elementwise relative error of *a* against reference *b*.

    A tiny absolute floor *atol* keeps the denominator nonzero, and points
    where both values are the same infinity count as zero error.
    """
    rel = np.abs(a - b) / (atol + np.abs(b))
    same_inf = np.isinf(b) & (a == b)
    rel[same_inf] = 0
    return rel
def do_plot(is_h=True):
    """Draw the convergence map for one Struve function (H if *is_h*, else L).

    Evaluates the three expansions used by ``struve.c`` (power series,
    large-z asymptotic, Bessel series) on a (v, z) grid, compares them to a
    50-digit mpmath reference, and draws: filled regions where each
    expansion is within 1e-12 of the reference, dotted contours at those
    boundaries, solid contours of each routine's self-reported error
    estimate, and the dashed switching heuristic z = 0.7*|v| + 12.

    Fix: the original called ``plt.hold(1)``; ``Axes.hold`` was deprecated
    in matplotlib 2.0 and removed in 3.0 (hold-on is now always the
    default), so the call is dropped to keep this running on modern
    matplotlib.
    """
    from scipy.special._ufuncs import \
        _struve_power_series, _struve_asymp_large_z, _struve_bessel_series

    vs = np.linspace(-1000, 1000, 91)
    # Include tiny z explicitly; drop the duplicate z=0 grid point.
    zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])

    # Each private routine returns (value, absolute error estimate).
    rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
    ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
    rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)

    # High-precision reference values via mpmath.
    mpmath.mp.dps = 50
    if is_h:
        sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
    else:
        sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
    ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])

    # True relative errors (1e-300 keeps log10 finite) ...
    err_a = err_metric(ra[0], ex) + 1e-300
    err_p = err_metric(rp[0], ex) + 1e-300
    err_b = err_metric(rb[0], ex) + 1e-300

    # ... and the relative error estimates reported by the routines.
    err_est_a = abs(ra[1]/ra[0])
    err_est_p = abs(rp[1]/rp[0])
    err_est_b = abs(rb[1]/rb[0])

    z_cutoff = 0.7*abs(vs) + 12

    levels = [-1000, -12]

    plt.cla()

    plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)

    plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])

    lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
    la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
    lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])

    plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
    plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
    plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})

    plt.plot(vs, z_cutoff, 'k--')

    plt.xlim(vs.min(), vs.max())
    plt.ylim(zs.min(), zs.max())

    plt.xlabel('v')
    plt.ylabel('z')
def main():
    """Render the Struve H and L convergence maps side by side and save."""
    plt.clf()
    for position, (is_h, label) in enumerate(
            [(True, 'Struve H'), (False, 'Struve L')], start=1):
        plt.subplot(1, 2, position)
        do_plot(is_h)
        plt.title(label)
    plt.savefig('struve_convergence.png')
    plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
| bsd-3-clause |
aewhatley/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
talitaof/pulp | doc/source/_static/plotter.py | 4 | 1267 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from matplotlib import rc
rc('text', usetex=True)
rc('font', family='serif')
def plot_interval(a,c,x_left, x_right,i, fbound):
    """Draw one elasticized-constraint diagram on the current axes.

    Arguments (as used below):
        a        pair [left, right] of fractional tolerances around the rhs
        c        right-hand side value of the constraint
        x_left, x_right   x-axis window of the plot
        i        constraint index, used only in the title
        fbound   text describing the proportionFreeBound setting

    NOTE(review): this function uses pylab-style bare names (arange, plot,
    text, gca, Arrow, axis, yticks, title) that are not imported in the
    visible part of this file -- it presumably expects a
    ``from pylab import *`` environment; confirm before running standalone.
    """
    # Endpoints of the penalty-free interval around the rhs value c.
    lh = c*(1-a[0])
    rh = c*(1+a[1])
    # Horizontal baseline y = 0 across the plotting window.
    x=arange(x_left, x_right+1)
    y=0*x
    # Arrows from the rhs out to each end of the free interval.
    arrow_r = Arrow(c,0, c*a[1],0,0.2)
    arrow_l = Arrow(c,0,-c*a[0],0,0.2)
    plot(x,y)
    text((x_left+lh)/2.0,0.1,'freebound interval [%s, %s] is penalty-free' % (lh,rh))
    text((x_left+lh)/2.0, 0.2, 'rhs=%s, %s' % (c, fbound))
    cur_ax = gca()
    cur_ax.add_patch(arrow_l)
    cur_ax.add_patch(arrow_r)
    axis([x_left,x_right,-0.1,0.3])
    yticks([])
    title('Elasticized constraint\_%s $C(x)= %s $' % (i, c))
figure()
subplots_adjust(hspace=0.5)
fbound = 'proportionFreeBound'
i=1
subplot(2,1,i)
a=[0.01,0.01]
c = 200
x_left = 0.97*c
x_right = 1.03*c
fb_string = '%s%s = %s' %(fbound,'', a[0])
plot_interval(a,c,x_left, x_right,i, fb_string)
i += 1
subplot(2,1,i)
a=[0.02, 0.05]
c = 500
x_left = 0.9*c #scale of window
x_right = 1.2*c #scale of window
fb_string = '%s%s = [%s,%s]' % (fbound,'List', a[0],a[1])
plot_interval(a,c,x_left, x_right,i, fb_string)
savefig('freebound.jpg')
savefig('freebound.pdf')
# vim: fenc=utf-8: ft=python:sw=4:et:nu:fdm=indent:fdn=1:syn=python
| mit |
stggh/PyAbel | examples/example_hansenlaw.py | 2 | 3117 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import abel
import matplotlib.pylab as plt
import bz2
# Hansen and Law inverse Abel transform of velocity-map imaged electrons
# from O2- photodetachement at 454 nm. The spectrum was recorded in 2010
# at the Australian National University (ANU)
# J. Chem. Phys. 133, 174311 (2010) DOI: 10.1063/1.3493349
# load image as a numpy array
# use scipy.misc.imread(filename) to load image formats (.png, .jpg, etc)
print('HL: loading "data/O2-ANU1024.txt.bz2"')
imagefile = bz2.BZ2File('data/O2-ANU1024.txt.bz2')
IM = np.loadtxt(imagefile)
rows, cols = IM.shape # image size
# center image returning odd size
IMc = abel.tools.center.center_image(IM, center='com')
# dr=0.5 may help reduce pixel grid coarseness
# NB remember to also pass as an option to angular_integration
AIM = abel.Transform(IMc, method='hansenlaw',
use_quadrants=(True, True, True, True),
symmetry_axis=None,
transform_options=dict(dr=0.5, align_grid=False),
angular_integration=True,
angular_integration_options=dict(dr=0.5),
verbose=True)
# convert to photoelectron spectrum vs binding energy
# conversion factors depend on measurement parameters
eBE, PES = abel.tools.vmi.toPES(*AIM.angular_integration,
energy_cal_factor=1.204e-5,
photon_energy=1.0e7/454.5, Vrep=-2200,
zoom=IM.shape[-1]/2048)
# Set up some axes
fig = plt.figure(figsize=(15, 4))
ax1 = plt.subplot2grid((1, 3), (0, 0))
ax2 = plt.subplot2grid((1, 3), (0, 1))
ax3 = plt.subplot2grid((1, 3), (0, 2))
# raw image
im1 = ax1.imshow(IM, aspect='auto', extent=[-512, 512, -512, 512])
fig.colorbar(im1, ax=ax1, fraction=.1, shrink=0.9, pad=0.03)
ax1.set_xlabel('x (pixels)')
ax1.set_ylabel('y (pixels)')
ax1.set_title('velocity map image: size {:d}x{:d}'.format(rows, cols))
# 2D transform
c2 = cols//2 # half-image width
im2 = ax2.imshow(AIM.transform, aspect='auto', vmin=0,
vmax=AIM.transform[:c2-50, :c2-50].max(),
extent=[-512, 512, -512, 512])
fig.colorbar(im2, ax=ax2, fraction=.1, shrink=0.9, pad=0.03)
ax2.set_xlabel('x (pixels)')
ax2.set_ylabel('y (pixels)')
ax2.set_title('Hansen Law inverse Abel')
# 1D speed distribution
#ax3.plot(radial, speeds/speeds[200:].max())
#ax3.axis(xmax=500, ymin=-0.05, ymax=1.1)
#ax3.set_xlabel('speed (pixel)')
#ax3.set_ylabel('intensity')
#ax3.set_title('speed distribution')
# PES
ax3.plot(eBE, PES/PES[eBE < 5000].max())
ax3.axis(xmin=0)
ax3.set_xlabel(r'elecron binding energy (cm$^{-1}$)')
ax3.set_ylabel('intensity')
ax3.set_title(r'O${_2}{^-}$ 454 nm photoelectron spectrum')
# Prettify the plot a little bit:
plt.subplots_adjust(left=0.06, bottom=0.17, right=0.95, top=0.89, wspace=0.35,
hspace=0.37)
# save copy of the plot
plt.savefig('plot_example_hansenlaw.png', dpi=100)
plt.show()
| mit |
alexis-roche/nipy | examples/labs/bayesian_structural_analysis.py | 2 | 3988 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
This script generates a noisy multi-subject activation image dataset
and applies the Bayesian structural analysis on it
Requires matplotlib
Author : Bertrand Thirion, 2009-2013
"""
print(__doc__)
import numpy as np
import scipy.stats as st
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
import nipy.labs.utils.simul_multisubject_fmri_dataset as simul
from nipy.labs.spatial_models.bayesian_structural_analysis import\
compute_landmarks
from nipy.labs.spatial_models.discrete_domain import grid_domain_from_shape
def display_landmarks_2d(landmarks, hrois, stats):
    """Plot the group landmarks and the per-subject ROIs as images.

    Parameters
    ----------
    landmarks : landmark-regions instance or None
        Group-level landmarks; None when the analysis found none.
    hrois : list of hierarchical ROI instances, one per subject.
    stats : array of shape (n_subjects, dim_x, dim_y)
        Subject-level activation maps.

    Creates three figures: the input maps, the individual landmark
    regions, and the group-level summary (label map + spatial density).
    Reads the module-level ``sigma`` for the spatial scale.

    Fix: ``plt.subplot`` grid arguments now use ``//``; under Python 3
    the original ``n_subjects / 5`` produced a float, which modern
    matplotlib rejects (``//`` is identical under Python 2 int division).
    """
    shape = stats[0].shape
    n_subjects = len(stats)
    lmax = 0
    grp_map, density = np.zeros(shape), np.zeros(shape)
    if landmarks is not None:
        domain = landmarks.domain
        grp_map = landmarks.map_label(domain.coord, .8, sigma).reshape(shape)
        density = landmarks.kernel_density(k=None, coord=domain.coord,
                                           sigma=sigma).reshape(shape)
        lmax = landmarks.k + 2
    # Figure 1: input data
    fig_input = plt.figure(figsize=(8, 3.5))
    fig_input.text(.5,.9, "Input activation maps", ha='center')
    vmin, vmax = stats.min(), stats.max()
    for subject in range(n_subjects):
        plt.subplot(n_subjects // 5, 5, subject + 1)
        plt.imshow(stats[subject], interpolation='nearest',
                   vmin=vmin, vmax=vmax)
        plt.axis('off')
    # Figure 2: individual hrois
    fig_output = plt.figure(figsize=(8, 3.5))
    fig_output.text(.5, .9, "Individual landmark regions", ha="center")
    for subject in range(n_subjects):
        plt.subplot(n_subjects // 5, 5, subject + 1)
        lw = - np.ones(shape)
        if hrois[subject].k > 0:
            nls = hrois[subject].get_roi_feature('label')
            # Regions matched to no landmark get a sentinel label so they
            # render with the top colormap value.
            nls[nls == - 1] = np.size(landmarks) + 2
            for k in range(hrois[subject].k):
                np.ravel(lw)[hrois[subject].label == k] = nls[k]
        plt.imshow(lw, interpolation='nearest', vmin=-1, vmax=lmax)
        plt.axis('off')
    # Figure 3: Group-level results
    plt.figure(figsize=(6, 3))
    plt.subplot(1, 2, 1)
    plt.imshow(grp_map, interpolation='nearest', vmin=-1, vmax=lmax)
    plt.title('group-level position 80% \n confidence regions', fontsize=10)
    plt.axis('off')
    plt.colorbar(shrink=.8)
    plt.subplot(1, 2, 2)
    plt.imshow(density, interpolation='nearest')
    plt.title('Spatial density under h1', fontsize=10)
    plt.axis('off')
    plt.colorbar(shrink=.8)
###############################################################################
# Main script
###############################################################################
# generate the data
n_subjects = 10
shape = (60, 60)
pos = np.array([[12, 14],
[20, 20],
[30, 20]])
ampli = np.array([5, 7, 6])
sjitter = 1.0
stats = simul.surrogate_2d_dataset(n_subj=n_subjects, shape=shape, pos=pos,
ampli=ampli, width=5.0)
# set various parameters
threshold = float(st.t.isf(0.01, 100))
sigma = 4. / 1.5
prevalence_threshold = n_subjects * .25
prevalence_pval = 0.9
smin = 5
algorithm = 'co-occurrence' # 'density'
domain = grid_domain_from_shape(shape)
# get the functional information
stats_ = np.array([np.ravel(stats[k]) for k in range(n_subjects)]).T
# run the algo
landmarks, hrois = compute_landmarks(
domain, stats_, sigma, prevalence_pval, prevalence_threshold,
threshold, smin, method='prior', algorithm=algorithm)
display_landmarks_2d(landmarks, hrois, stats)
if landmarks is not None:
landmarks.show()
plt.show()
| bsd-3-clause |
jayflo/scikit-learn | sklearn/cluster/birch.py | 207 | 22706 | # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
    """Yield each row of a CSR matrix as a dense 1-D array.

    Densifying one row at a time avoids constructing a sparse matrix per
    row, which would be expensive.
    """
    n_samples = X.shape[0]
    X_indices = X.indices
    X_data = X.data
    X_indptr = X.indptr
    for i in range(n_samples):
        dense_row = np.zeros(X.shape[1])
        startptr, endptr = X_indptr[i], X_indptr[i + 1]
        dense_row[X_indices[startptr:endptr]] = X_data[startptr:endptr]
        yield dense_row
def _split_node(node, threshold, branching_factor):
    """Split ``node`` in two when there is no room for a new subcluster.

    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of most distant subclusters is found.
    3. Every subcluster of ``node`` is assigned to whichever of the two
       distant subclusters it is closest to, updating the new
       subclusters/nodes accordingly.
    4. The two new nodes are set as children of the two new subclusters.

    Returns the two new subclusters; the caller replaces ``node`` with them.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_node2 = _CFNode(
        threshold, branching_factor, is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2
    if node.is_leaf:
        # Keep the doubly-linked list of leaves consistent after the split.
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2
    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
    n_clusters = dist.shape[0]
    farthest_idx = np.unravel_index(
        dist.argmax(), (n_clusters, n_clusters))
    # BUGFIX: the previous ``dist[[farthest_idx]]`` relied on NumPy's
    # deprecated interpretation of a list containing a tuple as a
    # multi-row index; it warns/fails on modern NumPy. Index the two
    # rows explicitly instead (same values).
    node1_dist = dist[farthest_idx[0]]
    node2_dist = dist[farthest_idx[1]]
    node1_closer = node1_dist < node2_dist
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode(object):
    """Each node in a CFTree is called a CFNode.
    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.
    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.
    branching_factor : int
        Maximum number of CF subclusters in each node.
    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.
    n_features : int
        The number of features.
    Attributes
    ----------
    subclusters_ : array-like
        list of subclusters for a particular CFNode.
    prev_leaf_ : _CFNode
        prev_leaf. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True.
        the final subclusters.
    init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
        manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of the ``init_centroids_`` .
    init_sq_norm_ : ndarray, shape (branching_factor + 1,)
        manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
    centroids_ : ndarray
        view of ``init_centroids_``.
    squared_norm_ : ndarray
        view of ``init_sq_norm_``.
    """
    def __init__(self, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features
        # The list of subclusters, centroids and squared norms
        # to manipulate throughout.
        self.subclusters_ = []
        # One spare row (branching_factor + 1) lets the node temporarily
        # hold the overflowing subcluster before it gets split.
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_sq_norm_ = np.zeros((branching_factor + 1))
        self.squared_norm_ = []
        # Leaf-chain pointers; only meaningful when is_leaf is True.
        self.prev_leaf_ = None
        self.next_leaf_ = None
    def append_subcluster(self, subcluster):
        # Store the subcluster's summary stats in the next free row and
        # refresh the views over the occupied prefix.
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way
        # if we change init_centroids and init_sq_norm_, it is
        # sufficient,
        self.centroids_ = self.init_centroids_[:n_samples + 1, :]
        self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
    def update_split_subclusters(self, subcluster,
                                 new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        # The first half replaces the old subcluster in place; the second
        # half is appended (may push the node past branching_factor).
        ind = self.subclusters_.index(subcluster)
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
        self.append_subcluster(new_subcluster2)
    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node."""
        # Returns True when this node overflowed and must itself be split
        # by the caller; False otherwise.
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False
        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # (-2 * C.x + ||C||^2 differs from the squared distance only by a
        # constant ||x||^2 term, so the argmin is unchanged.)
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]
        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(
                subcluster)
            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = \
                    self.subclusters_[closest_index].centroid_
                self.init_sq_norm_[closest_index] = \
                    self.subclusters_[closest_index].sq_norm_
                return False
            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accomodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor)
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2)
                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False
        # good to go!
        else:
            merged = closest_subcluster.merge_subcluster(
                subcluster, self.threshold)
            if merged:
                self.init_centroids_[closest_index] = \
                    closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = \
                    closest_subcluster.sq_norm_
                return False
            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False
            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
    """Implements the Birch clustering algorithm.
    Every new sample is inserted into the root of the Clustering Feature
    Tree. It is then clubbed together with the subcluster that has the
    centroid closest to the new sample. This is done recursively till it
    ends up at the subcluster of the leaf of the tree has the closest centroid.
    Read more in the :ref:`User Guide <birch>`.
    Parameters
    ----------
    threshold : float, default 0.5
        The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be lesser than the threshold. Otherwise a new
        subcluster is started.
    branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new samples enters
        such that the number of subclusters exceed the branching_factor then
        the node has to be split. The corresponding parent also has to be
        split and if the number of subclusters in the parent is greater than
        the branching factor, then it has to be split recursively.
    n_clusters : int, instance of sklearn.cluster model, default None
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples. By default, this final
        clustering step is not performed and the subclusters are returned
        as they are. If a model is provided, the model is fit treating
        the subclusters as new samples and the initial data is mapped to the
        label of the closest subcluster. If an int is provided, the model
        fit is AgglomerativeClustering with n_clusters set to the int.
    compute_labels : bool, default True
        Whether or not to compute labels for each fit.
    copy : bool, default True
        Whether or not to make a copy of the given data. If set to False,
        the initial data will be overwritten.
    Attributes
    ----------
    root_ : _CFNode
        Root of the CFTree.
    dummy_leaf_ : _CFNode
        Start pointer to all the leaves.
    subcluster_centers_ : ndarray,
        Centroids of all subclusters read directly from the leaves.
    subcluster_labels_ : ndarray,
        Labels assigned to the centroids of the subclusters after
        they are clustered globally.
    labels_ : ndarray, shape (n_samples,)
        Array of labels assigned to the input data.
        if partial_fit is used instead of fit, they are assigned to the
        last batch of data.
    Examples
    --------
    >>> from sklearn.cluster import Birch
    >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
    >>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
    ... compute_labels=True)
    >>> brc.fit(X)
    Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
       threshold=0.5)
    >>> brc.predict(X)
    array([0, 0, 0, 1, 1, 1])
    References
    ----------
    * Tian Zhang, Raghu Ramakrishnan, Maron Livny
      BIRCH: An efficient data clustering method for large databases.
      http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
    * Roberto Perdisci
      JBirch - Java implementation of BIRCH clustering algorithm
      https://code.google.com/p/jbirch/
    """
    def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
                 compute_labels=True, copy=True):
        # Hyper-parameters stored untouched (scikit-learn estimator convention).
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.n_clusters = n_clusters
        self.compute_labels = compute_labels
        self.copy = copy
    def fit(self, X, y=None):
        """
        Build a CF Tree for the input data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        """
        # Record which entry point was used; _fit rebuilds the tree when
        # fit_ is True.
        self.fit_, self.partial_fit_ = True, False
        return self._fit(X)
    def _fit(self, X):
        X = check_array(X, accept_sparse='csr', copy=self.copy)
        threshold = self.threshold
        branching_factor = self.branching_factor
        if branching_factor <= 1:
            raise ValueError("Branching_factor should be greater than one.")
        n_samples, n_features = X.shape
        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        partial_fit = getattr(self, 'partial_fit_')
        has_root = getattr(self, 'root_', None)
        if getattr(self, 'fit_') or (partial_fit and not has_root):
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
                                 n_features=n_features)
            # To enable getting back subclusters.
            self.dummy_leaf_ = _CFNode(threshold, branching_factor,
                                       is_leaf=True, n_features=n_features)
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince to use cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X
        for sample in iter_func(X):
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)
            if split:
                # Root overflowed: split it and grow the tree one level by
                # installing a new non-leaf root over the two halves.
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor)
                del self.root_
                self.root_ = _CFNode(threshold, branching_factor,
                                     is_leaf=False,
                                     n_features=n_features)
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)
        centroids = np.concatenate([
            leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids
        self._global_clustering(X)
        return self
    def _get_leaves(self):
        """
        Retrieve the leaves of the CF Node.
        Returns
        -------
        leaves: array-like
            List of the leaf nodes.
        """
        # Walk the next_leaf_ chain starting from the dummy head node.
        leaf_ptr = self.dummy_leaf_.next_leaf_
        leaves = []
        while leaf_ptr is not None:
            leaves.append(leaf_ptr)
            leaf_ptr = leaf_ptr.next_leaf_
        return leaves
    def partial_fit(self, X=None, y=None):
        """
        Online learning. Prevents rebuilding of CFTree from scratch.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features), None
            Input data. If X is not provided, only the global clustering
            step is done.
        """
        self.partial_fit_, self.fit_ = True, False
        if X is None:
            # Perform just the final global clustering step.
            self._global_clustering()
            return self
        else:
            self._check_fit(X)
            return self._fit(X)
    def _check_fit(self, X):
        # Accept either a completed fit or a partial_fit in progress.
        is_fitted = hasattr(self, 'subcluster_centers_')
        # Called by partial_fit, before fitting.
        has_partial_fit = hasattr(self, 'partial_fit_')
        # Should raise an error if one does not fit before predicting.
        if not (is_fitted or has_partial_fit):
            raise NotFittedError("Fit training data before predicting")
        if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
            raise ValueError(
                "Training data and predicted data do "
                "not have same number of features.")
    def predict(self, X):
        """
        Predict data using the ``centroids_`` of subclusters.
        Avoid computation of the row norms of X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        labels: ndarray, shape(n_samples)
            Labelled data.
        """
        X = check_array(X, accept_sparse='csr')
        self._check_fit(X)
        # argmin of -2 X.C^T + ||C||^2 per row equals the nearest centroid
        # (the per-row ||x||^2 term is constant and can be skipped).
        reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
        reduced_distance *= -2
        reduced_distance += self._subcluster_norms
        return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
    def transform(self, X, y=None):
        """
        Transform X into subcluster centroids dimension.
        Each dimension represents the distance from the sample point to each
        cluster centroid.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
            Transformed data.
        """
        check_is_fitted(self, 'subcluster_centers_')
        return euclidean_distances(X, self.subcluster_centers_)
    def _global_clustering(self, X=None):
        """
        Global clustering for the subclusters obtained after fitting
        """
        clusterer = self.n_clusters
        centroids = self.subcluster_centers_
        compute_labels = (X is not None) and self.compute_labels
        # Preprocessing for the global clustering.
        not_enough_centroids = False
        if isinstance(clusterer, int):
            clusterer = AgglomerativeClustering(
                n_clusters=self.n_clusters)
            # There is no need to perform the global clustering step.
            if len(centroids) < self.n_clusters:
                not_enough_centroids = True
        elif (clusterer is not None and not
              hasattr(clusterer, 'fit_predict')):
            raise ValueError("n_clusters should be an instance of "
                             "ClusterMixin or an int")
        # To use in predict to avoid recalculation.
        self._subcluster_norms = row_norms(
            self.subcluster_centers_, squared=True)
        if clusterer is None or not_enough_centroids:
            # Each subcluster becomes its own label.
            self.subcluster_labels_ = np.arange(len(centroids))
            if not_enough_centroids:
                warnings.warn(
                    "Number of subclusters found (%d) by Birch is less "
                    "than (%d). Decrease the threshold."
                    % (len(centroids), self.n_clusters))
        else:
            # The global clustering step that clusters the subclusters of
            # the leaves. It assumes the centroids of the subclusters as
            # samples and finds the final centroids.
            self.subcluster_labels_ = clusterer.fit_predict(
                self.subcluster_centers_)
        if compute_labels:
            self.labels_ = self.predict(X)
| bsd-3-clause |
nal-epfl/line-sigcomm14 | plotting-scripts/plot-data.py | 1 | 36287 | #!/usr/bin/env python2
# Install dependencies:
# sudo apt-get install python-matplotlib dvipng
import colorsys
import getopt
import json
from nicePlot import nicePlot
import math
import numpy
import os
import pprint
import re
import subprocess
import sys
## Params
# Defaults; dataFile/inputDir/outputDir are refined further below once
# --plot has been parsed.
dataFile = 'data-plot1.txt'
latencyFile = 'latency.txt'
throughputInFile = 'throughput-in.txt'
throughputOutFile = 'throughput-out.txt'
inputDir = ''
outputDir = ''
plotNumber = '1'
extraTitleLabel = ''
# Parse long-only command-line options.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', ['plot=', 'in=', 'out=', 'label='])
except getopt.GetoptError as err:
    # print help information and exit:
    print str(err) # will print something like "option -a not recognized"
    sys.exit(2)
output = None
verbose = False
for opt, arg in opts:
    if opt == '--plot':
        plotNumber = arg
    elif opt == '--in':
        inputDir = arg
    elif opt == '--out':
        outputDir = arg
    elif opt == '--label':
        extraTitleLabel = arg
    else:
        assert False, "Unhandled option: " + str(opt)
# Meta plot modes: each branch below re-invokes this script once per
# concrete scenario and then exits; only a plain numeric --plot value
# falls through to the plotting code further down.
if plotNumber == 'all':
    # Re-run this script for every individual plot number.
    script = os.path.realpath(__file__)
    print 'Running script for all scenarios...'
    for plot in [ '1', '1b', '2', '3', '4', '5', '5b', '7', '7b', '7b+', '8', '8b', '8b+' ]:
        args = ['python', script, '--plot', plot]
        print(args)
        subprocess.call(args)
    exit(0)
elif plotNumber == 'real-vs-shaping':
    # Fixed list of real-testbed vs emulator comparison runs.
    script = os.path.realpath(__file__)
    print 'Running script for all scenarios...'
    args = ['python', script, '--plot', '1', '--in', 'plot1-real-data', '--out', 'plot1-real', '--label', '(real)']
    print(args)
    subprocess.call(args)
    args = ['python', script, '--plot', '1', '--in', 'plot1-emulator-data', '--out', 'plot1-emulator', '--label', '(emulator)']
    print(args)
    subprocess.call(args)
    args = ['python', script, '--plot', '7', '--in', 'plot7-real-data', '--out', 'plot7-real', '--label', '(real)']
    print(args)
    subprocess.call(args)
    args = ['python', script, '--plot', '7', '--in', 'plot7-emulator-data', '--out', 'plot7-emulator', '--label', '(emulator)']
    print(args)
    subprocess.call(args)
    args = ['python', script, '--plot', '2', '--in', 'plot2-real-data', '--out', 'plot2-real', '--label', '(real)']
    print(args)
    subprocess.call(args)
    args = ['python', script, '--plot', '2', '--in', 'plot2-emulator-data', '--out', 'plot2-emulator', '--label', '(emulator)']
    print(args)
    subprocess.call(args)
    args = ['python', script, '--plot', '2', '--in', 'plot2-shaping-real-data', '--out', 'plot2-shaping-real', '--label', '(real)']
    print(args)
    subprocess.call(args)
    args = ['python', script, '--plot', '2', '--in', 'plot2-shaping-emulator-data', '--out', 'plot2-shaping-emulator', '--label', '(emulator)']
    print(args)
    subprocess.call(args)
    exit(0)
elif plotNumber == 'vary-rtt-and-buffers':
    # Sweep QoS x RTT x buffer size x scaling; only run for data dirs
    # that actually exist.
    script = os.path.realpath(__file__)
    print 'Running script for all scenarios...'
    #for tag in ['', 'sigcomm-deadline', 'sigcomm-deadline-repro']:
    #for tag in ['sigcomm-deadline-repro']:
    for tag in ['']:
        for qos in ['policing', 'shaping']:
            niceqos = qos.capitalize()
            for rtt in ['50', '80', '120', '200']:
                for buffers in ['large', 'small', 'medium']:
                    for scaling in ['notscaled', 'scaleddown']:
                        nicescaling = 'not scaled' if scaling == 'notscaled' else 'scaled down' if scaling == 'scaleddown' else scaling
                        dataDir = 'plot2-{0}-data-rtt-{1}-buffers-{2}-{3}{4}'.format(qos, rtt, buffers, scaling, ('-' + tag) if tag else '')
                        plotDir = 'plot2-{0}-rtt-{1}-buffers-{2}-{3}{4}'.format(qos, rtt, buffers, scaling, ('-' + tag) if tag else '')
                        label = '{0}, RTT {1}ms, {2} buffers, {3}'.format(niceqos, rtt, buffers, nicescaling)
                        if os.path.isdir(dataDir):
                            args = ['python', script,
                                    '--plot', '2',
                                    '--in', dataDir,
                                    '--out', plotDir,
                                    '--label', label]
                            print(args)
                            if subprocess.call(args) != 0:
                                exit(1)
    exit(0)
elif plotNumber == 'diff-rtt':
    # Asymmetric RTT scenarios (e.g. 48ms one way / 120ms the other).
    script = os.path.realpath(__file__)
    print 'Running script for all scenarios...'
    for tag in ['']:
        for qos in ['policing', 'shaping', 'neutral']:
            niceqos = qos.capitalize()
            for rtt in ['48-120', '120-48']:
                for buffers in ['large', 'small', 'medium']:
                    for scaling in ['notscaled', 'scaleddown']:
                        for tcp in ['cubic']:
                            nicescaling = 'not scaled' if scaling == 'notscaled' else 'scaled down' if scaling == 'scaleddown' else scaling
                            nicertt = rtt.replace('-', '(1)/') + '(2) ms'
                            dataDir = 'plot2-{0}-data-rtt-{1}-buffers-{2}-{3}-{4}{5}'.format(qos, rtt, buffers, scaling, tcp, ('-' + tag) if tag else '')
                            plotDir = 'plot2-{0}-rtt-{1}-buffers-{2}-{3}-{4}{5}'.format(qos, rtt, buffers, scaling, tcp, ('-' + tag) if tag else '')
                            label = '{0}, RTT {1}, {2} buffers, {3}, TCP {4}'.format(niceqos, nicertt, buffers, nicescaling, tcp)
                            if os.path.isdir(dataDir):
                                args = ['python', script,
                                        '--plot', '2',
                                        '--in', dataDir,
                                        '--out', plotDir,
                                        '--label', label]
                                print(args)
                                if subprocess.call(args) != 0:
                                    exit(1)
    exit(0)
elif plotNumber == 'diff-rtt-tcp':
    # Asymmetric RTT combined with mixed TCP congestion-control variants.
    script = os.path.realpath(__file__)
    print 'Running script for all scenarios...'
    for tag in ['']:
        for qos in ['policing', 'shaping', 'neutral']:
            niceqos = qos.capitalize()
            for rtt in ['48-120', '120-48']:
                for buffers in ['large', 'small', 'medium']:
                    for scaling in ['notscaled', 'scaleddown']:
                        for tcp in ['cubic', 'cubic-reno', 'reno-cubic']:
                            nicescaling = 'not scaled' if scaling == 'notscaled' else 'scaled down' if scaling == 'scaleddown' else scaling
                            dataDir = 'plot2-{0}-data-rtt-{1}-buffers-{2}-{3}-{4}{5}'.format(qos, rtt, buffers, scaling, tcp, ('-' + tag) if tag else '')
                            plotDir = 'plot2-{0}-rtt-{1}-buffers-{2}-{3}-{4}{5}'.format(qos, rtt, buffers, scaling, tcp, ('-' + tag) if tag else '')
                            label = '{0}, RTT {1}ms, {2} buffers, {3}, TCP {4}'.format(niceqos, rtt, buffers, nicescaling, tcp)
                            if os.path.isdir(dataDir):
                                args = ['python', script,
                                        '--plot', '2',
                                        '--in', dataDir,
                                        '--out', plotDir,
                                        '--label', label]
                                print(args)
                                if subprocess.call(args) != 0:
                                    exit(1)
    exit(0)
elif plotNumber == 'diff-tcp':
    # Mixed TCP variants encoded in the tag instead of a tcp loop.
    script = os.path.realpath(__file__)
    print 'Running script for all scenarios...'
    for tag in ['cubic-reno', 'reno-cubic']:
        for qos in ['policing', 'shaping', 'neutral']:
            niceqos = qos.capitalize()
            for rtt in ['50', '80', '120', '200']:
                for buffers in ['large', 'small', 'medium']:
                    for scaling in ['notscaled', 'scaleddown']:
                        nicescaling = 'not scaled' if scaling == 'notscaled' else 'scaled down' if scaling == 'scaleddown' else scaling
                        dataDir = 'plot2-{0}-data-rtt-{1}-buffers-{2}-{3}{4}'.format(qos, rtt, buffers, scaling, ('-' + tag) if tag else '')
                        plotDir = 'plot2-{0}-rtt-{1}-buffers-{2}-{3}{4}'.format(qos, rtt, buffers, scaling, ('-' + tag) if tag else '')
                        label = '{0}, RTT {1}ms, {2} buffers, {3}, {4}'.format(niceqos, rtt, buffers, nicescaling, tag)
                        if os.path.isdir(dataDir):
                            args = ['python', script,
                                    '--plot', '2',
                                    '--in', dataDir,
                                    '--out', plotDir,
                                    '--label', label]
                            print(args)
                            if subprocess.call(args) != 0:
                                exit(1)
    exit(0)
elif plotNumber == 'vary-qos':
    # Generic sweep over every sibling data directory ('image' dirs are
    # this script's own output and are skipped).
    script = os.path.realpath(__file__)
    print 'Running script for all scenarios...'
    for d in os.listdir('.'):
        if not os.path.isdir(d):
            continue
        if 'image' in d:
            continue
        plotNo = '1-7'
        dataDir = d
        plotDir = 'image-{0}'.format(d)
        label = d
        print 'Looking for {0}'.format(dataDir)
        if os.path.isdir(dataDir):
            args = ['python', script,
                    '--plot', plotNo,
                    '--in', dataDir,
                    '--out', plotDir,
                    '--label', label]
            print(args)
            if subprocess.call(args) != 0:
                exit(1)
    exit(0)
elif plotNumber == 'vary-transfer-size':
    # Same directory sweep as vary-qos, but renders plot 2.
    script = os.path.realpath(__file__)
    print 'Running script for all scenarios...'
    for d in os.listdir('.'):
        if not os.path.isdir(d):
            continue
        if 'image' in d:
            continue
        plotNo = '2'
        dataDir = d
        plotDir = 'image-{0}'.format(d)
        label = d
        print 'Looking for {0}'.format(dataDir)
        if os.path.isdir(dataDir):
            args = ['python', script,
                    '--plot', plotNo,
                    '--in', dataDir,
                    '--out', plotDir,
                    '--label', label]
            print(args)
            if subprocess.call(args) != 0:
                exit(1)
    exit(0)
elif plotNumber == 'vary-rtt':
    # Directory sweep rendering plot 9.
    script = os.path.realpath(__file__)
    print 'Running script for all scenarios...'
    for d in os.listdir('.'):
        if not os.path.isdir(d):
            continue
        if 'image' in d:
            continue
        plotNo = '9'
        dataDir = d
        plotDir = 'image-{0}'.format(d)
        label = d
        print 'Looking for {0}'.format(dataDir)
        if os.path.isdir(dataDir):
            args = ['python', script,
                    '--plot', plotNo,
                    '--in', dataDir,
                    '--out', plotDir,
                    '--label', label]
            print(args)
            if subprocess.call(args) != 0:
                exit(1)
    exit(0)
elif plotNumber == 'vary-tcp':
    # Directory sweep rendering plot 10.
    script = os.path.realpath(__file__)
    print 'Running script for all scenarios...'
    for d in os.listdir('.'):
        if not os.path.isdir(d):
            continue
        if 'image' in d:
            continue
        plotNo = '10'
        dataDir = d
        plotDir = 'image-{0}'.format(d)
        label = d
        print 'Looking for {0}'.format(dataDir)
        if os.path.isdir(dataDir):
            args = ['python', script,
                    '--plot', plotNo,
                    '--in', dataDir,
                    '--out', plotDir,
                    '--label', label]
            print(args)
            if subprocess.call(args) != 0:
                exit(1)
    exit(0)
elif plotNumber == 'vary-congestion':
    # Directory sweep rendering plot 11.
    script = os.path.realpath(__file__)
    print 'Running script for all scenarios...'
    for d in os.listdir('.'):
        if not os.path.isdir(d):
            continue
        if 'image' in d:
            continue
        plotNo = '11'
        dataDir = d
        plotDir = 'image-{0}'.format(d)
        label = d
        print 'Looking for {0}'.format(dataDir)
        if os.path.isdir(dataDir):
            args = ['python', script,
                    '--plot', plotNo,
                    '--in', dataDir,
                    '--out', plotDir,
                    '--label', label]
            print(args)
            if subprocess.call(args) != 0:
                exit(1)
    exit(0)
# Some plot numbers reuse the data of another plot (e.g. 5 reads plot 1's
# data); resolve the default input directory and data file accordingly.
if plotNumber == '5':
    if not inputDir:
        inputDir = 'plot%s-data' % '1'
    dataFile = 'data-plot%s.txt' % '1'
elif plotNumber == '5b':
    if not inputDir:
        inputDir = 'plot%s-data' % '1b'
    dataFile = 'data-plot%s.txt' % '1b'
elif plotNumber == '7b+':
    if not inputDir:
        inputDir = 'plot%s-data' % '7b'
    dataFile = 'data-plot%s.txt' % '7b'
elif plotNumber == '8':
    if not inputDir:
        inputDir = 'plot%s-data' % '7'
    dataFile = 'data-plot%s.txt' % '7'
elif plotNumber == '8b' or plotNumber == '8b+':
    if not inputDir:
        inputDir = 'plot%s-data' % '7b'
    dataFile = 'data-plot%s.txt' % '7b'
else:
    if not inputDir:
        inputDir = 'plot%s-data' % plotNumber
    dataFile = 'data-plot%s.txt' % plotNumber
if not outputDir:
    outputDir = 'plot%s' % plotNumber
print 'Arguments:'
print 'Input dir:', inputDir
print 'Data file:', dataFile
print 'Output dir:', outputDir
print 'Plot:', plotNumber
## End of params
def isNumber(s):
    """Return True when ``s`` can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def toBase64JS(filename):
    # Return the file's contents as a JavaScript source fragment: a
    # base64-encoded string literal split into concatenated quoted lines.
    # NOTE(review): relies on the Python 2-only str.encode('base64')
    # codec; porting to Python 3 would require the base64 module.
    return '"' + open(filename, 'rb').read().encode('base64').replace('\n', '" + \n"') + '"'
## Natural sorting (e.g.: asdf7, asdf8, asdf9, asdf10, ...)
# ref: http://stackoverflow.com/questions/4836710
def naturalSorted(l):
    """Sort strings so embedded numbers order numerically (a7 < a10)."""
    def token(text):
        # Digit runs compare as ints, everything else case-insensitively.
        return int(text) if text.isdigit() else text.lower()
    def natural_key(s):
        return [token(chunk) for chunk in re.split('([0-9]+)', s)]
    return sorted(l, key=natural_key)
def reversed(l):
    """Return a reversed copy of the list ``l``.

    BUGFIX: the previous implementation did ``l2 = l`` (a rebinding, not
    a copy) and then ``l2.reverse()``, so it reversed the *caller's* list
    in place as a hidden side effect. Slicing builds a new list and
    leaves the argument untouched.

    NOTE: this deliberately shadows the ``reversed`` builtin, as the
    original function did.
    """
    return l[::-1]
def sortCongestion(l):
    """Sort '/'-separated congestion labels in severity order
    (no1 < no2 < no3 < lo < me < hi); unknown parts compare as strings.
    """
    severity = {'no1': 1, 'no2': 2, 'no3': 3, 'lo': 4, 'me': 5, 'hi': 6}
    def congestion_key(entry):
        return [severity.get(part, part) for part in re.split('/', entry)]
    return sorted(l, key=congestion_key)
def mean(l):
    """Arithmetic mean of ``l``; returns 0 for an empty sequence."""
    if not l:
        return 0
    return sum(l) / len(l)
class Experiment(object):
    """Parameters and measurements parsed from one experiment run."""
    def __init__(self):
        # Scenario description (extracted from the experiment tag).
        self.tag = ""
        self.policing = ""
        self.shaping = ""
        self.tcp = ""
        self.rtt = ""
        self.congestion = ""
        # Measurements.
        self.throughputIn = ""
        self.pCongClass1PerPath = []
        self.pCongClass2PerPath = []
        self.pCongLinkComputed = []
        self.procDelayAverage = 0
        self.procDelayPeak = 0
        self.queuingDelay = 0 # queuing delay for 1 frame
        self.pathClass = []
        self.congThreshold = 0.05 # congestion threshold in percent, modified below
    def _describe(self):
        # Pretty-printed attribute dump, shared by __repr__ and __str__.
        return "Experiment" + pprint.PrettyPrinter(indent=2).pformat(self.__dict__)
    def __repr__(self):
        return self._describe()
    def __str__(self):
        return self._describe()
# Regenerate data file
# Each bash one-liner concatenates the relevant per-experiment output
# file from every subdirectory of inputDir into one aggregate text file.
print 'Regenerating data file from experiment data...'
args = ['bash', '-c', '''rm -f {0} ; for d in {1}/* ; do echo $d; cat "$d/path-congestion-probs.txt" 1>>{0} 2>/dev/null ; done'''.format(dataFile, inputDir)]
print(args)
subprocess.call(args)
# Latency lines are grepped out of the emulator log.
args = ['bash', '-c', '''rm -f {0} ; for d in {1}/* ; do echo $d; cat "$d/emulator.out" | grep 'Event delay' 1>>{0} 2>/dev/null ; done'''.format(latencyFile, inputDir)]
print(args)
subprocess.call(args)
args = ['bash', '-c', '''rm -f {0} ; for d in {1}/* ; do echo $d; cat "$d/emulator.out" | grep 'Bits received per second' 1>>{0} 2>/dev/null ; done'''.format(throughputInFile, inputDir)]
print(args)
subprocess.call(args)
args = ['bash', '-c', '''rm -f {0} ; for d in {1}/* ; do echo $d; cat "$d/emulator.out" | grep 'Bits sent per second' 1>>{0} 2>/dev/null ; done'''.format(throughputOutFile, inputDir)]
print(args)
subprocess.call(args)
## Read data
# Accumulators filled from the regenerated aggregate files, one entry
# per experiment, consumed in order when experiments are parsed below.
print 'Reading data...'
latencyPeaks = []
latencyAverages = []
throughputInValues = []
throughputOutValues = []
def timestampStr2us(text):
    """Convert an emulator timestamp like ' 0 0s 0m 98u 542n' to microseconds.

    The five whitespace-separated fields are scaled by 1e9, 1e6, 1e3, 1
    and 1e-3 microseconds respectively (unit suffixes are stripped).

    Renamed the parameter from ``str`` to ``text`` so it no longer shadows
    the builtin; all call sites pass it positionally.
    """
    # ' 0 0s 0m 98u 542n'
    tokens = [float(re.sub('[a-z]', '', word)) for word in text.split()]
    return tokens[0] * 1.0e9 + tokens[1] * 1.0e6 + tokens[2] * 1.0e3 + tokens[3] * 1.0e0 + tokens[4] * 1.0e-3
# Parse the event-delay lines into average/peak latency lists (microseconds).
with open (latencyFile, "r") as myfile:
    for line in myfile.read().split('\n'):
        if not line:
            continue
        # 'Event delay: avg 0 0s 0m 0u 361n ', ' max 0 0s 0m 98u 542n'
        avgDelayStr = line.split(':')[1].split(',')[0].replace('avg', '')
        maxDelayStr = line.split(':')[1].split(',')[1].replace('max', '')
        latencyAverages.append(timestampStr2us(avgDelayStr))
        latencyPeaks.append(timestampStr2us(maxDelayStr))
# Parse inbound throughput values (Mbps), one per experiment.
with open (throughputInFile, "r") as myfile:
    for line in myfile.read().split('\n'):
        if not line:
            continue
        # 'Bits received per second: 67.6772 Mbps'
        throughput = float(line.split(':')[1].split(' ')[1])
        throughputInValues.append(throughput)
# Parse outbound throughput values (Mbps), one per experiment.
with open (throughputOutFile, "r") as myfile:
    for line in myfile.read().split('\n'):
        if not line:
            continue
        # 'Bits sent per second: 67.6772 Mbps'
        throughput = float(line.split(':')[1].split(' ')[1])
        throughputOutValues.append(throughput)
experiments = []
with open (dataFile, "r") as myfile:
    data = myfile.read()
# Parse the aggregated data file: an 'Experiment' line starts a new record,
# a 'Class' line gives the per-path class assignment, and a line whose first
# token equals the record's congestion threshold carries the per-path
# congestion probabilities.
for line in data.split('\n'):
    tokens = line.split('\t')
    for i in range(len(tokens)):
        tokens[i] = tokens[i].strip()
    if tokens[0] == 'Experiment':
        if experiments:
            print experiments[-1]
        print 'Found experiment:', tokens[1]
        experiments.append(Experiment())
        experiments[-1].tag = tokens[1]
        # The latency/throughput lists were produced in the same order as
        # the experiments, so consume them front-to-back.
        experiments[-1].procDelayAverage = latencyAverages.pop(0)
        experiments[-1].procDelayPeak = latencyPeaks.pop(0)
        experiments[-1].throughputIn = throughputInValues.pop(0)
        experiments[-1].throughputOut = throughputOutValues.pop(0)
        # Each parameter is optional in the tag string, hence the bare
        # try/except around every regex extraction.
        try:
            experiments[-1].policing = re.compile('policing-[0-9]+(\\.[0-9]+)?-[0-9]+(\\.[0-9]+)?').search(experiments[-1].tag).group(0).replace('policing-', '').replace('-', '/').replace('1.0/1.0', 'No policing')
        except:
            pass
        try:
            experiments[-1].shaping = re.compile('shaping-[0-9]+(\\.[0-9]+)?-[0-9]+(\\.[0-9]+)?').search(experiments[-1].tag).group(0).replace('shaping-', '').replace('-', '/')
        except:
            pass
        try:
            experiments[-1].transferSize = re.compile('transfer-size-[0-9]+(\\.[0-9]+)?-[0-9]+(\\.[0-9]+)?').search(experiments[-1].tag).group(0).replace('transfer-size-', '').replace('-', '/').replace('9999', 'Long')
            sizes = experiments[-1].transferSize.split('/')
            if len(sizes) == 2 and sizes[0] == sizes[1]:
                experiments[-1].transferSize = sizes[0]
        except:
            pass
        try:
            experiments[-1].linkSpeed = re.compile('link-[0-9]+(\\.[0-9]+)?Mbps').search(experiments[-1].tag).group(0).replace('link-', '')
        except:
            pass
        try:
            experiments[-1].numFlows = str(4*int(re.compile('nflows-[0-9]+').search(experiments[-1].tag).group(0).replace('nflows-', '')))
        except:
            pass
        try:
            experiments[-1].tcp = re.compile('tcp-(cubic-reno|reno-cubic|cubic|reno)').search(experiments[-1].tag).group(0).replace('tcp-', '').replace('-', '/')
        except:
            pass
        try:
            experiments[-1].rtt = re.compile('rtt-[0-9]+-[0-9]+').search(experiments[-1].tag).group(0).replace('rtt-', '').replace('-', '/')
            rtts = experiments[-1].rtt.split('/')
            if len(rtts) == 2 and rtts[0] == rtts[1]:
                experiments[-1].rtt = rtts[0]
        except:
            pass
        try:
            experiments[-1].congestion = re.compile('congestion-[a-zA-Z0-9]+-[a-zA-Z0-9]+').search(experiments[-1].tag).group(0).replace('congestion-', '').replace('-', '/').replace('light', 'lo').replace('medium', 'me').replace('high', 'hi').replace('none', 'no')
        except:
            pass
        # NOTE(review): every branch below assigns the same threshold
        # (0.001), so the if/elif structure is currently redundant --
        # confirm whether per-branch values were intended.
        if experiments[-1].policing and experiments[-1].policing != 'No policing':
            experiments[-1].congThreshold = 0.001
        elif experiments[-1].shaping:
            if experiments[-1].tcp == 'reno' and experiments[-1].transferSize == 'Long':
                experiments[-1].congThreshold = 0.001
            else:
                experiments[-1].congThreshold = 0.001
        else:
            experiments[-1].congThreshold = 0.001
    elif tokens[0] == 'Class':
        # Per-path class assignment (0 or 1) for the current experiment.
        experiments[-1].pathClass = [int(c) for c in tokens[1:]]
    elif experiments and tokens[0] == str(experiments[-1].congThreshold):
        print 'Hit 347'
        # Per-path congestion probabilities, bucketed by path class.
        for p in range(len(tokens) - 1):
            print 'Hit 349'
            if experiments[-1].pathClass[p] == 0:
                experiments[-1].pCongClass1PerPath.append(float(tokens[1 + p]))
            elif experiments[-1].pathClass[p] == 1:
                experiments[-1].pCongClass2PerPath.append(float(tokens[1 + p]))
            else:
                print 'NO CLASS!!!', experiments[-1].pathClass[p]
                print 'tokens = ', tokens
    if len(tokens) < 2:
        # A short line terminates the data; dump the last record and stop.
        if experiments:
            print experiments[-1]
        break
    if not experiments:
        continue
    # delay in us
    # 100 Mb/s = 100 b/us
    # d(us) = 1500 * 8(b) / speed(b/us)
    experiments[-1].queuingDelay = (1500 * 8) / float(experiments[-1].linkSpeed.replace('Mbps', ''))
for e in experiments:
    # Runs with neither policing nor shaping are the neutral baseline.
    if not e.policing and not e.shaping:
        e.policing = e.shaping = 'neutral'
# Remove the temporary aggregate files now that they are parsed.
args = ['bash', '-c', '''rm -f {0} 2>/dev/null'''.format(dataFile)]
print(args)
subprocess.call(args)
args = ['bash', '-c', '''rm -f {0} {1} {2} 2>/dev/null'''.format(latencyFile, throughputInFile, throughputOutFile)]
print(args)
subprocess.call(args)
## End of data reading
## Group experiments
print 'Grouping eperiments by parameters...'
# experimentGroup1: grouping attribute name -> {value -> [experiments]}
experimentGroup1 = {}
experimentGroupTitle = {}
experimentGroupLegendTitle = {}
# '1-7' means auto-detect: pick whichever of policing/shaping actually
# varies across the experiment set.
if plotNumber == '1-7':
    key1 = 'policing'
    if len(set([getattr(e, key1) for e in experiments])) > 1:
        plotNumber = '1'
    key1 = 'shaping'
    if len(set([getattr(e, key1) for e in experiments])) > 1:
        plotNumber = '7'
    if plotNumber == '1-7':
        raise SystemExit('Error')
key1 = ''
# Map the plot number onto the Experiment attribute used for grouping and
# the corresponding title / legend strings.
if plotNumber == '1' or plotNumber == '1b' or plotNumber == '5' or plotNumber == '5b':
    key1 = 'policing'
    experimentGroupTitle[key1] = '@label@'
    experimentGroupLegendTitle[key1] = 'Policing'
elif plotNumber == '2':
    key1 = 'transferSize'
    experimentGroupTitle[key1] = '@label@'
    experimentGroupLegendTitle[key1] = 'Transfer size'
elif plotNumber == '3':
    key1 = 'linkSpeed'
    experimentGroupTitle[key1] = '@label@'
    experimentGroupLegendTitle[key1] = 'Bottleneck bandwidth'
elif plotNumber == '4':
    key1 = 'numFlows'
    experimentGroupTitle[key1] = '@label@'
    experimentGroupLegendTitle[key1] = 'Total number of flows'
elif plotNumber == '7' or plotNumber == '7b' or plotNumber == '8' or plotNumber == '8b':
    key1 = 'shaping'
    experimentGroupTitle[key1] = '@label@'
    experimentGroupLegendTitle[key1] = 'Shaping'
elif plotNumber == '7b+' or plotNumber == '8b+':
    key1 = 'shaping'
    experimentGroupTitle[key1] = '@label@'
    experimentGroupLegendTitle[key1] = 'Shaping (cong. thresh. 0.25\\%)'
elif plotNumber == '9':
    key1 = 'rtt'
    experimentGroupTitle[key1] = '@label@'
    experimentGroupLegendTitle[key1] = 'RTT'
elif plotNumber == '10':
    key1 = 'tcp'
    experimentGroupTitle[key1] = '@label@'
    experimentGroupLegendTitle[key1] = 'TCP'
elif plotNumber == '11':
    key1 = 'congestion'
    experimentGroupTitle[key1] = '@label@'
    experimentGroupLegendTitle[key1] = 'Congestion'
else:
    print 'Bad plot number %s' % plotNumber
    exit()
# 'key1': @key1 -> [ @experiment ]
allValues = sorted(list(set([getattr(e, key1) for e in experiments])))
experimentGroup1[key1] = {}
for v in allValues:
    experimentGroup1[key1][str(v)] = [e for e in experiments if getattr(e, key1) == v]
# Remove empty lists
# NOTE(review): deleting entries while iterating .keys() only works on
# Python 2, where .keys() returns a list snapshot.
for key1 in experimentGroup1.keys():
    for key2 in experimentGroup1[key1].keys():
        if not experimentGroup1[key1][key2]:
            del experimentGroup1[key1][key2]
    if not experimentGroup1[key1]:
        del experimentGroup1[key1]
print 'experimentGroup1 = ', experimentGroup1
## End of experiment grouping
## Plot
# Ensure the output directory exists (an 'already exists' error is ignored).
try:
    os.makedirs(outputDir)
except OSError as exception:
    pass
fig = 0
# [left, bottom, width, height]
figureBorder = [0.10, 0.20, 0.80, 0.70]
# Cleanup target dir
print 'Cleaning up target directory (%s)...' % outputDir
args = ['bash', '-c', '''cd '%s' && ls -1 | grep -E '^[0-9]+\\..*.(pdf|png)$' | while read -r f ; do rm -fv "$f" ; done || echo "Could not change dir to %s"''' % (outputDir, outputDir) ]
print(args)
subprocess.call(args)
print 'Generating plot %s...' % plotNumber
# One set of plots per grouping attribute.
for key1 in naturalSorted(experimentGroup1.keys()):
    if plotNumber == '1' or plotNumber == '1b' or plotNumber == '2' or plotNumber == '3' or plotNumber == '4' \
            or plotNumber == '7' or plotNumber == '7b' or plotNumber == '7b+' or plotNumber == '9' or plotNumber == '10' \
            or plotNumber == '11':
        # Probability of congestion per class curve plots
        curvex = []
        curvexLabels = ['']
        curves1y = []
        curves2y = []
        paramKeys = naturalSorted(experimentGroup1[key1].keys())
        if plotNumber == '1' or plotNumber == '1b':
            # NOTE(review): reversed() returns a one-shot iterator that is
            # exhausted after the first 'for key2 in paramKeys' loop below;
            # later passes would see it empty.  list(reversed(...)) looks
            # intended -- confirm.
            paramKeys = reversed(paramKeys)
        elif plotNumber == '11':
            paramKeys = sortCongestion(paramKeys)
        # NOTE(review): key2 is not bound by any loop yet at this point;
        # this relies on the value leaked from the earlier cleanup loop at
        # module level -- confirm.
        print 'experimentGroup1[key1][key2][0] = ', experimentGroup1[key1][key2][0]
        numPathsClass1 = len(experimentGroup1[key1][key2][0].pCongClass1PerPath)
        for key2 in paramKeys:
            curvex.append(1 + len(curvex))
            curvexLabels.append(key2)
        curvexLabels.append('')
        print 'curvex =', curvex
        print 'curvexLabels = ', curvexLabels
        print 'numPathsClass1 = ', numPathsClass1
        congThresholds = set()
        # One curve per class-1 path; per-path probabilities are sorted so
        # curve iPath always holds the iPath-th smallest value.
        for iPath in range(numPathsClass1):
            curvey = []
            for key2 in paramKeys:
                for e in experimentGroup1[key1][key2]:
                    curvey.append(sorted(e.pCongClass1PerPath)[iPath])
                    congThresholds.add(e.congThreshold)
            print 'curvey (1) =', curvey
            curves1y.append(curvey)
        if len(congThresholds) == 1:
            congThresholdLabel = str(congThresholds.pop())
        else:
            congThresholdLabel = str(congThresholds)
        # NOTE(review): extraTitleLabel accumulates across key1 iterations
        # (defined above this chunk) -- confirm that is intended.
        extraTitleLabel += " {0}\\%".format(congThresholdLabel)
        numPathsClass2 = len(experimentGroup1[key1][key2][0].pCongClass2PerPath)
        for iPath in range(numPathsClass2):
            curvey = []
            for key2 in paramKeys:
                for e in experimentGroup1[key1][key2]:
                    curvey.append(sorted(e.pCongClass2PerPath)[iPath])
            print 'curvey (2) =', curvey
            curves2y.append(curvey)
        # Draw curve plot
        fig += 1
        plot = {}
        metric = 'prob-cong-path'
        plot['title'] = experimentGroupTitle[key1].replace('@metric@', 'Probability of congestion per path')
        if extraTitleLabel:
            plot['title'] = experimentGroupTitle[key1].replace('@label@', extraTitleLabel)
        plot['xLabel'] = experimentGroupLegendTitle[key1]
        plot['yLabel'] = 'Probability of congestion (\\%)'
        plot['fontScale'] = 1.0
        plot['grid'] = ''
        plot['xmin'] = 0
        plot['xmax'] = max(curvex) + 1
        plot['ymin'] = 0
        #plot['ymax'] = 100.0
        plot['minorXTicks'] = 0
        plot['majorXTicks'] = len(curvexLabels)
        #
        print 'curves1y = ', curves1y
        print 'curves2y = ', curves2y
        plot['data'] = []
        for iPath in range(len(curves1y)):
            curve = {}
            curve['type'] = 'bar'
            curve['x'] = curvex
            curve['y'] = curves1y[iPath]
            curve['hatch'] = '/' if iPath == 0 else '\\'
            curve['label'] = 'Path class 1'
            curve['color'] = [0.667, 0, 0.4]
            plot['data'].append(curve)
        for iPath in range(len(curves2y)):
            curve = {}
            curve['type'] = 'bar'
            curve['x'] = curvex
            curve['y'] = curves2y[iPath]
            curve['hatch'] = '/' if iPath == 0 else '\\'
            curve['label'] = 'Path class 2'
            curve['color'] = [0., 0., 0.8]
            plot['data'].append(curve)
        plot['xTickLabels'] = curvexLabels
        # Emit the plot three times: PDF, 100-dpi PNG and 50-dpi PNG, each
        # with a JSON sidecar of the plot description.
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '.pdf'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '.png'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        plot['w'] = 8
        plot['h'] = 6
        plot['dpi'] = 100
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-300.png'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        plot['w'] = 8
        plot['h'] = 6
        plot['dpi'] = 50
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        print plot['fileName']
        ## Emulator latency (processing delay) plot
        # Peak / average processing delay per experiment, plus the constant
        # single-frame queuing delay as a reference line.
        curve1y = []
        for key2 in paramKeys:
            for e in experimentGroup1[key1][key2]:
                curve1y.append(e.procDelayPeak)
        print curve1y
        curve2y = []
        for key2 in paramKeys:
            for e in experimentGroup1[key1][key2]:
                curve2y.append(e.procDelayAverage)
        print curve2y
        curve3y = []
        for key2 in paramKeys:
            for e in experimentGroup1[key1][key2]:
                curve3y.append(e.queuingDelay)
        print curve3y
        # Draw curve plot
        fig += 1
        plot = {}
        metric = 'latency'
        plot['title'] = experimentGroupTitle[key1].replace('@metric@', 'Emulator latency')
        if extraTitleLabel:
            plot['title'] = experimentGroupTitle[key1].replace('@label@', extraTitleLabel)
        plot['xLabel'] = experimentGroupLegendTitle[key1]
        plot['yLabel'] = 'Processing delay (us)'
        plot['fontScale'] = 1.0
        plot['grid'] = ''
        plot['xmin'] = 0
        plot['xmax'] = max(curvex) + 1
        plot['ymin'] = 0
        #plot['ymax'] = 100.0
        plot['minorXTicks'] = 0
        plot['majorXTicks'] = len(curvexLabels)
        #
        plot['data'] = []
        #
        curve = {}
        curve['type'] = 'bar'
        curve['x'] = curvex
        curve['y'] = curve1y
        curve['hatch'] = ''
        curve['label'] = 'Peak'
        curve['color'] = [0., 0, 0.8]
        plot['data'].append(curve)
        #
        curve = {}
        curve['type'] = 'bar'
        curve['x'] = curvex
        curve['y'] = curve2y
        curve['hatch'] = ''
        curve['label'] = 'Average'
        curve['color'] = [0., 0., 0.4]
        plot['data'].append(curve)
        #
        curve = {}
        curve['type'] = 'line'
        curve['x'] = [plot['xmin'], plot['xmax']]
        curve['y'] = [curve3y[0], curve3y[0]]
        curve['label'] = 'Queuing delay of 1 frame'
        curve['pattern'] = '--'
        curve['color'] = [0., 0., 0.]
        plot['data'].append(curve)
        #
        plot['xTickLabels'] = curvexLabels
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-latency' + '.pdf'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-latency' + '.png'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        plot['w'] = 8
        plot['h'] = 6
        plot['dpi'] = 100
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        # NOTE(review): the '-300' file is written at dpi 50 -- the suffix
        # suggests 300 dpi was intended; confirm.
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-latency' + '-300.png'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        plot['w'] = 8
        plot['h'] = 6
        plot['dpi'] = 50
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        print plot['fileName']
        ## Emulator throughput plot
        # In/out throughput per experiment, with a hard-coded 100 Mbps
        # reference line.
        curve1y = []
        for key2 in paramKeys:
            for e in experimentGroup1[key1][key2]:
                curve1y.append(e.throughputIn)
        print curve1y
        curve2y = []
        for key2 in paramKeys:
            for e in experimentGroup1[key1][key2]:
                curve2y.append(e.throughputOut)
        print curve2y
        # Draw curve plot
        fig += 1
        plot = {}
        metric = 'throughput'
        plot['title'] = experimentGroupTitle[key1].replace('@metric@', 'Throughput')
        if extraTitleLabel:
            plot['title'] = experimentGroupTitle[key1].replace('@label@', extraTitleLabel)
        plot['xLabel'] = experimentGroupLegendTitle[key1]
        plot['yLabel'] = 'Throughput (Mbps)'
        plot['fontScale'] = 1.0
        plot['grid'] = ''
        plot['xmin'] = 0
        plot['xmax'] = max(curvex) + 1
        plot['ymin'] = 0
        #plot['ymax'] = 100.0
        plot['minorXTicks'] = 0
        plot['majorXTicks'] = len(curvexLabels)
        #
        plot['data'] = []
        #
        curve = {}
        curve['type'] = 'bar'
        curve['x'] = curvex
        curve['y'] = curve1y
        curve['hatch'] = ''
        curve['label'] = 'In'
        curve['color'] = [0., 0, 0.8]
        plot['data'].append(curve)
        #
        curve = {}
        curve['type'] = 'bar'
        curve['x'] = curvex
        curve['y'] = curve2y
        curve['hatch'] = ''
        curve['label'] = 'Out'
        curve['color'] = [0., 0., 0.4]
        plot['data'].append(curve)
        # Hardcode 100 Mbps line
        curve = {}
        curve['type'] = 'line'
        curve['x'] = [plot['xmin'], plot['xmax']]
        curve['y'] = [100.0, 100.0]
        curve['label'] = ''
        curve['pattern'] = '--'
        curve['color'] = [0., 0., 0.]
        plot['data'].append(curve)
        #
        plot['xTickLabels'] = curvexLabels
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-throughput' + '.pdf'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-throughput' + '.png'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        plot['w'] = 8
        plot['h'] = 6
        plot['dpi'] = 100
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-throughput' + '-300.png'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        plot['w'] = 8
        plot['h'] = 6
        plot['dpi'] = 50
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        print plot['fileName']
    elif plotNumber == '5' or plotNumber == '5b' or plotNumber == '8' or plotNumber == '8b' or plotNumber == '8b+':
        # Non-neutrality
        # Spread (max - min) of the computed per-link congestion
        # probabilities, one point per experiment.
        curvex = []
        curvexLabels = ['']
        curvey = []
        paramKeys = naturalSorted(experimentGroup1[key1].keys())
        if plotNumber == '5' or plotNumber == '5b':
            # NOTE(review): reversed() is a one-shot iterator; it is
            # consumed by the first loop below, leaving the second loop
            # empty -- confirm list(reversed(...)) was intended.
            paramKeys = reversed(paramKeys)
        for key2 in paramKeys:
            curvex.append(1 + len(curvex))
            curvexLabels.append(key2)
        curvexLabels.append('')
        print 'curvex =', curvex
        print 'curvexLabels = ', curvexLabels
        for key2 in paramKeys:
            for e in experimentGroup1[key1][key2]:
                curvey.append(max(e.pCongLinkComputed) - min(e.pCongLinkComputed))
        print 'curvey =', curvey
        # Draw curve plot
        fig += 1
        plot = {}
        metric = 'non-neutrality'
        plot['title'] = experimentGroupTitle[key1].replace('@metric@', 'Computed non-neutrality')
        if extraTitleLabel:
            plot['title'] = experimentGroupTitle[key1].replace('@label@', extraTitleLabel)
        plot['xLabel'] = experimentGroupLegendTitle[key1]
        plot['yLabel'] = 'Computed non-neutrality (0-100)'
        plot['fontScale'] = 1.0
        plot['grid'] = ''
        plot['xmin'] = 0
        plot['xmax'] = max(curvex) + 1
        plot['ymin'] = 0
        #plot['ymax'] = 100.0
        plot['minorXTicks'] = 0
        plot['majorXTicks'] = len(curvexLabels)
        plot['noLegend'] = 1
        #
        plot['data'] = []
        curve = {}
        curve['type'] = 'line'
        curve['x'] = curvex
        curve['y'] = curvey
        curve['pattern'] = '-+'
        #curve['label'] = 'Computed non-neutrality'
        curve['color'] = [0.667, 1.0, 0.8]
        plot['data'].append(curve)
        plot['xTickLabels'] = curvexLabels
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '.pdf'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '.png'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        plot['w'] = 8
        plot['h'] = 6
        plot['dpi'] = 100
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        plot['fileName'] = outputDir + '/' + str(plotNumber) + '. stats ' + metric + ' - ' + key1 + '-300.png'
        plot['fileName'] = plot['fileName'].replace('\\', ' ')
        plot['fileName'] = plot['fileName'].replace('%', ' ')
        plot['w'] = 8
        plot['h'] = 6
        plot['dpi'] = 50
        with open(plot['fileName'] + '.json', 'wb') as f:
            json.dump(plot, f)
        nicePlot(plot)
        print plot['fileName']
    else:
        # Unrecognized plot number: report and abort the script.
        print 'Unknown plotNumber:', plotNumber
        exit()
| gpl-2.0 |
ichmonkey/graph | band.py | 1 | 3690 | """
You can use the proper typesetting unicode minus (see
http://en.wikipedia.org/wiki/Plus_sign#Plus_sign) or the ASCII hyphen
for minus, which some people prefer. The matplotlibrc param
axes.unicode_minus controls the default behavior.
The default is to use the unicode minus
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
from mpl_toolkits.axes_grid1.parasite_axes import SubplotHost
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.gridspec as gridspec
import sys,re,string
def readData(inputFile):
    """Read band data from *inputFile*.

    The file holds pairs of lines: k-point coordinates on the first line of
    each pair and the corresponding energies on the second.  Energies are
    shifted by a hard-coded Fermi level per data set.

    Returns a list of [label, X, Y] triples, one per line pair.

    Fixes: '//' keeps nSet an int on Python 3 as well (plain '/' yields a
    float there, breaking range()); the file is closed via 'with' even on
    error; the dead 'i=0'/'i+=1' bookkeeping around the for loop is gone.
    """
    Data = []
    # Hard-coded Fermi energies, one per data set (pair of lines).
    eFermi = [6.207862, 5.642064, 5.013502]
    with open(inputFile) as f:
        lines = f.readlines()
    nSet = len(lines) // 2
    for i in range(nSet):
        label = 'band'
        X = [float(x) for x in lines[i * 2 + 0].split()]
        Y = [float(y) - eFermi[i] for y in lines[i * 2 + 1].split()]
        Data.append([label, X, Y])
    return Data
def draw(file='band.dat'):
    """Plot the band structure stored in *file* (format as written by
    readBand / read by readData) and show it interactively."""
    # NOTE(review): titleFontSize, markerSize and lineSet are unused.
    titleFontSize=18
    markerSize=11
    lineWidth=3
    # Use the ASCII hyphen instead of the unicode minus in tick labels.
    matplotlib.rcParams['axes.unicode_minus'] = False
    fig = plt.figure(figsize=(9.5,7))
    #~ plt.subplots_adjust(top=0.92,bottom=0.08,left =0.1,right =0.95,hspace=0.4,wspace=0.3)
    #~ band1
    #~ gs1=gridspec.GridSpec(2,2)
    #~ gs1.update(left=0.1, right=0.47, wspace=0.0)
    ax2 = fig.add_subplot(111)
    ax2.tick_params(direction='in', labelleft='on',labelright='off')
    Data=readData(file)
    lineSet=['bo','ro','go']
    i=0
    # One scatter series (black dots) per data set.
    for data in Data:
        labelt=data[0]
        X=data[1]
        Y=data[2]
        ax2.plot(X,Y,'ko',label=labelt,markersize=5,linewidth=lineWidth,markeredgewidth =0)
        i+=1
    # Major ticks every 1 eV (integer labels), minor ticks every 0.1 eV.
    ax2.yaxis.set_major_locator(MultipleLocator(1))
    ax2.yaxis.set_major_formatter(FormatStrFormatter('%d'))
    ax2.yaxis.set_minor_locator(MultipleLocator(0.1))
    #~ ax2.set_ylim(-7,2)
    ax2.set_ylabel('E (eV)',size=15)
    plt.show()
def readBand(file='EIGENVAL'):
    """Convert a VASP EIGENVAL file into 'bandOUT.txt' and 'band.dat'.

    band.dat receives two lines per run: accumulated k-path distances on
    the first and the (Fermi-shifted) energies on the second, matching the
    format that readData() expects.
    """
    fw=open('bandOUT.txt','w')
    fdat=open('band.dat','w')
    # NOTE(review): the Fermi shift is disabled here (0); readData applies
    # its own hard-coded shift -- confirm only one of the two is intended.
    eFermi=0
    k=(0,0,0)
    kold=(0,0,0)
    dk=0.0
    kp=0.0  # accumulated distance along the k-path
    K=[]
    En=[]
    f=open(file,'r')
    # Skip the 7-line EIGENVAL header.
    for i in range(7):
        f.readline()
    for line in f.readlines():
        # k-point lines carry three scientific-notation coordinates.
        m= re.match(r'\s+(-*[0-9].[0-9]+E[+-][0-9]+)\s+(-*[0-9].[0-9]+E[+-][0-9]+)\s+(-*[0-9].[0-9]+E[+-][0-9]+)',line)
        if m :
            """
            k point distance calculation
            """
            k=( float(m.group(1)) , float(m.group(2)) , float(m.group(3)) )
            if dk < 1000 :
                dk=pow(pow(k[0]-kold[0],2)+pow(k[1]-kold[1],2)+pow(k[2]-kold[2],2),0.5)
                # A jump larger than 0.2 is treated as a discontinuity in
                # the k-path and contributes no distance.
                if dk>0.2:
                    dk=0
            else:
                dk=0
            kold=k
            kp=kp+dk
            #print "matched"
            #~ if len(band)>0:
            #~ bands.append(band)
            #~ band=[]
        else:
            # Band-energy lines: record distance plus shifted energy.
            if len(line)>2:
                fw.write(str(kp)+'\t'+line[0:len(line)-2]+'\n')
                K.append(str(kp))
                En.append(str((float(line.split()[1])-eFermi)))
                #~ print str(kp)+'\t'+line[0:len(line)-2].strip()
    # Re-emit the collected pairs, then write the two band.dat rows.
    for i in range(len(K)):
        fw.write(str(K[i])+'\t'+En[i]+'\n')
    for k in K:
        fdat.write(k+' ')
    fdat.write('\n')
    for en in En:
        fdat.write(en+' ')
    f.close()
    fw.close()
    fdat.close()
# Script entry: convert EIGENVAL to band.dat, then plot it (draw() below).
readBand('EIGENVAL')
draw() | gpl-2.0 |
tbpmig/mig-website | mig_main/demographics.py | 1 | 10847 | import numpy
from matplotlib import pyplot
from django.db.models import Count
from django.http import HttpResponse
from event_cal.models import CalendarEvent
from history.models import Distinction, Officer
from mig_main.models import (
MemberProfile,
Major,
Status,
Standing,
AcademicTerm,
ShirtSize,
TBPChapter,
ALUM_MAIL_FREQ_CHOICES,
GENDER_CHOICES,
)
from mig_main.utility import UnicodeWriter
from requirements.models import ProgressItem, DistinctionType, SemesterType
def get_members(init_term=None,
                include_electees=True,
                include_stopped_electing=False,
                include_actives=True,
                include_grads=True,
                include_ugrads=True,
                include_alums=False,
                only_around=False,
                only_active_status=False,
                only_da=False,
                only_pa=False):
    """Return a distinct queryset of MemberProfiles filtered by the flags.

    init_term: if given, restrict to members initiated that term at the
        Michigan Gamma (MI-G) chapter.
    include_*: status (electee/active) and standing (grad/ugrad/alumni)
        partitions to keep in the result.
    only_around: require progress recorded in the current or previous
        full term.
    only_active_status / only_da / only_pa: require an 'Active' /
        'Distinguished Active' / 'Prestigious Active' distinction in the
        current or previous full term.
    """
    members = MemberProfile.objects.all()
    # handle status partitions
    if not include_electees:
        members = members.exclude(status__name='Electee')
    elif not include_stopped_electing:
        # Keep electees, but drop the ones that gave up on electing.
        members = members.exclude(still_electing=False,
                                  status__name='Electee')
    if not include_actives:
        members = members.exclude(status__name='Active')
    # handle standing partitions
    if not include_grads:
        members = members.exclude(standing__name="Graduate")
    if not include_ugrads:
        members = members.exclude(standing__name='Undergraduate')
    if not include_alums:
        members = members.exclude(standing__name='Alumni')
    # "Recent" for the only_* flags means the current or previous full term.
    last_two_terms = [
        AcademicTerm.get_current_term(),
        AcademicTerm.get_current_term().get_previous_full_term()
    ]
    if only_around:
        recent_progress = ProgressItem.objects.filter(term__in=last_two_terms)
        members = members.filter(progressitem__in=recent_progress)
    if only_active_status:
        recent_active = Distinction.objects.filter(
            term__in=last_two_terms,
            distinction_type__name='Active'
        )
        members = members.filter(distinction__in=recent_active)
    if only_da:
        recent_active = Distinction.objects.filter(
            term__in=last_two_terms,
            distinction_type__name='Distinguished Active'
        )
        members = members.filter(distinction__in=recent_active)
    if only_pa:
        recent_active = Distinction.objects.filter(
            term__in=last_two_terms,
            distinction_type__name='Prestigious Active'
        )
        members = members.filter(distinction__in=recent_active)
    if init_term:
        members = members.filter(
            init_term=init_term,
            init_chapter__state='MI',
            init_chapter__letter='G'
        )
    return members.distinct()
def get_distribution(cls, **kwargs):
    """Annotate each *cls* row with its count of matching member profiles.

    *kwargs* are forwarded to get_members() to select which members count.
    """
    selected = get_members(**kwargs)
    queryset = cls.objects.filter(memberprofile__in=selected)
    return queryset.annotate(num_members=Count('memberprofile'))
def get_major_distribution(**kwargs):
    """Majors annotated with the number of matching members (see get_members)."""
    return get_distribution(Major, **kwargs)
def get_gender_distribution(**kwargs):
    """Count the selected members for each gender choice.

    Returns a list of {'name': label, 'num_members': count} dicts, one per
    GENDER_CHOICES entry; *kwargs* are forwarded to get_members().
    """
    members = get_members(**kwargs)
    distribution = []
    for code, label in GENDER_CHOICES:
        count = members.filter(gender=code).distinct().count()
        distribution.append({'name': label, 'num_members': count})
    return distribution
def get_alum_mail_pref_distribution(**kwargs):
    """Count the selected members for each alumni mail-frequency choice.

    Returns a list of {'name': label, 'num_members': count} dicts, one per
    ALUM_MAIL_FREQ_CHOICES entry; *kwargs* are forwarded to get_members().
    """
    members = get_members(**kwargs)
    distribution = []
    for code, label in ALUM_MAIL_FREQ_CHOICES:
        count = members.filter(alum_mail_freq=code).distinct().count()
        distribution.append({'name': label, 'num_members': count})
    return distribution
def get_meeting_interest_distribution(**kwargs):
    """Count the selected members by willingness to speak at meetings.

    Returns [{'name': True, ...}, {'name': False, ...}]; *kwargs* are
    forwarded to get_members().
    """
    members = get_members(**kwargs)
    results = []
    for flag in (True, False):
        count = members.filter(meeting_speak=flag).distinct().count()
        results.append({'name': flag, 'num_members': count})
    return results
def get_status_distribution(**kwargs):
    """Statuses annotated with the number of matching members (see get_members)."""
    return get_distribution(Status, **kwargs)
def get_standing_distribution(**kwargs):
    """Standings annotated with the number of matching members (see get_members)."""
    return get_distribution(Standing, **kwargs)
def get_distinction_distribution(**kwargs):
    """DistinctionTypes annotated with how many matching distinctions the
    selected members earned, optionally restricted to a 'term' kwarg.

    Remaining *kwargs* are forwarded to get_members().
    """
    term = kwargs.pop('term', None)
    selected = get_members(**kwargs)
    earned = Distinction.objects.filter(member__in=selected)
    if term:
        earned = earned.filter(term=term)
    annotated = DistinctionType.objects.filter(distinction__in=earned)
    return annotated.annotate(num_members=Count('distinction'))
def get_event_led_distribution(**kwargs):
    """Split the selected members by whether they led an event.

    A 'term' kwarg restricts the events considered; remaining *kwargs* are
    forwarded to get_members().  Returns
    [{'name': True, ...}, {'name': False, ...}].
    """
    term = kwargs.pop('term', None)
    members = get_members(**kwargs)
    if term:
        events = CalendarEvent.objects.filter(term=term)
    else:
        events = CalendarEvent.objects.all()
    total = members.count()
    leaders = members.filter(event_leader__in=events).distinct().count()
    return [
        {'name': True, 'num_members': leaders},
        {'name': False, 'num_members': total - leaders},
    ]
def get_was_officer_distribution(**kwargs):
    """Split the selected members by whether they held an officer position.

    A 'term' kwarg restricts the officer records considered; remaining
    *kwargs* are forwarded to get_members().  Returns
    [{'name': True, ...}, {'name': False, ...}].
    """
    term = kwargs.pop('term', None)
    members = get_members(**kwargs)
    if term:
        officers = Officer.objects.filter(term=term)
    else:
        officers = Officer.objects.all()
    total = members.count()
    held_office = members.filter(officer__in=officers).distinct().count()
    return [
        {'name': True, 'num_members': held_office},
        {'name': False, 'num_members': total - held_office},
    ]
def get_shirt_size_distribution(**kwargs):
    """Shirt sizes annotated with the number of matching members (see get_members)."""
    return get_distribution(ShirtSize, **kwargs)
def get_init_chapter_distribution(**kwargs):
    """Initiation chapters annotated with the number of matching members (see get_members)."""
    return get_distribution(TBPChapter, **kwargs)
def get_year_when_join_distribution(**kwargs):
    """Distribution of the number of terms between initiation and expected
    graduation for the selected members.

    Terms are counted 3 per year, so the value is
    3*(grad_year - init_year) + (grad_semester - init_semester).
    Returns a list of {'name': terms_diff, 'num_members': count} dicts;
    *kwargs* are forwarded to get_members().

    Fixes: removed a dead 'pass'; the two branch comments were swapped
    (month > 6 is a Fall-term graduation, not Winter); dict.items()
    replaces the Python-2-only iteritems() (works on both 2 and 3).
    """
    distribution = {}
    members = get_members(**kwargs)
    for member in members:
        if member.expect_grad_date.month > 6:
            # Graduating in the second half of the year -> Fall term.
            grad_term = SemesterType.objects.get(name='Fall')
        else:
            # Graduating in the first half of the year -> Winter term.
            grad_term = SemesterType.objects.get(name='Winter')
        year_diff = member.expect_grad_date.year - member.init_term.year
        term_diff = grad_term - member.init_term.semester_type
        terms_diff = 3 * year_diff + term_diff
        distribution[terms_diff] = distribution.get(terms_diff, 0) + 1
    return [
        {'name': key, 'num_members': value}
        for key, value in distribution.items()
    ]
def get_area_chart_of_distribution(dist):
    # TODO: unimplemented stub -- intended to build an area chart from a
    # distribution as returned by the get_*_distribution helpers.
    pass
def get_members_for_COE():
    """Return an HTTP CSV attachment of active (non-alumni) members for the
    College of Engineering: name, uniqname, last-term activity, officer
    positions held in the current/previous term, standing and major(s)."""
    members = MemberProfile.get_actives().exclude(standing__name='Alumni')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="MemberData.csv"'
    writer = UnicodeWriter(response)
    writer.writerow([
        'First Name',
        'Last Name',
        'uniqname',
        'Active?',
        'Officer?',
        'Standing',
        'Major'
    ])
    for member in members:
        current_term = AcademicTerm.get_current_term()
        # 'Active' if the member earned an Active distinction last term.
        was_active = 'Active' if Distinction.objects.filter(
            member=member,
            term=current_term.get_previous_full_term()
        ).exists() else 'Inactive'
        officer_terms = Officer.objects.filter(
            user=member,
            term__in=[
                current_term.get_previous_full_term(),
                current_term,
            ]
        )
        if officer_terms.exists():
            # e.g. "President Fall 2014, Winter 2015" per position held.
            officer_pos = ', '.join(
                [unicode(officer.position) + ' ' +
                 ', '.join(
                     [unicode(term) for term in officer.term.all()]
                 ) for officer in officer_terms])
        else:
            officer_pos = 'Member'
        writer.writerow([
            member.first_name,
            member.last_name,
            member.uniqname,
            was_active,
            officer_pos,
            member.standing.name,
            ', '.join([major.name for major in member.major.all()])
        ])
    return response
def get_members_for_email():
    """Return an HTTP CSV attachment of all members with the fields needed
    for an email campaign, including each member's most recent activity."""
    members = MemberProfile.objects.all().order_by('last_name','first_name','uniqname')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="MemberData_forEmail.csv"'
    writer = UnicodeWriter(response)
    writer.writerow([
        'First Name',
        'Last Name',
        'uniqname',
        'Status',
        'Standing',
        'Email Preference',
        'Corporate Email Preference',
        'Graduation Date',
        'Most Recent Event'
    ])
    for member in members:
        # NOTE(review): current_term is never used in this function.
        current_term = AcademicTerm.get_current_term()
        # Most recent term in which the member either logged progress or
        # attended an event; 'None' if neither exists.
        progress_terms = AcademicTerm.objects.filter(progressitem__in=member.progressitem_set.all()).distinct()
        events = CalendarEvent.objects.filter(eventshift__in=member.event_attendee.all()).distinct()
        event_terms = AcademicTerm.objects.filter(calendarevent__in=events).distinct()
        if progress_terms.exists():
            if event_terms.exists():
                most_recent_term = max(max(progress_terms),max(event_terms)).get_abbreviation()
            else:
                most_recent_term = max(progress_terms).get_abbreviation()
        else:
            if event_terms.exists():
                most_recent_term = max(event_terms).get_abbreviation()
            else:
                most_recent_term = 'None'
        stopped_electing = member.is_electee() and not member.still_electing
        # Mail frequency only applies to alumni.
        mail_pref = member.get_alum_mail_freq_display()
        if not member.is_alumni():
            mail_pref = 'N/A'
        writer.writerow([
            member.first_name,
            member.last_name,
            member.uniqname,
            unicode(member.status) if not stopped_electing else unicode(member.status)+' (stopped)',
            unicode(member.standing),
            mail_pref,
            'TRUE' if member.jobs_email else 'FALSE',
            unicode(member.expect_grad_date),
            most_recent_term,
        ])
return response | apache-2.0 |
chrisburr/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
    """Time lars_path and orthogonal_mp, with and without a precomputed
    Gram matrix, over the grid samples_range x features_range.

    Returns a dict mapping a ratio label to a (features x samples) array
    of LARS/OMP timing ratios.
    """
    it = 0
    results = dict()
    # Timing grids indexed [i_features, i_samples].
    lars = np.empty((len(features_range), len(samples_range)))
    lars_gram = lars.copy()
    omp = lars.copy()
    omp_gram = lars.copy()
    max_it = len(samples_range) * len(features_range)
    for i_s, n_samples in enumerate(samples_range):
        for i_f, n_features in enumerate(features_range):
            it += 1
            # NOTE(review): integer division here is Python-2 behaviour;
            # under Python 3 this is a float and would need '//'.
            n_informative = n_features / 10
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            # dataset_kwargs = {
            #     'n_train_samples': n_samples,
            #     'n_test_samples': 2,
            #     'n_features': n_features,
            #     'n_informative': n_informative,
            #     'effective_rank': min(n_samples, n_features) / 10,
            #     #'effective_rank': None,
            #     'bias': 0.0,
            # }
            dataset_kwargs = {
                'n_samples': 1,
                'n_components': n_features,
                'n_features': n_samples,
                'n_nonzero_coefs': n_informative,
                'random_state': 0
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
            # Fortran order benefits the column-wise access in both solvers.
            X = np.asfortranarray(X)
            # gc.collect() before each timing keeps collection pauses out
            # of the measured interval.
            gc.collect()
            print("benchmarking lars_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            lars_gram[i_f, i_s] = delta
            gc.collect()
            print("benchmarking lars_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, Gram=None, max_iter=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            lars[i_f, i_s] = delta
            gc.collect()
            print("benchmarking orthogonal_mp (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute=True,
                          n_nonzero_coefs=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            omp_gram[i_f, i_s] = delta
            gc.collect()
            print("benchmarking orthogonal_mp (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute=False,
                          n_nonzero_coefs=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            omp[i_f, i_s] = delta
    results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
    results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
    return results
if __name__ == '__main__':
    samples_range = np.linspace(1000, 5000, 5).astype(np.int)
    features_range = np.linspace(1000, 5000, 5).astype(np.int)
    results = compute_bench(samples_range, features_range)
    max_time = max(np.max(t) for t in results.values())
    import pylab as pl
    fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
    # One heat map per timing-ratio matrix, centred on a ratio of 1.
    # NOTE: .iteritems() and list-returning map() are Python-2 idioms.
    for i, (label, timings) in enumerate(sorted(results.iteritems())):
        ax = fig.add_subplot(1, 2, i)
        vmax = max(1 - timings.min(), -1 + timings.max())
        pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
        ax.set_xticklabels([''] + map(str, samples_range))
        ax.set_yticklabels([''] + map(str, features_range))
        pl.xlabel('n_samples')
        pl.ylabel('n_features')
        pl.title(label)
    pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
    # Shared horizontal colorbar beneath the two panels.
    ax = pl.axes([0.1, 0.08, 0.8, 0.06])
    pl.colorbar(cax=ax, orientation='horizontal')
    pl.show()
| bsd-3-clause |
fyffyt/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
# Fix the RNG seed so the k-means initializations (and thus the benchmark
# numbers) are reproducible from run to run.
np.random.seed(42)

digits = load_digits()
data = scale(digits.data)

n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target

# Number of samples drawn when estimating the silhouette coefficient
# (the exact computation is quadratic in n_samples).
sample_size = 300

print("n_digits: %d, \t n_samples %d, \t n_features %d"
      % (n_digits, n_samples, n_features))

print(79 * '_')
# NOTE: the two adjacent string literals below are concatenated at parse
# time, so '% 9s' formats the whole concatenated header (min field width 9).
print('% 9s' % 'init'
      ' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
    """Fit ``estimator`` on ``data`` and print one row of benchmark results.

    Reads the module-level ``labels`` (ground truth) and ``sample_size``
    (silhouette subsample size).  Prints the fit time, inertia and the
    cluster quality metrics listed in the module docstring.
    """
    start = time()
    estimator.fit(data)
    elapsed = time() - start
    fitted = estimator.labels_
    scores = (
        metrics.homogeneity_score(labels, fitted),
        metrics.completeness_score(labels, fitted),
        metrics.v_measure_score(labels, fitted),
        metrics.adjusted_rand_score(labels, fitted),
        metrics.adjusted_mutual_info_score(labels, fitted),
        metrics.silhouette_score(data, fitted,
                                 metric='euclidean',
                                 sample_size=sample_size),
    )
    print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
          % ((name, elapsed, estimator.inertia_) + scores))
# Benchmark the three initialization strategies on the full-dimensional data.
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="k-means++", data=data)

bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
              name="random", data=data)

# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
              name="PCA-based",
              data=data)
print(79 * '_')

###############################################################################
# Visualize the results on PCA-reduced data

reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)

# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].

# Plot the decision boundary. For that, we will assign a color to each
# point of a mesh covering the reduced data (plus a 1-unit margin).
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')

plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/music21/audioSearch/scoreFollower.py | 1 | 22926 | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Name: audioSearch.scoreFollower.py
# Purpose: Detection of the position in the score in real time
#
#
# Authors: Jordi Bartolome
# Michael Scott Cuthbert
#
# Copyright: Copyright © 2011 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#------------------------------------------------------------------------------
from time import time
import math
import os
import unittest
from music21 import scale
from music21 import search
from music21 import environment
# Module identifier used in environment/debug messages.  The previous value,
# 'audioSearch/transcriber.py', was a copy-paste slip from the transcriber
# module; this file is scoreFollower.py.
_MOD = 'audioSearch/scoreFollower.py'
environLocal = environment.Environment(_MOD)
class ScoreFollower(object):
    def __init__(self, scoreStream=None):
        '''
        Create a score follower for `scoreStream` (a music21 stream, or None).

        All matching state (positions, countdown, page bounds, counters) is
        initialized to "start of score".
        '''
        self.scoreStream = scoreStream
        if scoreStream is not None:
            # Flattened notes and rests -- the sequence actually matched against.
            self.scoreNotesOnly = scoreStream.flat.notesAndRests
        else:
            self.scoreNotesOnly = None
        # Temporary wave file used when not reading from the microphone.
        self.waveFile = environLocal.getRootTempDir() + \
                        os.path.sep + \
                        'scoreFollowerTemp.wav'
        # NOTE(review): 'lastNotePostion' is a misspelling of 'lastNotePosition'
        # (set correctly below); this attribute appears to be dead.  Kept for
        # backward compatibility -- TODO confirm nothing reads it, then remove.
        self.lastNotePostion = 0
        self.currentSample = 0          # next sample index to read from the wave file
        self.totalFile = 0              # total frames in the wave file (0 = unknown yet)
        self.lastNotePosition = 0       # index in scoreNotesOnly of the last matched note
        self.startSearchAtSlot = 0      # slot where the next matching search begins
        self.predictedNotePosition = 0  # predicted slot for the next recorded fragment
        self.countdown = 0              # consecutive bad-match counter (>= 5 aborts)
        self.END_OF_SCORE = False
        self.qle = None                 # quarter-length estimator, reused across calls
        self.firstNotePage = None
        self.lastNotePage = None
        self.firstSlot = 1              # first slot currently shown on screen
        self.silencePeriodCounter = 0   # consecutive all-rest recording windows
        self.notesCounter = 0           # consecutive windows containing real notes
        self.begins = True              # True until the first confident match
        self.useScale = None
        self.silencePeriod = None
        self.result = None
        self.useMic = None
        self.processing_time = None     # seconds spent analyzing the last window
        self.seconds_recording = None   # length of each recording window, in seconds
    def runScoreFollower(
        self,
        plot=False,
        useMic=False,
        seconds=15.0,
        useScale=None,
        ):
        '''
        The main program. It runs the 'repeatTranscription' until the
        performance ends.

        If `useScale` is none, then it uses a scale.ChromaticScale

        `plot` is currently ignored (the plotting code below is disabled).
        '''
        if useScale is None:
            useScale = scale.ChromaticScale('C4')
        self.seconds_recording = seconds
        self.useMic = useMic
        self.useScale = useScale
        self.result = False
        # repeatTranscription returns False while the performance continues and
        # a truthy exit-type string once it should stop.
        while(self.result is False):
            self.result = self.repeatTranscription()

#        if plot is True:
#            try:
#                import matplotlib.pyplot # for find
#            except ImportError:
#                raise AudioSearchException("Cannot plot without matplotlib installed.")
#
#            matplotlib.pyplot.plot(listplot)
#            matplotlib.pyplot.show()
        environLocal.printDebug("* END")
    def repeatTranscription(self):
        '''
        First, it records from the microphone (or from a file if is used for
        test). Later, it processes the signal in order to detect the pitches.
        It converts them into music21 objects and compares them with the score.
        It finds the best matching position of the recorded signal with the
        score, and decides, depending on matching accuracy, the last note
        predicted and some other parameters, in which position the recorded
        signal is.

        It returns a value that is False if the song has not finished, or true
        if there has been a problem like some consecutive bad matchings or the
        score has finished.

        >>> from music21.audioSearch import scoreFollower
        >>> scoreNotes = " ".join(["c4", "d", "e", "f", "g", "a", "b", "c'", "c", "e",
        ...     "g", "c'", "a", "f", "d", "c#", "d#", "f#","c", "e", "g", "c'",
        ...     "a", "f", "d", "c#", "d#", "f#"])
        >>> scNotes = converter.parse("tinynotation: 4/4 " + scoreNotes, makeNotation=False)
        >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
        >>> ScF.useMic = False
        >>> import os #_DOCS_HIDE
        >>> readPath = os.path.join(common.getSourceFilePath(), 'audioSearch', 'test_audio.wav') #_DOCS_HIDE
        >>> ScF.waveFile = readPath #_DOCS_HIDE
        >>> #_DOCS_SHOW ScF.waveFile = 'test_audio.wav'
        >>> ScF.seconds_recording = 10
        >>> ScF.useScale = scale.ChromaticScale('C4')
        >>> ScF.currentSample = 0
        >>> exitType = ScF.repeatTranscription()
        >>> print(exitType)
        False
        >>> print(ScF.lastNotePosition)
        10
        '''
        from music21 import audioSearch
        # print "WE STAY AT:",
        # print self.lastNotePosition, len(self.scoreNotesOnly),
        # print "en percent %d %%" % (self.lastNotePosition * 100 / len(self.scoreNotesOnly)),
        # print " this search begins at: ", self.startSearchAtSlot,
        # print "countdown %d" % self.countdown
        # print "Measure last note", self.scoreStream[self.lastNotePosition].measureNumber

        environLocal.printDebug("repeat transcription starting")
        # Step 1: acquire one window of audio, either live or from the file.
        if self.useMic is True:
            freqFromAQList = audioSearch.getFrequenciesFromMicrophone(
                length=self.seconds_recording,
                storeWaveFilename=None,
                )
        else:
            freqFromAQList, self.waveFile, self.currentSample = \
                audioSearch.getFrequenciesFromPartialAudioFile(
                    self.waveFile,
                    length=self.seconds_recording,
                    startSample=self.currentSample,
                    )
            if self.totalFile == 0:
                self.totalFile = self.waveFile.getnframes()
        environLocal.printDebug("got Frequencies from Microphone")

        # Step 2: turn raw frequencies into note/duration lists.
        time_start = time()
        detectedPitchesFreq = audioSearch.detectPitchFrequencies(
            freqFromAQList, self.useScale)
        detectedPitchesFreq = audioSearch.smoothFrequencies(
            detectedPitchesFreq)
        detectedPitchObjects, unused_listplot = \
            audioSearch.pitchFrequenciesToObjects(
                detectedPitchesFreq, self.useScale)
        notesList, durationList = audioSearch.joinConsecutiveIdenticalPitches(
            detectedPitchObjects)
        self.silencePeriodDetection(notesList)
        environLocal.printDebug("made it to here...")
        scNotes = self.scoreStream[self.lastNotePosition:self.lastNotePosition
                                   + len(notesList)]
        #print "1"
        transcribedScore, self.qle = audioSearch.notesAndDurationsToStream(
            notesList,
            durationList,
            scNotes=scNotes,
            qle=self.qle,
            )
        #print "2"
        # Step 3: find the best matching position of the window in the score.
        totalLengthPeriod, self.lastNotePosition, prob, END_OF_SCORE = \
            self.matchingNotes(
                self.scoreStream,
                transcribedScore,
                self.startSearchAtSlot,
                self.lastNotePosition,
                )
        #print "3"
        self.processing_time = time() - time_start
        environLocal.printDebug("and even to here...")
        if END_OF_SCORE is True:
            exitType = "endOfScore"  # "endOfScore"
            return exitType

        # estimate position, or exit if we can't at all...
        exitType = self.updatePosition(prob, totalLengthPeriod, time_start)

        if self.useMic is False:  # reading from the disc (only for TESTS)
            # skip ahead the processing time.
            freqFromAQList, junk, self.currentSample = \
                audioSearch.getFrequenciesFromPartialAudioFile(
                    self.waveFile,
                    length=self.processing_time,
                    startSample=self.currentSample,
                    )

        if self.lastNotePosition > len(self.scoreNotesOnly):
            #print "finishedPerforming"
            exitType = "finishedPerforming"
        elif (self.useMic is False and self.currentSample >= self.totalFile):
            #print "waveFileEOF"
            exitType = "waveFileEOF"

        environLocal.printDebug("about to return -- exitType: %s " % exitType)
        return exitType
def silencePeriodDetection(self, notesList):
'''
Detection of consecutive periods of silence.
Useful if the musician has some consecutive measures of silence.
>>> from music21.audioSearch import scoreFollower
>>> scNotes = corpus.parse('luca/gloria').parts[0].flat.notes
>>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
>>> notesList = []
>>> notesList.append(note.Rest())
>>> ScF.notesCounter = 3
>>> ScF.silencePeriodCounter = 0
>>> ScF.silencePeriodDetection(notesList)
>>> ScF.notesCounter
0
>>> ScF.silencePeriodCounter
1
>>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
>>> notesList = []
>>> notesList.append(note.Rest())
>>> notesList.append(note.Note())
>>> ScF.notesCounter = 1
>>> ScF.silencePeriodCounter = 3
>>> ScF.silencePeriodDetection(notesList)
>>> ScF.notesCounter
2
>>> ScF.silencePeriodCounter
0
'''
onlyRests = True
for i in notesList:
if i.name != 'rest':
onlyRests = False
if onlyRests is True:
self.silencePeriod = True
self.notesCounter = 0
self.silencePeriodCounter += 1
else:
self.silencePeriod = False
self.notesCounter += 1
self.silencePeriodCounter = 0
    def updatePosition(self, prob, totalLengthPeriod, time_start):
        '''
        It updates the position in which the scoreFollower starts to search at,
        and the predicted position in which the new fragment of the score
        should start. It updates these positions taking into account the value
        of the "countdown", and if is the beginning of the song or not.

        It returns the exitType, which determines whether the scoreFollower has
        to stop (and why) or not.

        See example of a bad prediction at the beginning of the song:

        >>> from time import time
        >>> from music21.audioSearch import scoreFollower
        >>> scNotes = corpus.parse('luca/gloria').parts[0].flat.notes
        >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
        >>> ScF.begins = True
        >>> ScF.startSearchAtSlot = 15
        >>> ScF.countdown = 0
        >>> prob = 0.5 # bad prediction
        >>> totalLengthPeriod = 15
        >>> time_start = time()
        >>> exitType = ScF.updatePosition(prob, totalLengthPeriod, time_start)
        >>> print(ScF.startSearchAtSlot)
        0

        Different examples for different countdowns:

        Countdown = 0:
        The last matching was good, so it calculates the position in which it
        starts to search at, and the position in which the music should start.

        >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
        >>> ScF.scoreNotesOnly = scNotes.flat.notesAndRests
        >>> ScF.begins = False
        >>> ScF.countdown = 0
        >>> ScF.startSearchAtSlot = 15
        >>> ScF.lastNotePosition = 38
        >>> ScF.predictedNotePosition = 19
        >>> ScF.seconds_recording = 10
        >>> prob = 0.8
        >>> totalLengthPeriod = 25
        >>> time_start = time()
        >>> exitType = ScF.updatePosition(prob, totalLengthPeriod, time_start)
        >>> print(ScF.startSearchAtSlot)
        38
        >>> ScF.predictedNotePosition >=38
        True

        Countdown = 1:
        Now it doesn't change the slot in which it starts to search at.
        It also predicts the position in which the music should start.

        >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
        >>> ScF.begins = False
        >>> ScF.countdown = 1
        >>> ScF.startSearchAtSlot = 15
        >>> ScF.lastNotePosition = 15
        >>> ScF.predictedNotePosition = 19
        >>> ScF.seconds_recording = 10
        >>> prob = 0.8
        >>> totalLengthPeriod = 25
        >>> time_start = time()
        >>> exitType = ScF.updatePosition(prob, totalLengthPeriod, time_start)
        >>> print(ScF.startSearchAtSlot)
        15
        >>> ScF.predictedNotePosition > 15
        True

        Countdown = 2:
        Now it starts searching at the beginning of the page of the screen.
        The note prediction is also the beginning of the page.

        >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
        >>> ScF.begins = False
        >>> ScF.countdown = 2
        >>> ScF.startSearchAtSlot = 15
        >>> ScF.lastNotePosition = 15
        >>> ScF.predictedNotePosition = 19
        >>> ScF.seconds_recording = 10
        >>> prob = 0.8
        >>> totalLengthPeriod = 25
        >>> time_start = time()
        >>> exitType = ScF.updatePosition(prob, totalLengthPeriod, time_start)
        >>> print(ScF.startSearchAtSlot)
        15
        >>> print(ScF.predictedNotePosition)
        39

        Countdown = 4:
        Now it starts searching at the beginning of the page of the screen.
        The note prediction is also the beginning of the page.

        >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
        >>> ScF.begins = False
        >>> ScF.countdown = 4
        >>> ScF.startSearchAtSlot = 15
        >>> ScF.lastNotePosition = 15
        >>> ScF.predictedNotePosition = 19
        >>> ScF.seconds_recording = 10
        >>> prob = 0.8
        >>> totalLengthPeriod = 25
        >>> time_start = time()
        >>> exitType = ScF.updatePosition(prob, totalLengthPeriod, time_start)
        >>> print(ScF.startSearchAtSlot)
        0
        >>> print(ScF.predictedNotePosition)
        0

        Countdown = 5:
        Now it stops the program.

        >>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
        >>> ScF.begins = False
        >>> ScF.countdown = 5
        >>> ScF.startSearchAtSlot = 15
        >>> ScF.lastNotePosition = 15
        >>> ScF.predictedNotePosition = 19
        >>> ScF.seconds_recording = 10
        >>> prob = 0.8
        >>> totalLengthPeriod = 25
        >>> time_start = time()
        >>> exitType = ScF.updatePosition(prob, totalLengthPeriod, time_start)
        >>> print(exitType)
        countdownExceeded
        '''
        # exitType stays False unless an abort condition is hit below.
        exitType = False
        if not self.begins:
            # The countdown encodes how many consecutive windows matched badly;
            # each level widens the search progressively (see doctests above).
            if self.countdown == 0:
                # successfully matched last note; predict next position.
                self.startSearchAtSlot = self.lastNotePosition
                processing_time = time() - time_start
                self.predictedNotePosition = self.predictNextNotePosition(
                    totalLengthPeriod, processing_time)
            elif self.countdown == 1:
                # do nothing to startSearch or predicted note position
                totalSeconds = 2 * (time() - time_start) + \
                    self.seconds_recording
                self.predictedNotePosition = self.predictNextNotePosition(
                    totalLengthPeriod, totalSeconds)
            elif self.countdown == 2:
                # another chance to match notes
                totalSeconds = 3 * (time() - time_start) + \
                    self.seconds_recording
                self.predictedNotePosition = self.predictNextNotePosition(
                    totalLengthPeriod, totalSeconds)
            elif self.countdown == 3:
                # searching at the beginning of the shown pages
                self.lastNotePosition = self.firstSlot
                self.startSearchAtSlot = self.firstSlot
                self.predictedNotePosition = self.firstSlot
            elif self.countdown == 4:
                # SEARCHING IN ALL THE SCORE;
                # MAYBE THE MUSICIAN HAS STARTED FROM THE BEGINNING
                self.lastNotePosition = 0
                self.startSearchAtSlot = 0
                self.predictedNotePosition = 0
            else:  # self.countdown >= 5:
                #print "Exit due to bad recognition or rests"
                environLocal.printDebug("COUNTDOWN = 5")
                exitType = 'countdownExceeded'
        else:  # at beginning
            if prob < 0.7:  # to avoid rests at the beginning
                self.lastNotePosition = 0
                self.startSearchAtSlot = 0
                environLocal.printDebug("Silence or noise at the beginning")
            else:  # got some good notes at the beginning!
                self.begins = False
                # print "GO!"
            if self.countdown >= 5:
                exitType = "5consecutiveCountdownsBeginning"
        return exitType
def getFirstSlotOnScreen(self):
'''
Returns the index of the first element on the screen right now.
Doesn't work. (maybe it's not necessary)
'''
return 0
def predictNextNotePosition(self, totalLengthPeriod, totalSeconds):
'''
It predicts the position in which the first note of the following
recording note should start, taking into account the processing time of
the computer. It has two inputs: totalLengthPeriod, that is the number
of pulses or beats in the recorded audio, and totalSeconds, that is the
length in seconds of the processing time.
It returns a number with the position of the predicted note in the
score.
>>> from time import time
>>> from music21.audioSearch import scoreFollower
>>> scNotes = corpus.parse('luca/gloria').parts[0].flat.notes
>>> ScF = scoreFollower.ScoreFollower(scoreStream=scNotes)
>>> ScF.scoreNotesOnly = ScF.scoreStream.flat.notesAndRests
>>> ScF.lastNotePosition = 14
>>> ScF.seconds_recording = 10.0
>>> totalLengthPeriod = 8
>>> totalSeconds = 2.0
>>> predictedStartPosition = ScF.predictNextNotePosition(
... totalLengthPeriod, totalSeconds)
>>> print(predictedStartPosition)
18
'''
extraLength = totalLengthPeriod * totalSeconds / float(self.seconds_recording)
middleRhythm = 0
slots = 0
while middleRhythm < extraLength:
middleRhythm = middleRhythm + self.scoreNotesOnly[
self.lastNotePosition + slots].quarterLength
slots = slots + 1
predictedStartingNotePosition = int(slots + self.lastNotePosition)
return predictedStartingNotePosition
    def matchingNotes(
        self,
        scoreStream,
        transcribedScore,
        notePrediction,
        lastNotePosition,
        ):
        '''
        Slide a window the size of the transcribed fragment over the score,
        score each candidate position with a weighted approximate-note search,
        and let `audioSearch.decisionProcess` pick the best one.

        Returns a 4-tuple: (totalLength of the chosen window in quarter
        lengths, updated lastNotePosition, match probability -- 0 when the
        countdown is active -- and the END_OF_SCORE flag).
        '''
        from music21 import audioSearch
        # Analyzing streams
        tn_recording = int(len(transcribedScore.flat.notesAndRests))
        totScores = []
        beginningData = []
        lengthData = []
        END_OF_SCORE = False
        # take 10% more of samples
        tn_window = int(math.ceil(tn_recording * 1.1))
        # hop: stride between candidate windows (quarter of the window size).
        hop = int(math.ceil(tn_window / 4))
        if hop == 0:
            iterations = 1
        else:
            iterations = int((math.floor(len(scoreStream) / hop)) -
                             math.ceil(tn_window / hop))
        # Build one candidate sub-stream per hop position, remembering where
        # each starts (beginningData) and how long it is (lengthData).
        for i in range(iterations):
            scNotes = scoreStream[i * hop + 1:i * hop + tn_recording + 1]
            name = "%d" % i
            beginningData.append(i * hop + 1)
            lengthData.append(tn_recording)
            scNotes.id = name
            totScores.append(scNotes)
        listOfParts = search.approximateNoteSearchWeighted(
            transcribedScore.flat.notesAndRests, totScores)
        #decision process
        if notePrediction > len(scoreStream) - tn_recording - hop - 1:
            # Prediction runs past the last full window: clamp it and flag EOF.
            notePrediction = len(scoreStream) - tn_recording - hop - 1
            END_OF_SCORE = True
            environLocal.printDebug("LAST PART OF THE SCORE")
        #lastCountdown = self.countdown
        position, self.countdown = audioSearch.decisionProcess(
            listOfParts,
            notePrediction,
            beginningData,
            lastNotePosition,
            self.countdown,
            self.firstNotePage,
            self.lastNotePage,
            )
        totalLength = 0
        number = int(listOfParts[position].id)
        if self.silencePeriod is True and self.silencePeriodCounter < 5:
            # A short all-rest window is forgiven: undo one countdown step.
            # print lastCountdown, self.countdown, lastNotePosition, beginningData[number], lengthData[number]
            environLocal.printDebug("All rest period")
            self.countdown -= 1
        if self.countdown != 0:
            probabilityHit = 0
        else:
            probabilityHit = listOfParts[position].matchProbability
        # The three alternative searches below are computed only for debugging
        # comparison (see the commented print statements); results are unused.
        unused_listOfParts2 = search.approximateNoteSearch(transcribedScore.flat.notesAndRests, totScores)
        unused_listOfParts3 = search.approximateNoteSearchNoRhythm(transcribedScore.flat.notesAndRests, totScores)
        unused_listOfParts4 = search.approximateNoteSearchOnlyRhythm(transcribedScore.flat.notesAndRests, totScores)
        # print "PROBABILITIES:",
        # print "pitches and durations weighted (current)",listOfParts[position].matchProbability,
        # print "pitches and durations without weighting" , listOfParts2[position].matchProbability,
        # print "pitches", listOfParts3[position].matchProbability,
        # print "durations",listOfParts4[position].matchProbability
        for i in range(len(totScores[number])):
            totalLength = totalLength + totScores[number][i].quarterLength
        if self.countdown == 0 and self.silencePeriodCounter == 0:
            lastNotePosition = beginningData[number] + lengthData[number]
        return totalLength, lastNotePosition, probabilityHit, END_OF_SCORE
#------------------------------------------------------------------------------
class TestExternal(unittest.TestCase):
    '''
    Tests that need external resources (a microphone); disabled by default.
    '''
    pass

    def runTest(self):
        pass

    # Prefixed with 'x' so the automatic test runner skips it: it records
    # 10 seconds of live audio from the microphone.
    def xtestRunScoreFollower(self):
        from music21 import corpus
        scNotes = corpus.parse('luca/gloria').parts[0].flat.notesAndRests
        ScF = ScoreFollower(scoreStream=scNotes)
        ScF.runScoreFollower(plot=False, useMic=True, seconds=10.0)
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # Run the module's doctests and TestExternal via music21's test harness.
    import music21
    music21.mainTest(TestExternal)
#------------------------------------------------------------------------------
# eof
| mit |
chiahaoliu/pdf_lib | pdf_lib/qdamp_test.py | 1 | 1510 | import os
import time
import yaml
import json
import datetime
import numpy as np
import pandas as pd
from time import strftime
from pprint import pprint
import matplotlib.pyplot as plt
from diffpy.Structure import loadStructure
from diffpy.Structure import StructureFormatError
from diffpy.srreal.structureadapter import nosymmetry
from diffpy.srreal.pdfcalculator import DebyePDFCalculator
from diffpy.srreal.pdfcalculator import PDFCalculator
from pdf_lib.glbl import pdfCal_cfg, Uiso, delta2
# Shared real-space PDF calculator; delta2 (correlated-motion parameter)
# comes from the project-wide configuration in pdf_lib.glbl.
cal = PDFCalculator()
cal.delta2 = delta2

# Test structures: fcc nickel and rock-salt NaCl (COD entry 1000041),
# both with the same isotropic displacement parameter.
ni = loadStructure('ni.cif')
ni.Uisoequiv = Uiso
nacl = loadStructure('1000041.cif')
nacl.Uisoequiv = Uiso
print("Uiso = {}".format(Uiso))
def qdamp_test(struc, rmax_val, qdamp_array=None):
    """Plot the PDF of ``struc`` for a range of ``qdamp`` values.

    Parameters
    ----------
    struc : diffpy Structure
        Structure whose PDF is calculated with the module-level ``cal``.
    rmax_val : float
        Upper r limit written into the shared ``pdfCal_cfg`` configuration.
    qdamp_array : numpy.ndarray, optional
        1-D array of qdamp values; one subplot is drawn per value.
        Defaults to ``np.arange(0, 0.1, 0.02)``.  (Previously the ``None``
        default crashed with ``AttributeError`` on ``None.size``.)
    """
    if qdamp_array is None:
        qdamp_array = np.arange(0, 0.1, 0.02)
    pdfCal_cfg['rmax'] = rmax_val
    N = qdamp_array.size
    fig, ax = plt.subplots(N, figsize=(20, 6),
                           sharex=True, sharey=True)
    # np.atleast_1d keeps the zip working when N == 1: plt.subplots returns
    # a bare Axes (not an array) for a single subplot.
    for _ax, qdamp_val in zip(np.atleast_1d(ax), qdamp_array):
        pdfCal_cfg['qdamp'] = qdamp_val
        r, g = cal(struc, **pdfCal_cfg)
        _ax.plot(r, g, linestyle='-',
                 label="{:.3f}".format(qdamp_val))
        _ax.legend()
    fig.suptitle('{}'.format(struc.composition))
    print(cal.rstep)
    print(cal.slope)
# Sweep qdamp over 0.00, 0.02, ..., 0.08 for both test structures.
qdamp_array = np.arange(0, 0.1, 0.02)
rmax = 50.
qdamp_test(ni, rmax, qdamp_array)
qdamp_test(nacl, rmax, qdamp_array)
| mit |
vybstat/scikit-learn | sklearn/utils/tests/test_extmath.py | 70 | 16531 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
    # density() must give the same answer for a dense array and for every
    # supported sparse representation of it.
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(10, 5))
    X[1, 2] = 0
    X[5, 3] = 0
    sparse_variants = (sparse.csr_matrix(X),
                       sparse.csc_matrix(X),
                       sparse.coo_matrix(X),
                       sparse.lil_matrix(X))
    for X_sparse in sparse_variants:
        assert_equal(density(X_sparse), density(X))
def test_uniform_weights():
    # With all-ones weights, weighted_mode must reduce to stats.mode,
    # whichever axis is chosen.
    rng = np.random.RandomState(0)
    x = rng.randint(10, size=(10, 5))
    uniform = np.ones(x.shape)
    for axis in (None, 0, 1):
        expected_mode, expected_score = stats.mode(x, axis)
        actual_mode, actual_score = weighted_mode(x, uniform, axis)
        assert_true(np.all(expected_mode == actual_mode))
        assert_true(np.all(expected_score == actual_score))
def test_random_weights():
    # Construct each row so its weighted mode is 6, with a score that can be
    # computed in closed form: the first five columns hold the value 6 and
    # get their weights boosted by 1.
    expected_mode = 6
    rng = np.random.RandomState(0)
    x = rng.randint(expected_mode, size=(100, 10))
    w = rng.random_sample(x.shape)
    x[:, :5] = expected_mode
    w[:, :5] += 1
    mode, score = weighted_mode(x, w, axis=1)
    assert_array_equal(mode, expected_mode)
    assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
    # logsumexp must add tiny magnitudes in log space without underflowing;
    # exponentiating the result should round-trip to the plain sum.
    # Try to add some smallish numbers in logspace
    x = np.array([1e-40] * 1000000)
    logx = np.log(x)
    assert_almost_equal(np.exp(logsumexp(logx)), x.sum())

    # The axis argument must behave like the corresponding ndarray.sum axis.
    X = np.vstack([x, x])
    logX = np.vstack([logx, logx])
    assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
    assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
    # Check that extmath.randomized_svd is consistent with linalg.svd
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10

    # generate a matrix X of approximate effective rank `rank` and no noise
    # component (very structured signal):
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))

    # compute the singular values of X using the slow exact method
    U, s, V = linalg.svd(X, full_matrices=False)

    # compute the singular values of X using the fast approximate method
    Ua, sa, Va = randomized_svd(X, k)
    assert_equal(Ua.shape, (n_samples, k))
    assert_equal(sa.shape, (k,))
    assert_equal(Va.shape, (k, n_features))

    # ensure that the singular values of both methods are equal up to the real
    # rank of the matrix
    assert_almost_equal(s[:k], sa)

    # check the singular vectors too (while not checking the sign)
    assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))

    # check the sparse matrix representation
    X = sparse.csr_matrix(X)

    # compute the singular values of X using the fast approximate method
    Ua, sa, Va = randomized_svd(X, k)
    # only the leading `rank` values are meaningful on the sparse input
    assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
    # norm and squared_norm must agree with numpy.linalg.norm, including on
    # data shifted/scaled to stress numerical stability.
    X = np.random.RandomState(42).randn(50, 63)
    X *= 100        # check stability
    X += 200

    assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
    assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
    assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
    # row_norms must match a manual per-row computation for both dense
    # arrays and CSR matrices, squared and unsquared.
    X = np.random.RandomState(42).randn(100, 100)
    sq_norm = (X ** 2).sum(axis=1)

    assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
    assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))

    Xcsr = sparse.csr_matrix(X, dtype=np.float32)
    assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
    assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
    # Check that extmath.randomized_svd can handle noisy matrices
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10

    # generate a matrix X with structure approximate rank `rank` and an
    # important noisy component
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.5,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))

    # compute the singular values of X using the slow exact method
    _, s, _ = linalg.svd(X, full_matrices=False)

    # compute the singular values of X using the fast approximate method
    # without the iterated power method
    _, sa, _ = randomized_svd(X, k, n_iter=0)

    # the approximation does not tolerate the noise:
    assert_greater(np.abs(s[:k] - sa).max(), 0.05)

    # compute the singular values of X using the fast approximate method with
    # iterated power method
    _, sap, _ = randomized_svd(X, k, n_iter=5)

    # the iterated power method is helping getting rid of the noise:
    assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
    # Check that extmath.randomized_svd copes with a full-rank matrix whose
    # singular values decay slowly (no clean low-rank structure).
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10

    # let us try again without 'low_rank component': just regularly but slowly
    # decreasing singular values: the rank of the data matrix is infinite
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=1.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))

    # compute the singular values of X using the slow exact method
    _, s, _ = linalg.svd(X, full_matrices=False)

    # compute the singular values of X using the fast approximate method
    # without the iterated power method
    _, sa, _ = randomized_svd(X, k, n_iter=0)

    # the approximation does not tolerate the noise:
    assert_greater(np.abs(s[:k] - sa).max(), 0.1)

    # compute the singular values of X using the fast approximate method with
    # iterated power method
    _, sap, _ = randomized_svd(X, k, n_iter=5)

    # the iterated power method is still managing to get most of the structure
    # at the requested rank
    assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limit impact
    n_samples = 100
    n_features = 500
    rank = 4
    k = 10

    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.5,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))

    # all three transpose strategies must approximate the exact SVD equally
    U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
                                random_state=0)
    U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
                                random_state=0)
    U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
                                random_state=0)
    U4, s4, V4 = linalg.svd(X, full_matrices=False)

    assert_almost_equal(s1, s4[:k], decimal=3)
    assert_almost_equal(s2, s4[:k], decimal=3)
    assert_almost_equal(s3, s4[:k], decimal=3)

    assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
                        decimal=2)
    assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
                        decimal=2)

    # in this case 'auto' is equivalent to transpose
    assert_almost_equal(s2, s3)
def test_svd_flip():
    # Check that svd_flip works in both situations, and reconstructs input.
    rs = np.random.RandomState(1999)
    n_samples = 20
    n_features = 10
    X = rs.randn(n_samples, n_features)
    # Check matrix reconstruction: flipping signs based on V must leave
    # the product U * S * V unchanged.
    U, S, V = linalg.svd(X, full_matrices=False)
    U1, V1 = svd_flip(U, V, u_based_decision=False)
    assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
    # Check transposed matrix reconstruction with the U-based decision.
    XT = X.T
    U, S, V = linalg.svd(XT, full_matrices=False)
    U2, V2 = svd_flip(U, V, u_based_decision=True)
    assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
    # Check that different flip methods are equivalent under reconstruction:
    # any sign applied to a column of U is compensated in the matching
    # row of V, so both decision rules must reconstruct the input.
    U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
    assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
    U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
    assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
    """With flip_sign=True the factor signs must not depend on the seed."""
    mat = np.array([[2.0, 0.0], [0.0, 1.0]])
    u_ref, s_ref, v_ref = randomized_svd(mat, 2, flip_sign=True,
                                         random_state=41)
    identity = np.eye(2)
    for rng_seed in range(10):
        u, s, v = randomized_svd(mat, 2, flip_sign=True,
                                 random_state=rng_seed)
        # Factors agree with the reference decomposition (signs included).
        assert_almost_equal(u_ref, u)
        assert_almost_equal(v_ref, v)
        # The factorization reconstructs the input ...
        assert_almost_equal(np.dot(u * s, v), mat)
        # ... and both factors remain orthonormal.
        assert_almost_equal(np.dot(u.T, u), identity)
        assert_almost_equal(np.dot(v.T, v), identity)
def test_cartesian():
    """cartesian() must enumerate the product in row-major order."""
    axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
    # Build the expected product with a nested comprehension instead of a
    # hand-written literal; ordering is last-axis-fastest.
    expected = np.array([[i, j, k]
                         for i in (1, 2, 3)
                         for j in (4, 5)
                         for k in (6, 7)])
    assert_array_equal(expected, cartesian(axes))
    # A single axis must come back as a column vector.
    single = np.arange(3)
    assert_array_equal(single[:, np.newaxis], cartesian((single,)))
def test_logistic_sigmoid():
    # Check correctness and robustness of logistic sigmoid implementation.
    # PEP 8 (E731): use ``def`` instead of assigning a lambda to a name;
    # behavior is unchanged.
    def naive_logistic(x):
        return 1 / (1 + np.exp(-x))

    def naive_log_logistic(x):
        return np.log(naive_logistic(x))

    # Moderate inputs: must agree with the naive closed-form expression.
    x = np.linspace(-2, 2, 50)
    assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
    # Extreme inputs: the naive formula would overflow/underflow, while the
    # stable implementation must return the asymptotic values.
    extreme_x = np.array([-100., 100.])
    assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
    # Check fast dot blas wrapper function
    if fast_dot is np.dot:
        # No BLAS shortcut was installed; nothing to compare against.
        return
    rng = np.random.RandomState(42)
    A = rng.random_sample([2, 10])
    B = rng.random_sample([2, 10])
    # Probe for a usable BLAS gemm; input-validation checks below only make
    # sense when the BLAS path is actually taken.
    try:
        linalg.get_blas_funcs(['gemm'])[0]
        has_blas = True
    except (AttributeError, ValueError):
        has_blas = False
    if has_blas:
        # Test _fast_dot for invalid input.
        # Maltyped data: mixed or non-float dtypes must be rejected.
        for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
            assert_raises(ValueError, _fast_dot, A.astype(dt1),
                          B.astype(dt2).T)
        # Malformed data.
        ## empty input (np.empty(0) is 1-d with zero elements)
        E = np.empty(0)
        assert_raises(ValueError, _fast_dot, E, E)
        ## ndim == 1
        assert_raises(ValueError, _fast_dot, A, A[0])
        ## ndim > 2
        assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
        ## min(shape) == 1
        assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
        # test for matrix mismatch error (2x10 . 2x10 is not conformable)
        assert_raises(ValueError, _fast_dot, A, A)
    # Test cov-like use case + dtypes: fast_dot must agree with np.dot for
    # all transpose combinations, in double and single precision.
    for dtype in ['f8', 'f4']:
        A = A.astype(dtype)
        B = B.astype(dtype)
        # col < row
        C = np.dot(A.T, A)
        C_ = fast_dot(A.T, A)
        assert_almost_equal(C, C_, decimal=5)
        C = np.dot(A.T, B)
        C_ = fast_dot(A.T, B)
        assert_almost_equal(C, C_, decimal=5)
        C = np.dot(A, B.T)
        C_ = fast_dot(A, B.T)
        assert_almost_equal(C, C_, decimal=5)
    # Test square matrix * rectangular use case.
    A = rng.random_sample([2, 2])
    for dtype in ['f8', 'f4']:
        A = A.astype(dtype)
        B = B.astype(dtype)
        C = np.dot(A, B)
        C_ = fast_dot(A, B)
        assert_almost_equal(C, C_, decimal=5)
        C = np.dot(A.T, B)
        C_ = fast_dot(A.T, B)
        assert_almost_equal(C, C_, decimal=5)
    if has_blas:
        # Non-finite inputs must be rejected rather than silently passed
        # to BLAS.
        for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
            assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
    """Check the Youngs & Cramer incremental mean/variance update.

    Dog-weight data from
    http://www.mathsisfun.com/data/standard-deviation.html
    """
    row = [600, 470, 170, 430, 300]
    data = np.array([row, row, row, row]).T
    split = 2
    first_half = data[:split, :]
    second_half = data[split:, :]
    # Statistics of the first batch seed the incremental update.
    batch_means = first_half.mean(axis=0)
    batch_variances = first_half.var(axis=0)
    batch_count = first_half.shape[0]
    updated_means, updated_variances, updated_count = \
        _batch_mean_variance_update(second_half, batch_means,
                                    batch_variances, batch_count)
    # The update must reproduce statistics computed over the full data set.
    assert_almost_equal(updated_means, data.mean(axis=0), 6)
    assert_almost_equal(updated_variances, data.var(axis=0), 6)
    assert_almost_equal(updated_count, data.shape[0])
def test_incremental_variance_ddof():
    # Test that degrees of freedom parameter for calculations are correct.
    rng = np.random.RandomState(1999)
    X = rng.randn(50, 10)
    n_samples, n_features = X.shape
    # Batch sizes that do and do not divide n_samples evenly.
    for batch_size in [11, 20, 37]:
        steps = np.arange(0, X.shape[0], batch_size)
        if steps[-1] != X.shape[0]:
            # Make sure the final (possibly short) batch is included.
            steps = np.hstack([steps, n_samples])
        for i, j in zip(steps[:-1], steps[1:]):
            batch = X[i:j, :]
            if i == 0:
                # First batch seeds the running statistics directly.
                incremental_means = batch.mean(axis=0)
                incremental_variances = batch.var(axis=0)
                # Assign this twice so that the test logic is consistent
                incremental_count = batch.shape[0]
                sample_count = batch.shape[0]
            else:
                # Subsequent batches go through the incremental update.
                result = _batch_mean_variance_update(
                    batch, incremental_means, incremental_variances,
                    sample_count)
                (incremental_means, incremental_variances,
                 incremental_count) = result
                sample_count += batch.shape[0]
            # The running statistics must match those recomputed from
            # scratch on all samples seen so far.
            calculated_means = np.mean(X[:j], axis=0)
            calculated_variances = np.var(X[:j], axis=0)
            assert_almost_equal(incremental_means, calculated_means, 6)
            assert_almost_equal(incremental_variances,
                                calculated_variances, 6)
            assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
    """After the flip, each row's largest-magnitude entry is positive."""
    rows = np.random.RandomState(36).randn(5, 5)
    flipped = _deterministic_vector_sign_flip(rows)
    peak_cols = np.argmax(np.abs(rows), axis=1)
    # The dominant entry of every flipped row is now its maximum ...
    assert_array_equal(peak_cols, np.argmax(flipped, axis=1))
    # ... and undoing the sign change recovers the original rows exactly.
    row_signs = np.sign(rows[np.arange(rows.shape[0]), peak_cols])
    assert_array_equal(rows, flipped * row_signs[:, np.newaxis])
def test_softmax():
    """softmax must equal exp(x) normalized row-wise."""
    rng = np.random.RandomState(0)
    samples = rng.randn(3, 5)
    exponentials = np.exp(samples)
    # keepdims=True keeps the row sums broadcastable against the matrix.
    expected = exponentials / np.sum(exponentials, axis=1, keepdims=True)
    assert_array_almost_equal(softmax(samples), expected)
| bsd-3-clause |
MicrosoftGenomics/FaST-LMM | fastlmm/association/single_snp_linreg.py | 1 | 8395 | from fastlmm.util.runner import *
import logging
import fastlmm.pyplink.plink as plink
from pysnptools.snpreader import Pheno
import pysnptools.util as pstutil
import fastlmm.util.util as flutil
import numpy as np
import scipy.stats as stats
from pysnptools.snpreader import Bed
from fastlmm.util.pickle_io import load, save
import time
import pandas as pd
from fastlmm.inference.lmm_cov import LMM as fastLMM
import warnings
from pysnptools.snpreader import SnpReader
from pysnptools.snpreader import SnpData
from pysnptools.standardizer import Unit
from pysnptools.standardizer import Identity as SS_Identity
from pysnptools.standardizer import DiagKtoN
from pysnptools.standardizer import UnitTrained
from pysnptools.kernelreader import Identity as KernelIdentity
from pysnptools.kernelreader import KernelData
from pysnptools.kernelreader import SnpKernel
from pysnptools.kernelreader import KernelNpz
from fastlmm.util.mapreduce import map_reduce
from pysnptools.util import create_directory_if_necessary
from pysnptools.snpreader import wrap_matrix_subset
from pysnptools.util.intrangeset import IntRangeSet
from fastlmm.inference.fastlmm_predictor import _snps_fixup, _pheno_fixup, _kernel_fixup, _SnpTrainTest
import fastlmm.inference.linear_regression as lin_reg
from fastlmm.association.single_snp import _set_block_size
def single_snp_linreg(test_snps, pheno, covar=None, max_output_len=None, output_file_name=None, GB_goal=None, runner=None, count_A1=None):
    """
    Function performing single SNP GWAS using linear regression. Will reorder and intersect IIDs as needed.

    :param test_snps: SNPs to test. Can be any :class:`.SnpReader`. If you give a string, it should be the base name of a set of PLINK Bed-formatted files.
           (For backwards compatibility can also be dictionary with keys 'vals', 'iid', 'header')
    :type test_snps: a :class:`.SnpReader` or a string

    :param pheno: A single phenotype: Can be any :class:`.SnpReader`, for example, :class:`.Pheno` or :class:`.SnpData`.
           If you give a string, it should be the file name of a PLINK phenotype-formatted file.
           Any IIDs with missing values will be removed.
           (For backwards compatibility can also be dictionary with keys 'vals', 'iid', 'header')
    :type pheno: a :class:`.SnpReader` or a string

    :param covar: covariate information, optional: Can be any :class:`.SnpReader`, for example, :class:`.Pheno` or :class:`.SnpData`.
           If you give a string, it should be the file name of a PLINK phenotype-formatted file.
           (For backwards compatibility can also be dictionary with keys 'vals', 'iid', 'header')
    :type covar: a :class:`.SnpReader` or a string

    :param max_output_len: Maximum number of Pvalues to return. Default to None, which means 'Return all'.
    :type max_output_len: number

    :param output_file_name: Name of file to write results to, optional. If not given, no output file will be created. The output format is tab-deleted text.
    :type output_file_name: file name

    :param GB_goal: gigabytes of memory the run should use, optional. If not given, will read the test_snps in blocks of size iid_count,
        which is memory efficient with little overhead on computation time.
    :type GB_goal: number

    :param runner: a runner, optional: Tells how to run locally, multi-processor, or on a cluster.
        If not given, the function is run locally.
    :type runner: a runner.

    :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1
         alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.
    :type count_A1: bool

    :rtype: Pandas dataframe with one row per test SNP. Columns include "PValue"

    :Example:

    >>> import logging
    >>> import numpy as np
    >>> from fastlmm.association import single_snp_linreg
    >>> from pysnptools.snpreader import Bed
    >>> logging.basicConfig(level=logging.INFO)
    >>> pheno_fn = "../feature_selection/examples/toydata.phe"
    >>> results_dataframe = single_snp_linreg(test_snps="../feature_selection/examples/toydata.5chrom", pheno=pheno_fn, count_A1=False)
    >>> print results_dataframe.iloc[0].SNP,round(results_dataframe.iloc[0].PValue,7),len(results_dataframe)
    null_576 1e-07 10000
    """
    assert test_snps is not None, "test_snps must be given as input"
    # Normalize the flexible input types (strings, dicts, readers) into
    # SnpReader-like objects.
    test_snps = _snps_fixup(test_snps,count_A1=count_A1)
    pheno = _pheno_fixup(pheno,count_A1=count_A1).read()
    assert pheno.sid_count == 1, "Expect pheno to be just one variable"
    # Drop individuals with a missing phenotype (NaN != NaN).
    pheno = pheno[(pheno.val==pheno.val)[:,0],:]
    covar = _pheno_fixup(covar, iid_if_none=pheno.iid)
    # Keep only individuals present in all three inputs, in a common order.
    test_snps, pheno, covar = pstutil.intersect_apply([test_snps, pheno, covar])
    logging.debug("# of iids now {0}".format(test_snps.iid_count))
    if GB_goal is not None:
        # Choose a SNP block size so one block of float64 data fits the
        # requested memory budget.
        bytes_per_sid = test_snps.iid_count * 8
        sid_per_GB_goal = 1024.0**3*GB_goal/bytes_per_sid
        block_size = max(1,int(sid_per_GB_goal+.5))
        block_count = test_snps.sid_count / block_size
    else:
        # Default: process all SNPs in a single block.
        block_count = 1
        block_size = test_snps.sid_count
    logging.debug("block_count={0}, block_size={1}".format(block_count,block_size))

    #!!!what about missing data in covar, in test_snps, in y
    # Append a column of ones to the covariates to act as the intercept.
    covar = np.c_[covar.read(view_ok=True,order='A').val,np.ones((test_snps.iid_count, 1))] #view_ok because np.c_ will allocation new memory
    y =  pheno.read(view_ok=True,order='A').val #view_ok because this code already did a fresh read to look for any missing values

    def mapper(start):
        # Compute p-values for the block of SNPs beginning at `start`.
        logging.info("single_snp_linereg reading start={0},block_size={1}".format(start,block_size))
        snp_index = np.arange(start,min(start+block_size,test_snps.sid_count))
        x = test_snps[:,start:start+block_size].read().standardize().val
        logging.info("single_snp_linereg linreg")
        _,pval_in = lin_reg.f_regression_cov_alt(x,y,covar)
        logging.info("single_snp_linereg done")
        pval_in = pval_in.reshape(-1)

        if max_output_len is None:
            return pval_in,snp_index
        else: #We only need to return the top max_output_len results
            sort_index = np.argsort(pval_in)[:max_output_len]
            return pval_in[sort_index],snp_index[sort_index]

    def reducer(pval_and_snp_index_sequence):
        # Merge the per-block results, sort by p-value, and build the
        # final result dataframe (optionally truncated to max_output_len).
        pval_list = []
        snp_index_list = []
        for pval, snp_index in pval_and_snp_index_sequence:
            pval_list.append(pval)
            snp_index_list.append(snp_index)
        pval = np.concatenate(pval_list)
        snp_index = np.concatenate(snp_index_list)
        sort_index = np.argsort(pval)
        if max_output_len is not None:
            sort_index = sort_index[:max_output_len]
        index = snp_index[sort_index]

        dataframe = pd.DataFrame(
            index=np.arange(len(index)),
            columns=('sid_index', 'SNP', 'Chr', 'GenDist', 'ChrPos', 'PValue')
            )
        #!!Is this the only way to set types in a dataframe?
        dataframe['sid_index'] = dataframe['sid_index'].astype(np.float)
        dataframe['Chr'] = dataframe['Chr'].astype(np.float)
        dataframe['GenDist'] = dataframe['GenDist'].astype(np.float)
        dataframe['ChrPos'] = dataframe['ChrPos'].astype(np.float)
        dataframe['PValue'] = dataframe['PValue'].astype(np.float)

        dataframe['sid_index'] = index
        dataframe['SNP'] = test_snps.sid[index]
        dataframe['Chr'] = test_snps.pos[index,0]
        dataframe['GenDist'] = test_snps.pos[index,1]
        dataframe['ChrPos'] = test_snps.pos[index,2]
        dataframe['PValue'] = pval[sort_index]

        if output_file_name is not None:
            dataframe.to_csv(output_file_name, sep="\t", index=False)

        return dataframe

    # Distribute the blocks via map_reduce; `runner` controls whether this
    # happens locally, multi-process, or on a cluster.
    dataframe = map_reduce(xrange(0,test_snps.sid_count,block_size),
                           mapper=mapper,
                           reducer=reducer,
                           input_files=[test_snps,pheno,covar],
                           output_files=[output_file_name],
                           name = "single_snp_linreg",
                           runner=runner)
    return dataframe
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Run the doctest embedded in single_snp_linreg's docstring as a
    # smoke test.
    import doctest
    doctest.testmod()
| apache-2.0 |
meteokid/python-rpn | share/examples/plot-irregular.py | 1 | 4140 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Kristjan Onu <Kristjan.Onu@canada.ca>
"""
Use Basemap to plot data from a RPN Standard File on an X grid-type
Usage:
# Define CMCGRIDF
. ssmuse-sh -d cmoi/base/20160901
plot-irregular.py
See Also:
https://basemaptutorial.readthedocs.org/en/latest/
"""
import os
import sys
import datetime
import scipy.constants
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.basemap as basemap
import rpnpy.librmn.all as rmn
if __name__ == "__main__":
    # File name suffix '12_040': presumably the 12Z run at a 040-hour
    # lead time -- TODO confirm against the operational naming scheme.
    forecast_name = datetime.date.today().strftime('%Y%m%d') + '12_040'
    try:
        CMCGRIDF = os.environ['CMCGRIDF'].strip()
    except KeyError:
        sys.stderr.write('Error: CMCGRIDF environment variable undefined. Before Python, execute:\n')
        sys.stderr.write('. ssmuse-sh -d cmoi/base/20160901\n')
        sys.exit(1)
    my_file = os.path.join(CMCGRIDF, 'prog', 'gsloce', forecast_name)
    try:
        funit = rmn.fstopenall(my_file, rmn.FST_RO)
    except:
        sys.stderr.write('Error: Unable to open file: '+my_file+'\n')
        sys.exit(1)
    varname = 'tm2'
    try:
        # Read the data record (typvar 'P@') and its mask record ('@@').
        sst_rec = rmn.fstlir(funit, nomvar=varname, typvar='P@')
        sst_mask_rec = rmn.fstlir(funit, nomvar=varname, typvar='@@')
    except:
        sys.stderr.write('Error: Problem reading fields '+varname+' in file: '+my_file+'\n')
        sys.exit(1)
    # Prefered method to get grid lat, lon. Works on any RPNSTD grid
    # type (except 'X')
    ## try:
    ##     sst_rec['iunit'] = funit
    ##     sst_gridid = rmn.ezqkdef(sst_rec)  # use ezscint to retreive full grid
    ##     gridLatLon = rmn.gdll(sst_gridid)
    ##     lat = gridLatLon['lat']
    ##     lon = gridLatLon['lon']
    ## except:
    ##     sys.stderr.write('Error: Problem getting grid info for '+varname+' in file: '+my_file+'\n')
    ##     sys.exit(1)
    # Less prefered method to get grid lat, lon since it relies on
    # reverse engeneering of grid def and ^^ >> values would only work
    # with L grids encoded as a Z grid and when only one set of ^^ >>
    # are in the file In this case it's the only way since the fields
    # are not properly geo-referenced (grtyp='X')
    try:
        lat = rmn.fstlir(funit, nomvar='^^', dtype=np.float32)['d']
        lon = rmn.fstlir(funit, nomvar='>>', dtype=np.float32)['d']
    except:
        sys.stderr.write('Error: Problem getting grid info for '+varname+' in file: '+my_file+'\n')
        sys.exit(1)
    rmn.fstcloseall(funit)
    # numpy.ma masks where True, so the '@@' record is inverted
    # (presumably nonzero marks valid data -- confirm).
    sst = np.ma.array(sst_rec['d'], mask=np.logical_not(sst_mask_rec['d']))
    # Map corners as [[west, south], [east, north]] in degrees.
    crnr = np.array([[-72.5, 45], [-55, 52.5]])
    lat_ts = np.mean(crnr[:, 1])
    bmap = basemap.Basemap(projection='merc', llcrnrlat=crnr[0, 1],
                           urcrnrlat=crnr[1, 1], llcrnrlon=crnr[0, 0],
                           urcrnrlon=crnr[1, 0], lat_ts=lat_ts,
                           resolution='h')
    bmap.drawcoastlines()
    bmap.fillcontinents(color='.75', lake_color='1')
    parallels = np.linspace(crnr[0, 1], crnr[1, 1], 5).astype('int')
    bmap.drawparallels(parallels, labels=[True, False, False, False])
    meridians = np.linspace(crnr[0, 0], crnr[1, 0], 5).astype('int')
    bmap.drawmeridians(meridians, labels=[False, False, True, False])
    bmap.drawmapboundary(fill_color='1')
    bmap.drawmapscale(crnr[0, 0] + .1*np.diff(crnr[:, 0]),
                      crnr[0, 1] + .1*np.diff(crnr[:, 1]),
                      np.mean(crnr[:, 0]), np.mean(crnr[:, 1]),
                      100, barstyle='fancy')
    # Project the lat/lon arrays onto map coordinates.
    x, y = bmap(lon, lat)
    # Convert Kelvin to Celsius for display.
    sst = scipy.constants.K2C(sst)
    sst_a = bmap.pcolormesh(x, y, sst)
    # Overlay a subsampled scatter of the irregular grid points.
    subsample = 5
    lines = bmap.plot(x.ravel()[::subsample], y.ravel()[::subsample])
    plt.setp(lines[0], 'linestyle', 'None')
    plt.setp(lines[0], 'marker', ',')
    plt.setp(lines[0], 'markerfacecolor', 'black')
    cbar = bmap.colorbar(sst_a, location='bottom', pad='5%')
    cbar.set_label(u'°C')
    plt.show()
    # Save alongside the script, named after the script itself.
    outfile = os.path.basename(os.path.splitext(__file__)[0]) + '.png'
    plt.savefig(outfile, dpi=300, transparent=True, bbox_inches='tight')
| lgpl-2.1 |
kingtaurus/cs224d | assignment2/test_confusion.py | 1 | 1352 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
# Hard-coded 5x5 confusion matrix of integer counts. NOTE(review): the
# row/column orientation (true vs. predicted) is assumed from the axis
# labels set below -- confirm against the producing model.
confusion = np.array([[42452, 27, 45, 175, 60],
                      [255, 1636, 12, 152, 39],
                      [317, 26, 863, 42, 20],
                      [598, 73, 31, 1319, 71],
                      [546, 24, 3, 49, 2527]], dtype=np.int32)
cm = confusion.copy()
# Normalize each row to fractions so classes with different support are
# comparable.
cm = cm.astype('float') / cm.sum(axis=1, keepdims=True)
# cm *= 255
# cm = cm.astype('uint8')
cm = cm[:, :]
print(cm)
plt.figure()
# Discrete colormap built from seaborn's "coolwarm" palette.
cmap1 = mpl.colors.ListedColormap(sns.color_palette("coolwarm", 100))
# print(sns.color_palette(sns.color_palette("coolwarm", 100)))
#plt.cm.Blues
plt.imshow(cm, interpolation='nearest', cmap=cmap1)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(["a", "b", "c", "d", "e"]))
plt.xticks(tick_marks, ["a", "b", "c", "d", "e"], rotation=45)
plt.yticks(tick_marks, ["a", "b", "c", "d", "e"])
# Suppress grid lines over the image.
plt.gca().xaxis.grid(b=False)
plt.gca().yaxis.grid(b=False)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig('cm.png')
plt.show()
# conf_matrix = tf.image_summary("confusion_matrix" + str(epoch), tf.convert_to_tensor(confusion.astype(np.float32)))
# conf_summary = session.run(conf_matrix)
# model.summary_writer.add_summary(conf_summary, epoch)
| mit |
woodscn/scipy | scipy/spatial/tests/test__plotutils.py | 55 | 1567 | from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
has_matplotlib = True
except:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
    # Smoke tests for scipy.spatial's matplotlib helper functions.
    # Each test is skipped when matplotlib is unavailable (see the
    # module-level `has_matplotlib` flag).
    points = [(0,0), (0,1), (1,0), (1,1)]

    @dec.skipif(not has_matplotlib, "Matplotlib not available")
    def test_delaunay(self):
        # Smoke test
        fig = plt.figure()
        obj = Delaunay(self.points)
        s_before = obj.simplices.copy()
        r = delaunay_plot_2d(obj, ax=fig.gca())
        assert_array_equal(obj.simplices, s_before)  # shouldn't modify
        assert_(r is fig)
        # Also exercise plotting onto an explicitly supplied axes.
        delaunay_plot_2d(obj, ax=fig.gca())

    @dec.skipif(not has_matplotlib, "Matplotlib not available")
    def test_voronoi(self):
        # Smoke test
        fig = plt.figure()
        obj = Voronoi(self.points)
        r = voronoi_plot_2d(obj, ax=fig.gca())
        assert_(r is fig)
        # Exercise both code paths: default, and vertices suppressed.
        voronoi_plot_2d(obj)
        voronoi_plot_2d(obj, show_vertices=False)

    @dec.skipif(not has_matplotlib, "Matplotlib not available")
    def test_convex_hull(self):
        # Smoke test
        fig = plt.figure()
        tri = ConvexHull(self.points)
        r = convex_hull_plot_2d(tri, ax=fig.gca())
        assert_(r is fig)
        convex_hull_plot_2d(tri)
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/core/categorical.py | 1 | 66886 | # pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import types
from pandas import compat, lib
from pandas.compat import u
from pandas.core.algorithms import factorize, take_1d
from pandas.core.base import (PandasObject, PandasDelegate,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util.decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
from pandas.core.common import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex, isnull, notnull,
is_dtype_equal, is_categorical_dtype, is_integer_dtype,
_possibly_infer_to_datetimelike, is_list_like,
is_sequence, is_null_slice, is_bool, _ensure_object, _ensure_int64,
_coerce_indexer_dtype)
from pandas.types.api import CategoricalDtype
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option
def _cat_compare_op(op):
    """Build the rich-comparison method named ``op`` (e.g. '__eq__')
    for Categorical, enforcing category-compatibility rules."""
    def f(self, other):
        # On python2, you can usually compare any type to any type, and
        # Categoricals can be seen as a custom type, but having different
        # results depending whether categories are the same or not is kind of
        # insane, so be a bit stricter here and use the python3 idea of
        # comparing only things of equal type.
        if not self.ordered:
            # Ordering comparisons are meaningless without an order.
            if op in ['__lt__', '__gt__', '__le__', '__ge__']:
                raise TypeError("Unordered Categoricals can only compare "
                                "equality or not")
        if isinstance(other, Categorical):
            # Two Categoricals can only be be compared if the categories are
            # the same
            if ((len(self.categories) != len(other.categories)) or
                    not ((self.categories == other.categories).all())):
                raise TypeError("Categoricals can only be compared if "
                                "'categories' are the same")
            if not (self.ordered == other.ordered):
                raise TypeError("Categoricals can only be compared if "
                                "'ordered' is the same")
            # Compare on the integer codes; -1 marks a missing value.
            na_mask = (self._codes == -1) | (other._codes == -1)
            f = getattr(self._codes, op)
            ret = f(other._codes)
            if na_mask.any():
                # In other series, the leads to False, so do that here too
                ret[na_mask] = False
            return ret

        # Numpy-1.9 and earlier may convert a scalar to a zerodim array during
        # comparison operation when second arg has higher priority, e.g.
        #
        #     cat[0] < cat
        #
        # With cat[0], for example, being ``np.int64(1)`` by the time it gets
        # into this function would become ``np.array(1)``.
        other = lib.item_from_zerodim(other)
        if lib.isscalar(other):
            if other in self.categories:
                # Known category: compare against its code position.
                i = self.categories.get_loc(other)
                return getattr(self._codes, op)(i)
            else:
                # Unknown scalar: only (in)equality is well defined.
                if op == '__eq__':
                    return np.repeat(False, len(self))
                elif op == '__ne__':
                    return np.repeat(True, len(self))
                else:
                    msg = ("Cannot compare a Categorical for op {op} with a "
                           "scalar, which is not a category.")
                    raise TypeError(msg.format(op=op))
        else:
            # allow categorical vs object dtype array comparisons for equality
            # these are only positional comparisons
            if op in ['__eq__', '__ne__']:
                return getattr(np.array(self), op)(np.array(other))

            msg = ("Cannot compare a Categorical for op {op} with type {typ}."
                   "\nIf you want to compare values, use 'np.asarray(cat) "
                   "<op> other'.")
            raise TypeError(msg.format(op=op, typ=type(other)))

    f.__name__ = op

    return f
def maybe_to_categorical(array):
    """Unwrap a Series/CategoricalIndex to its Categorical values.

    Any other input is passed through untouched.
    """
    is_wrapped = isinstance(array, (ABCSeries, ABCCategoricalIndex))
    return array._values if is_wrapped else array
_codes_doc = """The category codes of this categorical.
Level codes are an array if integer which are the positions of the real
values in the categories array.
There is not setter, use the other categorical methods and the normal item
setter to change values in the categorical.
"""
_categories_doc = """The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be unique and
the number of items in the new categories must be the same as the number of
items in the old categories.
Assigning to `categories` is a inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the number of new
categories is unequal the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
class Categorical(PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can only take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of values.
ordered : boolean, (default False)
Whether or not this categorical is treated as a ordered categorical.
If not given, the resulting categorical will not be ordered.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> from pandas import Categorical
>>> Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]
>>> Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a < b < c]
>>> a = Categorical(['a','b','c','a','b','c'], ['c', 'b', 'a'],
ordered=True)
>>> a.min()
'c'
"""
dtype = CategoricalDtype()
"""The dtype (always "category")"""
"""Whether or not this Categorical is ordered.
Only ordered `Categoricals` can be sorted (according to the order
of the categories) and have a min and max value.
See also
--------
Categorical.sort
Categorical.order
Categorical.min
Categorical.max
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_typ = 'categorical'
    def __init__(self, values, categories=None, ordered=False, name=None,
                 fastpath=False, levels=None):
        # fastpath: `values` is already an integer codes array and
        # `categories` is already validated (or validatable cheaply).
        if fastpath:
            # fast path
            self._codes = _coerce_indexer_dtype(values, categories)
            self._categories = self._validate_categories(
                categories, fastpath=isinstance(categories, ABCIndexClass))
            self._ordered = ordered
            return

        if name is not None:
            msg = ("the 'name' keyword is removed, use 'name' with consumers "
                   "of the categorical instead (e.g. 'Series(cat, "
                   "name=\"something\")'")
            warn(msg, UserWarning, stacklevel=2)

        # TODO: Remove after deprecation period in 2017/ after 0.18
        if levels is not None:
            warn("Creating a 'Categorical' with 'levels' is deprecated, use "
                 "'categories' instead", FutureWarning, stacklevel=2)
            if categories is None:
                categories = levels
            else:
                raise ValueError("Cannot pass in both 'categories' and "
                                 "(deprecated) 'levels', use only "
                                 "'categories'", stacklevel=2)

        # sanitize input
        if is_categorical_dtype(values):
            # we are either a Series or a CategoricalIndex
            if isinstance(values, (ABCSeries, ABCCategoricalIndex)):
                values = values._values
            # Inherit order/categories from the source when not given.
            if ordered is None:
                ordered = values.ordered
            if categories is None:
                categories = values.categories
            values = values.__array__()

        elif isinstance(values, (ABCIndexClass, ABCSeries)):
            pass

        else:
            # on numpy < 1.6 datetimelike get inferred to all i8 by
            # _sanitize_array which is fine, but since factorize does this
            # correctly no need here this is an issue because _sanitize_array
            # also coerces np.nan to a string under certain versions of numpy
            # as well
            values = _possibly_infer_to_datetimelike(values,
                                                     convert_dates=True)
            if not isinstance(values, np.ndarray):
                values = _convert_to_list_like(values)
                from pandas.core.series import _sanitize_array
                # On list with NaNs, int values will be converted to float. Use
                # "object" dtype to prevent this. In the end objects will be
                # casted to int/... in the category assignment step.
                dtype = 'object' if isnull(values).any() else None
                values = _sanitize_array(values, None, dtype=dtype)

        if categories is None:
            # Derive the categories from the data itself.
            try:
                codes, categories = factorize(values, sort=True)
            except TypeError:
                codes, categories = factorize(values, sort=False)
                if ordered:
                    # raise, as we don't have a sortable data structure and so
                    # the user should give us one by specifying categories
                    raise TypeError("'values' is not ordered, please "
                                    "explicitly specify the categories order "
                                    "by passing in a categories argument.")
            except ValueError:

                # FIXME
                raise NotImplementedError("> 1 ndim Categorical are not "
                                          "supported at this time")

            categories = self._validate_categories(categories)

        else:
            # there were two ways if categories are present
            # - the old one, where each value is a int pointer to the levels
            #   array -> not anymore possible, but code outside of pandas could
            #   call us like that, so make some checks
            # - the new one, where each value is also in the categories array
            #   (or np.nan)

            # make sure that we always have the same type here, no matter what
            # we get passed in
            categories = self._validate_categories(categories)
            codes = _get_codes_for_values(values, categories)

            # TODO: check for old style usage. These warnings should be removes
            # after 0.18/ in 2016
            if is_integer_dtype(values) and not is_integer_dtype(categories):
                warn("Values and categories have different dtypes. Did you "
                     "mean to use\n'Categorical.from_codes(codes, "
                     "categories)'?", RuntimeWarning, stacklevel=2)

            if (len(values) and is_integer_dtype(values) and
                    (codes == -1).all()):
                warn("None of the categories were found in values. Did you "
                     "mean to use\n'Categorical.from_codes(codes, "
                     "categories)'?", RuntimeWarning, stacklevel=2)

        self.set_ordered(ordered or False, inplace=True)
        self._categories = categories
        self._codes = _coerce_indexer_dtype(codes, categories)
def copy(self):
""" Copy constructor. """
return Categorical(values=self._codes.copy(),
categories=self.categories, ordered=self.ordered,
fastpath=True)
def astype(self, dtype):
""" coerce this type to another dtype """
if is_categorical_dtype(dtype):
return self
return np.array(self, dtype=dtype)
    @cache_readonly
    def ndim(self):
        """Number of dimensions of the Categorical (that of its codes)."""
        return self._codes.ndim
    @cache_readonly
    def size(self):
        """Return the number of elements (same as ``len(self)``)."""
        return len(self)
    @cache_readonly
    def itemsize(self):
        """Return the size in bytes of a single category element."""
        return self.categories.itemsize
    def reshape(self, new_shape, *args, **kwargs):
        """
        An ndarray-compatible method that returns
        `self` because categorical instances cannot
        actually be reshaped.
        """
        # Validate (and ignore) numpy-style compatibility arguments.
        nv.validate_reshape(args, kwargs)
        return self
    @property
    def base(self):
        """ compat, we are always our own object """
        # Mirrors ndarray.base; None means no underlying shared buffer.
        return None
    @classmethod
    def from_array(cls, data, **kwargs):
        """
        Make a Categorical type from a single array-like object.

        For internal compatibility with numpy arrays.

        Parameters
        ----------
        data : array-like
            Can be an Index or array-like. The categories are assumed to be
            the unique values of `data`.
        """
        # Thin wrapper over the main constructor; kwargs pass through.
        return Categorical(data, **kwargs)
@classmethod
def from_codes(cls, codes, categories, ordered=False, name=None):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as a ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
if name is not None:
msg = ("the 'name' keyword is removed, use 'name' with consumers "
"of the categorical instead (e.g. 'Series(cat, "
"name=\"something\")'")
warn(msg, UserWarning, stacklevel=2)
try:
codes = np.asarray(codes, np.int64)
except:
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = cls._validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return Categorical(codes, categories=categories, ordered=ordered,
fastpath=True)
    # Integer codes backing this categorical; populated by the constructor.
    _codes = None

    def _get_codes(self):
        """ Get the codes.

        Returns
        -------
        codes : integer array view
            A non writable view of the `codes` array.
        """
        v = self._codes.view()
        # Freeze the view so callers cannot mutate the codes in place.
        v.flags.writeable = False
        return v

    def _set_codes(self, codes):
        """
        Not settable by the user directly
        """
        raise ValueError("cannot set Categorical codes directly")

    codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)

    def _get_labels(self):
        """
        Get the category labels (deprecated).

        Deprecated, use .codes!
        """
        warn("'labels' is deprecated. Use 'codes' instead", FutureWarning,
             stacklevel=2)
        return self.codes

    # Deprecated alias for .codes; assignment is rejected via _set_codes.
    labels = property(fget=_get_labels, fset=_set_codes)
    # The categories Index; populated via _set_categories / the constructor.
    _categories = None

    @classmethod
    def _validate_categories(cls, categories, fastpath=False):
        """
        Validates that we have good categories

        Parameters
        ----------
        fastpath : boolean (default: False)
            Don't perform validation of the categories for uniqueness or nulls

        Returns
        -------
        categories : Index
        """
        if not isinstance(categories, ABCIndexClass):
            dtype = None
            if not hasattr(categories, "dtype"):
                categories = _convert_to_list_like(categories)
                # On categories with NaNs, int values would be converted to
                # float. Use "object" dtype to prevent this.
                if isnull(categories).any():
                    without_na = np.array([x for x in categories
                                           if notnull(x)])
                    with_na = np.array(categories)
                    # dtypes differ only when NaN forced an upcast.
                    if with_na.dtype != without_na.dtype:
                        dtype = "object"

            from pandas import Index
            categories = Index(categories, dtype=dtype)

        if not fastpath:

            # check properties of the categories
            # we don't allow NaNs in the categories themselves
            if categories.hasnans:
                # NaNs in cats deprecated in 0.17,
                # remove in 0.18 or 0.19 GH 10748
                msg = ('\nSetting NaNs in `categories` is deprecated and '
                       'will be removed in a future version of pandas.')
                warn(msg, FutureWarning, stacklevel=3)

            # categories must be unique
            if not categories.is_unique:
                raise ValueError('Categorical categories must be unique')

        return categories
    def _set_categories(self, categories, fastpath=False):
        """ Sets new categories

        Parameters
        ----------
        fastpath : boolean (default: False)
            Don't perform validation of the categories for uniqueness or nulls
        """
        categories = self._validate_categories(categories, fastpath=fastpath)
        # Wholesale replacement must preserve the category count, otherwise
        # the existing codes would point past the end of the new Index.
        if (not fastpath and self._categories is not None and
                len(categories) != len(self._categories)):
            raise ValueError("new categories need to have the same number of "
                             "items than the old categories!")

        self._categories = categories

    def _get_categories(self):
        """ Gets the categories """
        # categories is an Index, which is immutable -> no need to copy
        return self._categories

    categories = property(fget=_get_categories, fset=_set_categories,
                          doc=_categories_doc)

    def _set_levels(self, levels):
        """ set new levels (deprecated, use "categories") """
        warn("Assigning to 'levels' is deprecated, use 'categories'",
             FutureWarning, stacklevel=2)
        self.categories = levels

    def _get_levels(self):
        """ Gets the levels (deprecated, use "categories") """
        warn("Accessing 'levels' is deprecated, use 'categories'",
             FutureWarning, stacklevel=2)
        return self.categories

    # TODO: Remove after deprecation period in 2017/ after 0.18
    levels = property(fget=_get_levels, fset=_set_levels)
    # Whether the categories carry a meaningful order; set via set_ordered.
    _ordered = None

    def _set_ordered(self, value):
        """ Sets the ordered attribute to the boolean value """
        warn("Setting 'ordered' directly is deprecated, use 'set_ordered'",
             FutureWarning, stacklevel=2)
        self.set_ordered(value, inplace=True)

    def set_ordered(self, value, inplace=False):
        """
        Sets the ordered attribute to the boolean value

        Parameters
        ----------
        value : boolean to set whether this categorical is ordered (True) or
            not (False)
        inplace : boolean (default: False)
            Whether or not to set the ordered attribute inplace or return a
            copy of this categorical with ordered set to the value
        """
        if not is_bool(value):
            raise TypeError("ordered must be a boolean value")
        # Operate on self or a copy depending on `inplace`; returns None in
        # the inplace case (stdlib mutator convention).
        cat = self if inplace else self.copy()
        cat._ordered = value
        if not inplace:
            return cat

    def as_ordered(self, inplace=False):
        """
        Sets the Categorical to be ordered

        Parameters
        ----------
        inplace : boolean (default: False)
            Whether or not to set the ordered attribute inplace or return a
            copy of this categorical with ordered set to True
        """
        return self.set_ordered(True, inplace=inplace)

    def as_unordered(self, inplace=False):
        """
        Sets the Categorical to be unordered

        Parameters
        ----------
        inplace : boolean (default: False)
            Whether or not to set the ordered attribute inplace or return a
            copy of this categorical with ordered set to False
        """
        return self.set_ordered(False, inplace=inplace)

    def _get_ordered(self):
        """ Gets the ordered attribute """
        return self._ordered

    ordered = property(fget=_get_ordered, fset=_set_ordered)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simple be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this methods does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which does not considers a S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
new_categories = self._validate_categories(new_categories)
cat = self if inplace else self.copy()
if rename:
if (cat._categories is not None and
len(new_categories) < len(cat._categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_categories)] = -1
else:
values = cat.__array__()
cat._codes = _get_codes_for_values(values, new_categories)
cat._categories = new_categories
if ordered is None:
ordered = self.ordered
cat.set_ordered(ordered, inplace=True)
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
The new categories has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Raises
------
ValueError
If the new categories do not have the same number of items than the
current categories or do not validate as categories
Parameters
----------
new_categories : Index-like
The renamed categories.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical with renamed categories added or None if inplace.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
cat = self if inplace else self.copy()
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as a ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
if set(self._categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self._categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: %s" %
str(already_included))
raise ValueError(msg)
new_categories = list(self._categories) + list(new_categories)
cat = self if inplace else self.copy()
cat._categories = self._validate_categories(new_categories)
cat._codes = _coerce_indexer_dtype(cat._codes, new_categories)
if not inplace:
return cat
    def remove_categories(self, removals, inplace=False):
        """ Removes the specified categories.

        `removals` must be included in the old categories. Values which were in
        the removed categories will be set to NaN

        Raises
        ------
        ValueError
            If the removals are not contained in the categories

        Parameters
        ----------
        removals : category or list of categories
            The categories which should be removed.
        inplace : boolean (default: False)
            Whether or not to remove the categories inplace or return a copy
            of this categorical with removed categories.

        Returns
        -------
        cat : Categorical with removed categories or None if inplace.

        See also
        --------
        rename_categories
        reorder_categories
        add_categories
        remove_unused_categories
        set_categories
        """
        if not is_list_like(removals):
            removals = [removals]

        removal_set = set(list(removals))
        not_included = removal_set - set(self._categories)
        new_categories = [c for c in self._categories if c not in removal_set]

        # GH 10156
        # NaN needs special-casing: drop NaN entries from both the "missing
        # removals" check and the surviving categories.
        if any(isnull(removals)):
            not_included = [x for x in not_included if notnull(x)]
            new_categories = [x for x in new_categories if notnull(x)]

        if len(not_included) != 0:
            raise ValueError("removals must all be in old categories: %s" %
                             str(not_included))

        # set_categories re-encodes codes; removed categories become -1/NaN.
        return self.set_categories(new_categories, ordered=self.ordered,
                                   rename=False, inplace=inplace)
    def remove_unused_categories(self, inplace=False):
        """ Removes categories which are not used.

        Parameters
        ----------
        inplace : boolean (default: False)
            Whether or not to drop unused categories inplace or return a copy
            of this categorical with unused categories dropped.

        Returns
        -------
        cat : Categorical with unused categories dropped or None if inplace.

        See also
        --------
        rename_categories
        reorder_categories
        add_categories
        remove_categories
        set_categories
        """
        cat = self if inplace else self.copy()
        # `idx` holds the sorted distinct codes actually in use; `inv` maps
        # each element of _codes to its position in `idx`, i.e. the new code.
        idx, inv = np.unique(cat._codes, return_inverse=True)

        if idx.size != 0 and idx[0] == -1:  # na sentinel
            # -1 is not a category; drop it from idx and shift the remapped
            # codes down so NaN entries become -1 again.
            idx, inv = idx[1:], inv - 1

        cat._codes = inv
        cat._categories = cat.categories.take(idx)

        if not inplace:
            return cat
    def map(self, mapper):
        """
        Apply mapper function to its categories (not codes).

        Parameters
        ----------
        mapper : callable
            Function to be applied. When all categories are mapped
            to different categories, the result will be Categorical which has
            the same order property as the original. Otherwise, the result will
            be np.ndarray.

        Returns
        -------
        applied : Categorical or np.ndarray.
        """
        # Map only the (few) categories, not every element.
        new_categories = self.categories.map(mapper)
        try:
            return Categorical.from_codes(self._codes.copy(),
                                          categories=new_categories,
                                          ordered=self.ordered)
        except ValueError:
            # Mapper collapsed categories together (duplicates) -> cannot
            # stay categorical; fall back to a dense ndarray.
            return np.take(new_categories, self._codes)
    # Element-wise comparison operators, generated by a shared factory that
    # compares codes (honouring the ordered flag).
    __eq__ = _cat_compare_op('__eq__')
    __ne__ = _cat_compare_op('__ne__')
    __lt__ = _cat_compare_op('__lt__')
    __gt__ = _cat_compare_op('__gt__')
    __le__ = _cat_compare_op('__le__')
    __ge__ = _cat_compare_op('__ge__')

    # for Series/ndarray like compat
    @property
    def shape(self):
        """ Shape of the Categorical.

        For internal compatibility with numpy arrays.

        Returns
        -------
        shape : tuple
        """
        # Categoricals are always 1-dimensional.
        return tuple([len(self._codes)])
    def shift(self, periods):
        """
        Shift Categorical by desired number of periods.

        Parameters
        ----------
        periods : int
            Number of periods to move, can be positive or negative

        Returns
        -------
        shifted : Categorical
        """
        # since categoricals always have ndim == 1, an axis parameter
        # doesnt make any sense here.
        codes = self.codes
        if codes.ndim > 1:
            raise NotImplementedError("Categorical with ndim > 1.")
        if np.prod(codes.shape) and (periods != 0):
            codes = np.roll(codes, com._ensure_platform_int(periods), axis=0)
            # Positions vacated by the shift are filled with the NaN
            # sentinel (-1), not wrapped-around values.
            if periods > 0:
                codes[:periods] = -1
            else:
                codes[periods:] = -1

        return Categorical.from_codes(codes, categories=self.categories,
                                      ordered=self.ordered)
    def __array__(self, dtype=None):
        """
        The numpy array interface.

        Returns
        -------
        values : numpy array
            A numpy array of either the specified dtype or,
            if dtype==None (default), the same dtype as
            categorical.categories.dtype
        """
        # Expand codes to dense values; -1 codes become NaN via take_1d.
        ret = take_1d(self.categories.values, self._codes)
        if dtype and not is_dtype_equal(dtype, self.categories.dtype):
            return np.asarray(ret, dtype)
        return ret
    def __setstate__(self, state):
        """Necessary for making this object picklable"""
        if not isinstance(state, dict):
            raise Exception('invalid pickle state')

        # Provide compatibility with pre-0.15.0 Categoricals.
        if '_codes' not in state and 'labels' in state:
            state['_codes'] = state.pop('labels')
        if '_categories' not in state and '_levels' in state:
            state['_categories'] = self._validate_categories(state.pop(
                '_levels'))

        # 0.16.0 ordered change
        if '_ordered' not in state:

            # >=15.0 < 0.16.0
            if 'ordered' in state:
                state['_ordered'] = state.pop('ordered')
            else:
                # pre-0.15.0 pickles carry no ordered flag at all.
                state['_ordered'] = False

        for k, v in compat.iteritems(state):
            setattr(self, k, v)
    @property
    def T(self):
        # 1-d, so transpose is the identity (numpy-compat).
        return self

    @property
    def nbytes(self):
        # Codes plus the dense category values.
        return self._codes.nbytes + self._categories.values.nbytes

    def memory_usage(self, deep=False):
        """
        Memory usage of my values

        Parameters
        ----------
        deep : bool
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption

        Returns
        -------
        bytes used

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False

        See Also
        --------
        numpy.ndarray.nbytes
        """
        # Index.memory_usage handles the deep introspection of the
        # categories; codes are a plain integer ndarray.
        return self._codes.nbytes + self._categories.memory_usage(deep=deep)
    @Substitution(klass='Categorical', value='v')
    @Appender(_shared_docs['searchsorted'])
    def searchsorted(self, v, side='left', sorter=None):
        # Search only makes sense when the categories carry an order.
        if not self.ordered:
            raise ValueError("Categorical not ordered\nyou can use "
                             ".as_ordered() to change the Categorical to an "
                             "ordered one")

        from pandas.core.series import Series
        # Translate the search values into category codes, then search the
        # codes array.
        values_as_codes = self.categories.values.searchsorted(
            Series(v).values, side=side)

        return self.codes.searchsorted(values_as_codes, sorter=sorter)
    def isnull(self):
        """
        Detect missing values

        Both missing values (-1 in .codes) and NA as a category are detected.

        Returns
        -------
        a boolean array of whether my values are null

        See also
        --------
        pandas.isnull : pandas version
        Categorical.notnull : boolean inverse of Categorical.isnull
        """
        # -1 is the code-level NaN sentinel.
        ret = self._codes == -1

        # String/object and float categories can hold np.nan
        if self.categories.dtype.kind in ['S', 'O', 'f']:
            if np.nan in self.categories:
                nan_pos = np.where(isnull(self.categories))[0]
                # we only have one NA in categories
                ret = np.logical_or(ret, self._codes == nan_pos)
        return ret

    def notnull(self):
        """
        Reverse of isnull

        Both missing values (-1 in .codes) and NA as a category are detected
        as null.

        Returns
        -------
        a boolean array of whether my values are not null

        See also
        --------
        pandas.notnull : pandas version
        Categorical.isnull : boolean inverse of Categorical.notnull
        """
        return ~self.isnull()

    def put(self, *args, **kwargs):
        """
        Replace specific elements in the Categorical with given values.
        """
        # numpy-compat stub; positional replacement is not supported.
        raise NotImplementedError(("'put' is not yet implemented "
                                   "for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Both missing values (-1 in .codes) and NA as a category are detected.
NA is removed from the categories if present.
Returns
-------
valid : Categorical
"""
result = self[self.notnull()]
if isnull(result.categories).any():
result = result.remove_categories([np.nan])
return result
    def value_counts(self, dropna=True):
        """
        Returns a Series containing counts of each category.

        Every category will have an entry, even those with a count of 0.

        Parameters
        ----------
        dropna : boolean, default True
            Don't include counts of NaN, even if NaN is a category.

        Returns
        -------
        counts : Series
        """
        from numpy import bincount
        from pandas.core.common import isnull
        from pandas.core.series import Series
        from pandas.core.index import CategoricalIndex

        # If NaN is itself a category and dropna is requested, strip it
        # before counting so it does not appear in the result.
        obj = (self.remove_categories([np.nan]) if dropna and
               isnull(self.categories).any() else self)

        code, cat = obj._codes, obj.categories
        # mask marks valid codes (>= 0); -1 marks missing values.
        ncat, mask = len(cat), 0 <= code
        ix, clean = np.arange(ncat), mask.all()

        if dropna or clean:
            # Count only valid codes; minlength guarantees a bin per category.
            obs = code if clean else code[mask]
            count = bincount(obs, minlength=ncat or None)
        else:
            # Keep NaN: route -1 codes into an extra bin at position ncat and
            # label it with the -1 sentinel.
            count = bincount(np.where(mask, code, ncat))
            ix = np.append(ix, -1)

        ix = Categorical(ix, categories=cat, ordered=obj.ordered,
                         fastpath=True)

        return Series(count, index=CategoricalIndex(ix), dtype='int64')
    def get_values(self):
        """ Return the values.

        For internal compatibility with pandas formatting.

        Returns
        -------
        values : numpy array
            A numpy array of the same dtype as categorical.categories.dtype or
            Index if datetime / periods
        """
        # if we are a datetime and period index, return Index to keep metadata
        if com.is_datetimelike(self.categories):
            return self.categories.take(self._codes, fill_value=np.nan)
        return np.array(self)

    def check_for_ordered(self, op):
        """ assert that we are ordered """
        # Guard used by min/max and friends before order-dependent ops.
        if not self.ordered:
            raise TypeError("Categorical is not ordered for operation {op}\n"
                            "you can use .as_ordered() to change the "
                            "Categorical to an ordered one\n".format(op=op))
    def argsort(self, ascending=True, *args, **kwargs):
        """
        Returns the indices that would sort the Categorical instance if
        'sort_values' was called. This function is implemented to provide
        compatibility with numpy ndarray objects.

        While an ordering is applied to the category values, arg-sorting
        in this context refers more to organizing and grouping together
        based on matching category values. Thus, this function can be
        called on an unordered Categorical instance unlike the functions
        'Categorical.min' and 'Categorical.max'.

        Returns
        -------
        argsorted : numpy array

        See also
        --------
        numpy.ndarray.argsort
        """
        ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
        # NOTE(review): the .copy() looks unnecessary -- np.argsort does not
        # mutate its input. Confirm before removing.
        result = np.argsort(self._codes.copy(), **kwargs)
        if not ascending:
            result = result[::-1]
        return result
    def sort_values(self, inplace=False, ascending=True, na_position='last'):
        """ Sorts the Categorical by category value returning a new
        Categorical by default.

        While an ordering is applied to the category values, sorting in this
        context refers more to organizing and grouping together based on
        matching category values. Thus, this function can be called on an
        unordered Categorical instance unlike the functions 'Categorical.min'
        and 'Categorical.max'.

        Parameters
        ----------
        inplace : boolean, default False
            Do operation in place.
        ascending : boolean, default True
            Order ascending. Passing False orders descending. The
            ordering parameter provides the method by which the
            category values are organized.
        na_position : {'first', 'last'} (optional, default='last')
            'first' puts NaNs at the beginning
            'last' puts NaNs at the end

        Returns
        -------
        y : Categorical or None

        See Also
        --------
        Categorical.sort

        Examples
        --------
        >>> c = pd.Categorical([1, 2, 2, 1, 5])
        >>> c
        [1, 2, 2, 1, 5]
        Categories (3, int64): [1, 2, 5]
        >>> c.sort_values()
        [1, 1, 2, 2, 5]
        Categories (3, int64): [1, 2, 5]
        >>> c.sort_values(ascending=False)
        [5, 2, 2, 1, 1]
        Categories (3, int64): [1, 2, 5]

        Inplace sorting can be done as well:

        >>> c.sort_values(inplace=True)
        >>> c
        [1, 1, 2, 2, 5]
        Categories (3, int64): [1, 2, 5]
        >>>
        >>> c = pd.Categorical([1, 2, 2, 1, 5])

        'sort_values' behaviour with NaNs. Note that 'na_position'
        is independent of the 'ascending' parameter:

        >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
        >>> c
        [NaN, 2.0, 2.0, NaN, 5.0]
        Categories (2, int64): [2, 5]
        >>> c.sort_values()
        [2.0, 2.0, 5.0, NaN, NaN]
        Categories (2, int64): [2, 5]
        >>> c.sort_values(ascending=False)
        [5.0, 2.0, 2.0, NaN, NaN]
        Categories (2, int64): [2, 5]
        >>> c.sort_values(na_position='first')
        [NaN, NaN, 2.0, 2.0, 5.0]
        Categories (2, int64): [2, 5]
        >>> c.sort_values(ascending=False, na_position='first')
        [NaN, NaN, 5.0, 2.0, 2.0]
        Categories (2, int64): [2, 5]
        """
        if na_position not in ['last', 'first']:
            raise ValueError('invalid na_position: {!r}'.format(na_position))

        # Sorting the codes sorts by category order. After sorting, the -1
        # NaN sentinels sit at the front (ascending) or back (descending),
        # so they must be repositioned per na_position.
        codes = np.sort(self._codes)
        if not ascending:
            codes = codes[::-1]

        # NaN handling
        na_mask = (codes == -1)
        if na_mask.any():
            n_nans = len(codes[na_mask])
            if na_position == "first":
                # in this case sort to the front
                new_codes = codes.copy()
                new_codes[0:n_nans] = -1
                new_codes[n_nans:] = codes[~na_mask]
                codes = new_codes
            elif na_position == "last":
                # ... and to the end
                new_codes = codes.copy()
                pos = len(codes) - n_nans
                new_codes[0:pos] = codes[~na_mask]
                new_codes[pos:] = -1
                codes = new_codes
        if inplace:
            self._codes = codes
            return
        else:
            return Categorical(values=codes, categories=self.categories,
                               ordered=self.ordered, fastpath=True)
    def order(self, inplace=False, ascending=True, na_position='last'):
        """
        DEPRECATED: use :meth:`Categorical.sort_values`. That function
        is entirely equivalent to this one.

        See Also
        --------
        Categorical.sort_values
        """
        warn("order is deprecated, use sort_values(...)", FutureWarning,
             stacklevel=2)
        return self.sort_values(inplace=inplace, ascending=ascending,
                                na_position=na_position)

    def sort(self, inplace=True, ascending=True, na_position='last', **kwargs):
        """
        DEPRECATED: use :meth:`Categorical.sort_values`. That function
        is just like this one, except that a new Categorical is returned
        by default, so make sure to pass in 'inplace=True' to get
        inplace sorting.

        See Also
        --------
        Categorical.sort_values
        """
        warn("sort is deprecated, use sort_values(...)", FutureWarning,
             stacklevel=2)
        # Reject any stray numpy-style keyword arguments.
        nv.validate_sort(tuple(), kwargs)
        return self.sort_values(inplace=inplace, ascending=ascending,
                                na_position=na_position)
    def ravel(self, order='C'):
        """ Return a flattened (numpy) array.

        For internal compatibility with numpy arrays.

        Returns
        -------
        raveled : numpy array
        """
        # Already 1-d; materialize dense values (order is ignored).
        return np.array(self)

    def view(self):
        """Return a view of myself.

        For internal compatibility with numpy arrays.

        Returns
        -------
        view : Categorical
            Returns `self`!
        """
        return self

    def to_dense(self):
        """Return my 'dense' representation

        For internal compatibility with numpy arrays.

        Returns
        -------
        dense : array
        """
        return np.asarray(self)
    @deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
    def fillna(self, value=None, method=None, limit=None):
        """ Fill NA/NaN values using the specified method.

        Parameters
        ----------
        method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
            Method to use for filling holes in reindexed Series
            pad / ffill: propagate last valid observation forward to next valid
            backfill / bfill: use NEXT valid observation to fill gap
        value : scalar
            Value to use to fill holes (e.g. 0)
        limit : int, default None
            (Not implemented yet for Categorical!)
            If method is specified, this is the maximum number of consecutive
            NaN values to forward/backward fill. In other words, if there is
            a gap with more than this number of consecutive NaNs, it will only
            be partially filled. If method is not specified, this is the
            maximum number of entries along the entire axis where NaNs will be
            filled.

        Returns
        -------
        filled : Categorical with NA/NaN filled
        """
        if value is None:
            value = np.nan
        if limit is not None:
            raise NotImplementedError("specifying a limit for fillna has not "
                                      "been implemented yet")

        values = self._codes

        # Make sure that we also get NA in categories
        if self.categories.dtype.kind in ['S', 'O', 'f']:
            if np.nan in self.categories:
                # Normalize: recode elements pointing at the NaN category to
                # the -1 sentinel so both NA representations get filled.
                values = values.copy()
                nan_pos = np.where(isnull(self.categories))[0]
                # we only have one NA in categories
                values[values == nan_pos] = -1

        # pad / bfill
        if method is not None:
            # Fill on the dense values, then re-encode back into codes.
            values = self.to_dense().reshape(-1, len(self))
            values = interpolate_2d(values, method, 0, None,
                                    value).astype(self.categories.dtype)[0]
            values = _get_codes_for_values(values, self.categories)

        else:

            if not isnull(value) and value not in self.categories:
                raise ValueError("fill value must be in categories")

            mask = values == -1
            if mask.any():
                values = values.copy()
                values[mask] = self.categories.get_loc(value)

        return Categorical(values, categories=self.categories,
                           ordered=self.ordered, fastpath=True)
    def take_nd(self, indexer, allow_fill=True, fill_value=None):
        """ Take the codes by the indexer, fill with the fill_value.

        For internal compatibility with numpy arrays.
        """
        # filling must always be None/nan here
        # but is passed thru internally
        assert isnull(fill_value)

        # -1 positions in the indexer become the NaN code sentinel.
        codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
        result = Categorical(codes, categories=self.categories,
                             ordered=self.ordered, fastpath=True)
        return result

    # numpy-compat alias
    take = take_nd
    def _slice(self, slicer):
        """ Return a slice of myself.

        For internal compatibility with numpy arrays.
        """
        # only allow 1 dimensional slicing, but can
        # in a 2-d case be passd (slice(None),....)
        if isinstance(slicer, tuple) and len(slicer) == 2:
            if not is_null_slice(slicer[0]):
                raise AssertionError("invalid slicing for a 1-ndim "
                                     "categorical")
            slicer = slicer[1]

        # Slice the codes and wrap them with the shared categories.
        _codes = self._codes[slicer]
        return Categorical(values=_codes, categories=self.categories,
                           ordered=self.ordered, fastpath=True)
    def __len__(self):
        """The length of this Categorical."""
        return len(self._codes)

    def __iter__(self):
        """Returns an Iterator over the values of this Categorical."""
        # Iterates dense values (categories / NaN), not codes.
        return iter(self.get_values())

    def _tidy_repr(self, max_vals=10, footer=True):
        """ a short repr displaying only max_vals and an optional (but default
        footer)
        """
        num = max_vals // 2
        head = self[:num]._get_repr(length=False, footer=False)
        tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)

        # Drop head's closing and tail's opening bracket so the pieces join
        # into a single truncated listing.
        result = '%s, ..., %s' % (head[:-1], tail[1:])
        if footer:
            result = '%s\n%s' % (result, self._repr_footer())

        return compat.text_type(result)
    def _repr_categories(self):
        """ return the base repr for the categories """
        max_categories = (10 if get_option("display.max_categories") == 0 else
                          get_option("display.max_categories"))
        from pandas.formats import format as fmt
        if len(self.categories) > max_categories:
            # Show the first and last halves with an ellipsis in between.
            num = max_categories // 2
            head = fmt.format_array(self.categories[:num], None)
            tail = fmt.format_array(self.categories[-num:], None)
            category_strs = head + ["..."] + tail
        else:
            category_strs = fmt.format_array(self.categories, None)

        # Strip all leading spaces, which format_array adds for columns...
        category_strs = [x.strip() for x in category_strs]
        return category_strs

    def _repr_categories_info(self):
        """ Returns a string representation of the footer."""
        category_strs = self._repr_categories()
        dtype = getattr(self.categories, 'dtype_str',
                        str(self.categories.dtype))
        levheader = "Categories (%d, %s): " % (len(self.categories), dtype)
        width, height = get_terminal_size()
        max_width = get_option("display.width") or width
        if com.in_ipython_frontend():
            # 0 = no breaks
            max_width = 0
        levstring = ""
        start = True
        cur_col_len = len(levheader)  # header
        # Ordered categoricals join with " < ", unordered with ", ".
        sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
        linesep = sep.rstrip() + "\n"  # remove whitespace
        for val in category_strs:
            if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
                # Wrap and indent continuation lines under the header.
                levstring += linesep + (" " * (len(levheader) + 1))
                cur_col_len = len(levheader) + 1  # header + a whitespace
            elif not start:
                levstring += sep
            cur_col_len += len(val)
            levstring += val
            start = False
        # replace to simple save space by
        return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
    def _repr_footer(self):
        # Length line plus the categories summary.
        return u('Length: %d\n%s') % (len(self), self._repr_categories_info())

    def _get_repr(self, length=True, na_rep='NaN', footer=True):
        from pandas.formats import format as fmt
        formatter = fmt.CategoricalFormatter(self, length=length,
                                             na_rep=na_rep, footer=footer)
        result = formatter.to_string()
        return compat.text_type(result)

    def __unicode__(self):
        """ Unicode representation. """
        _maxlen = 10
        if len(self._codes) > _maxlen:
            # Too long: show head/tail with an ellipsis.
            result = self._tidy_repr(_maxlen)
        elif len(self._codes) > 0:
            result = self._get_repr(length=len(self) > _maxlen)
        else:
            # Empty categorical: "[]" followed by the footer on one line.
            result = ('[], %s' %
                      self._get_repr(length=False,
                                     footer=True, ).replace("\n", ", "))

        return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return Categorical(values=self._codes[key],
categories=self.categories,
ordered=self.ordered,
fastpath=True)
    def __setitem__(self, key, value):
        """ Item assignment.

        Raises
        ------
        ValueError
            If (one or more) Value is not in categories or if a assigned
            `Categorical` does not have the same categories
        """
        # require identical categories set
        if isinstance(value, Categorical):
            if not value.categories.equals(self.categories):
                raise ValueError("Cannot set a Categorical with another, "
                                 "without identical categories")

        rvalue = value if is_list_like(value) else [value]

        from pandas import Index
        to_add = Index(rvalue).difference(self.categories)

        # no assignments of values not in categories, but it's always ok to set
        # something to np.nan
        if len(to_add) and not isnull(to_add).all():
            raise ValueError("Cannot setitem on a Categorical with a new "
                             "category, set the categories first")

        # set by position
        if isinstance(key, (int, np.integer)):
            pass

        # tuple of indexers (dataframe)
        elif isinstance(key, tuple):
            # only allow 1 dimensional slicing, but can
            # in a 2-d case be passd (slice(None),....)
            if len(key) == 2:
                if not is_null_slice(key[0]):
                    raise AssertionError("invalid slicing for a 1-ndim "
                                         "categorical")
                key = key[1]
            elif len(key) == 1:
                key = key[0]
            else:
                raise AssertionError("invalid slicing for a 1-ndim "
                                     "categorical")

        # slicing in Series or Categorical
        elif isinstance(key, slice):
            pass

        # Array of True/False in Series or Categorical
        else:
            # There is a bug in numpy, which does not accept a Series as a
            # indexer
            # https://github.com/pydata/pandas/issues/6168
            # https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
            # FIXME: remove when numpy 1.9 is the lowest numpy version pandas
            # accepts...
            key = np.asarray(key)

        # Encode the assigned values as codes against our categories.
        lindexer = self.categories.get_indexer(rvalue)

        # FIXME: the following can be removed after GH7820 is fixed:
        # https://github.com/pydata/pandas/issues/7820
        # float categories do currently return -1 for np.nan, even if np.nan is
        # included in the index -> "repair" this here
        if isnull(rvalue).any() and isnull(self.categories).any():
            nan_pos = np.where(isnull(self.categories))[0]
            lindexer[lindexer == -1] = nan_pos

        lindexer = self._maybe_coerce_indexer(lindexer)
        self._codes[key] = lindexer
    # reduction ops #
    def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
                filter_type=None, **kwds):
        """ perform the reduction type operation """
        # Dispatch by reduction name to the matching method (e.g. min/max).
        # Reductions that Categorical does not implement (sum, mean, ...)
        # fall through to the TypeError below.
        func = getattr(self, name, None)
        if func is None:
            raise TypeError("Categorical cannot perform the operation "
                            "{op}".format(op=name))
        return func(numeric_only=numeric_only, **kwds)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def mode(self):
"""
Returns the mode(s) of the Categorical.
Empty if nothing occurs at least 2 times. Always returns `Categorical`
even if only one value.
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas.hashtable as htable
good = self._codes != -1
values = sorted(htable.mode_int64(_ensure_int64(self._codes[good])))
result = Categorical(values=values, categories=self.categories,
ordered=self.ordered, fastpath=True)
return result
def unique(self):
"""
Return the ``Categorical`` which ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keeps existing order.
Returns
-------
unique values : ``Categorical``
"""
from pandas.core.nanops import unique1d
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = sorted(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
return (self.is_dtype_equal(other) and
np.array_equal(self._codes, other._codes))
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return (self.categories.equals(other.categories) and
self.ordered == other.ordered)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.tools.merge import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return Categorical(values=codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
# The Series.cat accessor
class CategoricalAccessor(PandasDelegate, NoNewAttributesMixin):
    """
    Accessor object for categorical properties of the Series values.
    Be aware that assigning to `categories` is a inplace operation, while all
    methods return new categorical data per default (but can be called with
    `inplace=True`).
    Examples
    --------
    >>> s.cat.categories
    >>> s.cat.categories = list('abc')
    >>> s.cat.rename_categories(list('cab'))
    >>> s.cat.reorder_categories(list('cab'))
    >>> s.cat.add_categories(['d','e'])
    >>> s.cat.remove_categories(['d'])
    >>> s.cat.remove_unused_categories()
    >>> s.cat.set_categories(list('abcde'))
    >>> s.cat.as_ordered()
    >>> s.cat.as_unordered()
    """

    def __init__(self, values, index):
        # `values` is the underlying Categorical; `index` is the owning
        # Series' index, reattached to every Series the accessor returns.
        self.categorical = values
        self.index = index
        self._freeze()  # forbid new attributes after construction

    def _delegate_property_get(self, name):
        # Property reads are forwarded verbatim to the Categorical.
        return getattr(self.categorical, name)

    def _delegate_property_set(self, name, new_values):
        # Property writes mutate the underlying Categorical in place.
        return setattr(self.categorical, name, new_values)

    @property
    def codes(self):
        # Expose the integer codes as a Series aligned on the original index.
        from pandas import Series
        return Series(self.categorical.codes, index=self.index)

    def _delegate_method(self, name, *args, **kwargs):
        # Call through to the Categorical; non-None results are re-wrapped
        # as a Series so the accessor API stays Series-flavored.
        from pandas import Series
        result = getattr(self.categorical, name)(*args, **kwargs)
        if result is not None:
            return Series(result, index=self.index)
# Wire the delegated properties and methods onto the accessor class.
CategoricalAccessor._add_delegate_accessors(
    delegate=Categorical,
    accessors=["categories", "ordered"],
    typ='property')
CategoricalAccessor._add_delegate_accessors(
    delegate=Categorical,
    accessors=["rename_categories", "reorder_categories", "add_categories",
               "remove_categories", "remove_unused_categories",
               "set_categories", "as_ordered", "as_unordered"],
    typ='method')
# utility routines
def _get_codes_for_values(values, categories):
    """
    utility routine to turn values into codes given the specified categories
    """
    from pandas.core.algorithms import _get_data_algo, _hashtables
    # Hash-table lookups require both sides to share a dtype; fall back to
    # object dtype when they do not match.
    if not is_dtype_equal(values.dtype, categories.dtype):
        values = _ensure_object(values)
        categories = _ensure_object(categories)
    (hash_klass, _), vals = _get_data_algo(values, _hashtables)
    (_, _), cats = _get_data_algo(categories, _hashtables)
    table = hash_klass(len(cats))
    table.map_locations(cats)
    # lookup() yields -1 for values absent from `categories` (missing values)
    return _coerce_indexer_dtype(table.lookup(vals), cats)
def _convert_to_list_like(list_like):
    """Normalize `list_like` into something safely usable as a 1-d sequence."""
    # Anything carrying a dtype (ndarray/Index/Series) and plain lists pass
    # through unchanged.
    if hasattr(list_like, "dtype") or isinstance(list_like, list):
        return list_like
    # Other sequences, tuples and generators are materialized as a list.
    if (is_sequence(list_like) or isinstance(list_like, tuple) or
            isinstance(list_like, types.GeneratorType)):
        return list(list_like)
    # Scalars (and any leftover unrecognized object) get wrapped.
    return [list_like]
| mit |
pprett/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 50 | 7817 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
https://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
https://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
    (redirects_url, redirects_filename),
    (page_links_url, page_links_filename),
]
# Download each archive once; skipped on subsequent runs if the file exists.
for url, filename in resources:
    if not os.path.exists(filename):
        print("Downloading data from '%s', please wait..." % url)
        opener = urlopen(url)
        # NOTE(review): the whole archive is read into memory and the file
        # handle is never explicitly closed — acceptable for a one-shot
        # example script, but worth confirming for large dumps.
        open(filename, 'wb').write(opener.read())
        print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
    """Find the index of an article name after redirect resolution"""
    # Follow the (pre-resolved, transitively closed) redirect map first,
    # then assign the next free integer id on first sight.
    resolved = redirects.get(k, k)
    return index_map.setdefault(resolved, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
# Skip the leading '<' plus the common prefix, and drop the trailing '>'.
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)


def short_name(nt_uri):
    """Remove the < and > URI markers and the common URI prefix"""
    return nt_uri[DBPEDIA_RESOURCE_PREFIX_LEN + 1:-1]
def get_redirects(redirects_filename):
    """Parse the redirections and build a transitively closed map out of it"""
    redirects = {}
    print("Parsing the NT redirect file")
    # Each NT line is "<source> <predicate> <target> ." — exactly 4 tokens.
    # NOTE(review): BZ2File yields bytes on Python 3; this script appears to
    # assume Python 2 str semantics — confirm before porting.
    for l, line in enumerate(BZ2File(redirects_filename)):
        split = line.split()
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        redirects[short_name(split[0])] = short_name(split[2])
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
    # compute the transitive closure
    print("Computing the transitive closure of the redirect relation")
    for l, source in enumerate(redirects.keys()):
        transitive_target = None
        target = redirects[source]
        # `seen` guards against redirect cycles, which would loop forever.
        seen = set([source])
        while True:
            transitive_target = target
            target = redirects.get(target)
            if target is None or target in seen:
                break
            seen.add(target)
        # map the source directly to the end of its redirect chain
        redirects[source] = transitive_target
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
    return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
    """Extract the adjacency graph as a scipy sparse matrix
    Redirects are resolved first.
    Returns X, the scipy sparse adjacency matrix, redirects as python
    dict from article names to article names and index_map a python dict
    from article names to python int (article indexes).

    Parameters
    ----------
    redirects_filename : path to the bz2-compressed NT redirects dump
    page_links_filename : path to the bz2-compressed NT page-links dump
    limit : optional int, stop after this many link lines (RAM guard)
    """
    print("Computing the redirect map")
    redirects = get_redirects(redirects_filename)
    print("Computing the integer index map")
    index_map = dict()
    links = list()
    for l, line in enumerate(BZ2File(page_links_filename)):
        split = line.split()
        if len(split) != 4:
            print("ignoring malformed line: " + line)
            continue
        # resolve redirects and map both endpoints to integer ids
        i = index(redirects, index_map, short_name(split[0]))
        j = index(redirects, index_map, short_name(split[2]))
        links.append((i, j))
        if l % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), l))
        if limit is not None and l >= limit - 1:
            break
    print("Computing the adjacency matrix")
    # LIL format allows cheap incremental element assignment ...
    X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
    for i, j in links:
        X[i, j] = 1.0
    del links
    print("Converting to CSR representation")
    # ... while CSR is the efficient format for the arithmetic done later.
    X = X.tocsr()
    print("CSR conversion done")
    return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
    redirects_filename, page_links_filename, limit=5000000)
# invert the name -> id map so results can be reported by article name
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
    """Power iteration computation of the principal eigenvector
    This method is also known as Google PageRank and the implementation
    is based on the one from the NetworkX project (BSD licensed too)
    with copyrights by:
      Aric Hagberg <hagberg@lanl.gov>
      Dan Schult <dschult@colgate.edu>
      Pieter Swart <swart@lanl.gov>
    """
    n = X.shape[0]
    X = X.copy()

    # Row-normalize so each vertex distributes its score over its out-links.
    incoming_counts = np.asarray(X.sum(axis=1)).ravel()
    print("Normalizing the graph")
    for row in incoming_counts.nonzero()[0]:
        X.data[X.indptr[row]:X.indptr[row + 1]] *= 1.0 / incoming_counts[row]

    # Dangling vertices (no out-links) spread their mass uniformly.
    dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()

    scores = np.ones(n, dtype=np.float32) / n  # uniform initial guess
    for iteration in range(max_iter):
        print("power iteration #%d" % iteration)
        previous = scores
        scores = (alpha * (scores * X + np.dot(dangle, previous))
                  + (1 - alpha) * previous.sum() / n)

        # check convergence: normalized l_inf norm
        denom = np.abs(scores).max()
        if denom == 0.0:
            denom = 1.0
        err = np.abs(scores - previous).max() / denom
        print("error: %0.6f" % err)
        if err < n * tol:
            break

    return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
# report the ten highest-centrality articles by name
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
tawsifkhan/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
    from mpl_toolkits.basemap import Basemap
    basemap = True
except ImportError:
    basemap = False

# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']

# Training points as (latitude, longitude) pairs, one row per observation.
Xtrain = np.vstack([data['train']['dd lat'],
                    data['train']['dd long']]).T
# 0/1 labels: 1 for Microryzomys minutus, 0 for Bradypus variegatus.
ytrain = np.array([d.decode('ascii').startswith('micro')
                   for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180.  # Convert lat/long to radians

# Set up the data grid for the contour plot
# (subsampled 5x in each direction to keep the KDE evaluation fast)
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
# -9999 marks ocean cells in the coverage raster; keep only land points.
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.  # grid points in radians too, to match Xtrain

# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
    plt.subplot(1, 2, i + 1)

    # construct a kernel density estimate of the distribution
    # (haversine metric + ball tree: KDE on the sphere, not the plane)
    print(" - computing KDE in spherical coordinates")
    kde = KernelDensity(bandwidth=0.04, metric='haversine',
                        kernel='gaussian', algorithm='ball_tree')
    kde.fit(Xtrain[ytrain == i])

    # evaluate only on the land: -9999 indicates ocean
    Z = -9999 + np.zeros(land_mask.shape[0])
    Z[land_mask] = np.exp(kde.score_samples(xy))
    Z = Z.reshape(X.shape)

    # plot contours of the density
    levels = np.linspace(0, Z.max(), 25)
    plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)

    if basemap:
        print(" - plot coastlines using basemap")
        m = Basemap(projection='cyl', llcrnrlat=Y.min(),
                    urcrnrlat=Y.max(), llcrnrlon=X.min(),
                    urcrnrlon=X.max(), resolution='c')
        m.drawcoastlines()
        m.drawcountries()
    else:
        # fallback: trace the land/ocean boundary from the coverage raster
        print(" - plot coastlines from coverage")
        plt.contour(X, Y, land_reference,
                    levels=[-9999], colors="k",
                    linestyles="solid")
    plt.xticks([])
    plt.yticks([])
    plt.title(species_names[i])

plt.show()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.