repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
fabianp/scikit-learn | sklearn/neighbors/unsupervised.py | 106 | 4461 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p: integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
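# Illustrative sketch (not part of scikit-learn): the docstring above notes
# that ``metric`` may be a callable applied to pairs of rows. A minimal,
# hedged example of that usage (the helper name is hypothetical) could be:
#
#     import numpy as np
#     from sklearn.neighbors import NearestNeighbors
#
#     def l1_distance(a, b):          # any function returning a float works
#         return np.abs(a - b).sum()
#
#     nn = NearestNeighbors(n_neighbors=2, metric=l1_distance)
#     nn.fit([[0, 0], [1, 1], [2, 2]])
#     dist, ind = nn.kneighbors([[0.1, 0.1]])
#
# Passing the metric name as a string (e.g. 'manhattan') is typically faster
# than a Python callable.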
| bsd-3-clause |
vishnumani2009/OpenSource-Open-Ended-Statistical-toolkit | FRONTEND/pyroc.py | 2 | 12161 | #!/usr/bin/env python
# encoding: utf-8
"""
PyRoc.py
Created by Marcel Caraciolo on 2009-11-16.
Copyright (c) 2009 Federal University of Pernambuco. All rights reserved.
IMPORTANT:
Based on the original code by Eithon Cadag (http://www.eithoncadag.com/files/pyroc.txt)
Python Module for calculating the area under the receiver operating characteristic curve, given a dataset.
0.1 - First Release
0.2 - Updated the code by adding new metrics for analysis with the confusion matrix.
"""
import random
import math
try:
import pylab
except:
print "error:\tcan't import pylab module, you must install the module:\n"
print "\tmatplotlib to plot charts!'\n"
def random_mixture_model(pos_mu=.6,pos_sigma=.1,neg_mu=.4,neg_sigma=.1,size=200):
pos = [(1,random.gauss(pos_mu,pos_sigma),) for x in xrange(size/2)]
neg = [(0,random.gauss(neg_mu,neg_sigma),) for x in xrange(size/2)]
return pos+neg
def plot_multiple_rocs_separate(rocList,title='', labels = None, equal_aspect = True):
""" Plot multiples ROC curves as separate at the same painting area. """
pylab.clf()
pylab.title(title)
for ix, r in enumerate(rocList):
ax = pylab.subplot(4,4,ix+1)
pylab.ylim((0,1))
pylab.xlim((0,1))
ax.set_yticklabels([])
ax.set_xticklabels([])
if equal_aspect:
cax = pylab.gca()
cax.set_aspect('equal')
if not labels:
labels = ['' for x in rocList]
pylab.text(0.2,0.1,labels[ix],fontsize=8)
pylab.plot([x[0] for x in r.derived_points],[y[1] for y in r.derived_points], 'r-',linewidth=2)
pylab.show()
def _remove_duplicate_styles(rocList):
""" Checks for duplicate linestyles and replaces duplicates with a random one."""
pref_styles = ['cx-','mx-','yx-','gx-','bx-','rx-']
points = 'ov^>+xd'
colors = 'bgrcmy'
lines = ['-','-.',':']
rand_ls = []
for r in rocList:
if r.linestyle not in rand_ls:
rand_ls.append(r.linestyle)
else:
while True:
if len(pref_styles) > 0:
pstyle = pref_styles.pop()
if pstyle not in rand_ls:
r.linestyle = pstyle
rand_ls.append(pstyle)
break
else:
ls = ''.join(random.sample(colors,1) + random.sample(points,1)+ random.sample(lines,1))
if ls not in rand_ls:
r.linestyle = ls
rand_ls.append(ls)
break
def plot_multiple_roc(rocList,title='',labels=None, include_baseline=False, equal_aspect=True):
""" Plots multiple ROC curves on the same chart.
Parameters:
rocList: the list of ROCData objects
title: The tile of the chart
labels: The labels of each ROC curve
include_baseline: if it's True include the random baseline
equal_aspect: keep equal aspect for all roc curves
"""
pylab.clf()
pylab.ylim((0,1))
pylab.xlim((0,1))
pylab.xticks(pylab.arange(0,1.1,.1))
pylab.yticks(pylab.arange(0,1.1,.1))
pylab.grid(True)
if equal_aspect:
cax = pylab.gca()
cax.set_aspect('equal')
pylab.xlabel("1 - Specificity")
pylab.ylabel("Sensitivity")
pylab.title(title)
if not labels:
labels = [ '' for x in rocList]
_remove_duplicate_styles(rocList)
for ix, r in enumerate(rocList):
pylab.plot([x[0] for x in r.derived_points], [y[1] for y in r.derived_points], r.linestyle, linewidth=1, label=labels[ix])
if include_baseline:
pylab.plot([0.0,1.0], [0.0, 1.0], 'k-', label= 'random')
if labels:
pylab.legend(loc='lower right')
pylab.show()
def load_decision_function(path):
""" Function to load the decision function (DataSet)
Parameters:
path: The dataset file path
Return:
model_data: The data modeled
"""
fileHandler = open(path,'r')
reader = fileHandler.readlines()
reader = [line.strip().split() for line in reader]
model_data = []
for line in reader:
if len(line) == 0: continue
fClass,fValue = line
model_data.append((int(fClass), float(fValue)))
fileHandler.close()
return model_data
class ROCData(object):
""" Class that generates an ROC Curve for the data.
Data is in the following format: a list l of tuples t
where:
t[0] = 1 for positive class and t[0] = 0 for negative class
t[1] = score
t[2] = label
"""
def __init__(self,data,linestyle='rx-'):
""" Constructor takes the data and the line style for plotting the ROC Curve.
Parameters:
data: The data, a list l of tuples t (l = [t_0,t_1,...t_n]) where:
t[0] = 1 for positive class and 0 for negative class
t[1] = a score
t[2] = any label (optional)
lineStyle: The matplotlib style string for plots.
Note: The ROCData is still usable w/o matplotlib. The AUC is still available,
but plots cannot be generated.
"""
self.data = sorted(data,lambda x,y: cmp(y[1],x[1]))
self.linestyle = linestyle
self.auc() #Seed initial points with default full ROC
def auc(self,fpnum=0):
""" Uses the trapezoidal ruel to calculate the area under the curve. If fpnum is supplied, it will
calculate a partial AUC, up to the number of false positives in fpnum (the partial AUC is scaled
to between 0 and 1).
It assumes that the positive class is expected to have the higher scores (s(+) > s(-))
Parameters:
fpnum: The cumulative FP count (fps)
Return:
the area under the (partial) ROC curve computed with the trapezoidal rule.
"""
fps_count = 0
relevant_pauc = []
current_index = 0
max_n = len([x for x in self.data if x[0] == 0])
if fpnum == 0:
relevant_pauc = [x for x in self.data]
elif fpnum > max_n:
fpnum = max_n
#Find the upper limit of the data that does not exceed n FPs
else:
while fps_count < fpnum:
relevant_pauc.append(self.data[current_index])
if self.data[current_index][0] == 0:
fps_count += 1
current_index +=1
total_n = len([x for x in relevant_pauc if x[0] == 0])
total_p = len(relevant_pauc) - total_n
#Convert to points in a ROC
previous_df = -1000000.0
current_index = 0
points = []
tp_count, fp_count = 0.0 , 0.0
tpr, fpr = 0, 0
while current_index < len(relevant_pauc):
df = relevant_pauc[current_index][1]
if previous_df != df:
points.append((fpr,tpr,fp_count))
if relevant_pauc[current_index][0] == 0:
fp_count +=1
elif relevant_pauc[current_index][0] == 1:
tp_count +=1
fpr = fp_count/total_n
tpr = tp_count/total_p
previous_df = df
current_index +=1
points.append((fpr,tpr,fp_count)) #Add last point
points.sort(key=lambda i: (i[0],i[1]))
self.derived_points = points
return self._trapezoidal_rule(points)
def _trapezoidal_rule(self,curve_pts):
""" Method to calculate the area under the ROC curve"""
cum_area = 0.0
for ix,x in enumerate(curve_pts[0:-1]):
cur_pt = x
next_pt = curve_pts[ix+1]
cum_area += ((cur_pt[1]+next_pt[1])/2.0) * (next_pt[0]-cur_pt[0])
return cum_area
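# Worked example (added for illustration): for the ROC points
# (0.0, 0.0), (0.5, 0.8), (1.0, 1.0) the trapezoidal rule gives
# (0.0 + 0.8) / 2 * 0.5 + (0.8 + 1.0) / 2 * 0.5 = 0.2 + 0.45 = 0.65,
# i.e. an AUC of 0.65.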
def calculateStandardError(self,fpnum=0):
""" Returns the standard error associated with the curve.
Parameters:
fpnum: The cumulative FP count (fps)
Return:
the standard error.
"""
area = self.auc(fpnum)
#real positive cases
Na = len([ x for x in self.data if x[0] == 1])
#real negative cases
Nn = len([ x for x in self.data if x[0] == 0])
Q1 = area / (2.0 - area)
Q2 = 2 * area * area / (1.0 + area)
return math.sqrt( ( area * (1.0 - area) + (Na - 1.0) * (Q1 - area*area) +
(Nn - 1.0) * (Q2 - area * area)) / (Na * Nn))
def plot(self,title='',include_baseline=False,equal_aspect=True):
""" Method that generates a plot of the ROC curve
Parameters:
title: Title of the chart
include_baseline: Add the baseline plot line if it's True
equal_aspect: Aspects to be equal for all plot
"""
pylab.clf()
pylab.plot([x[0] for x in self.derived_points], [y[1] for y in self.derived_points], self.linestyle)
if include_baseline:
pylab.plot([0.0,1.0], [0.0,1.0],'k-.')
pylab.ylim((0,1))
pylab.xlim((0,1))
pylab.xticks(pylab.arange(0,1.1,.1))
pylab.yticks(pylab.arange(0,1.1,.1))
pylab.grid(True)
if equal_aspect:
cax = pylab.gca()
cax.set_aspect('equal')
pylab.xlabel('1 - Specificity')
pylab.ylabel('Sensitivity')
pylab.title(title)
pylab.show()
def confusion_matrix(self,threshold,do_print=False):
""" Returns the confusion matrix (in dictionary form) for a fiven threshold
where all elements > threshold are considered 1 , all else 0.
Parameters:
threshold: threshold to check the decision function
do_print: if it's True show the confusion matrix in the screen
Return:
the dictionary with the TP, FP, FN, TN
"""
pos_points = [x for x in self.data if x[1] >= threshold]
neg_points = [x for x in self.data if x[1] < threshold]
tp,fp,fn,tn = self._calculate_counts(pos_points,neg_points)
if do_print:
print "\t Actual class"
print "\t+(1)\t-(0)"
print "+(1)\t%i\t%i\tPredicted" % (tp,fp)
print "-(0)\t%i\t%i\tclass" % (fn,tn)
return {'TP': tp, 'FP': fp, 'FN': fn, 'TN': tn}
def evaluateMetrics(self,matrix,metric=None,do_print=False):
""" Returns the metrics evaluated from the confusion matrix.
Parameters:
matrix: the confusion matrix
metric: the specific metric of the default value is None (all metrics).
do_print: if it's True show the metrics in the screen
Return:
the dictionary with the Accuracy, Sensitivity, Specificity,Efficiency,
PositivePredictiveValue, NegativePredictiveValue, PhiCoefficient
"""
accuracy = (matrix['TP'] + matrix['TN'])/ float(sum(matrix.values()))
sensitivity = (matrix['TP'])/ float(matrix['TP'] + matrix['FN'])
specificity = (matrix['TN'])/float(matrix['TN'] + matrix['FP'])
efficiency = (sensitivity + specificity) / 2.0
positivePredictiveValue = matrix['TP'] / float(matrix['TP'] + matrix['FP'])
NegativePredictiveValue = matrix['TN'] / float(matrix['TN'] + matrix['FN'])
PhiCoefficient = (matrix['TP'] * matrix['TN'] - matrix['FP'] * matrix['FN'])/(
math.sqrt( (matrix['TP'] + matrix['FP']) *
(matrix['TP'] + matrix['FN']) *
(matrix['TN'] + matrix['FP']) *
(matrix['TN'] + matrix['FN']))) or 1.0
if do_print:
print 'Sensitivity: ' , sensitivity
print 'Specificity: ' , specificity
print 'Efficiency: ' , efficiency
print 'Accuracy: ' , accuracy
print 'PositivePredictiveValue: ' , positivePredictiveValue
print 'NegativePredictiveValue' , NegativePredictiveValue
print 'PhiCoefficient' , PhiCoefficient
return {'SENS': sensitivity, 'SPEC': specificity, 'ACC': accuracy, 'EFF': efficiency,
'PPV':positivePredictiveValue, 'NPV':NegativePredictiveValue , 'PHI': PhiCoefficient}
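# Worked example (added for illustration): with TP=40, FP=10, FN=5, TN=45
# the formulas above give accuracy = 85/100 = 0.85, sensitivity = 40/45 ~ 0.889,
# specificity = 45/55 ~ 0.818, PPV = 40/50 = 0.8 and NPV = 45/50 = 0.9.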
def _calculate_counts(self,pos_data,neg_data):
""" Calculates the number of false positives, true positives, false negatives and true negatives """
tp_count = len([x for x in pos_data if x[0] == 1])
fp_count = len([x for x in pos_data if x[0] == 0])
fn_count = len([x for x in neg_data if x[0] == 1])
tn_count = len([x for x in neg_data if x[0] == 0])
return tp_count,fp_count,fn_count, tn_count
if __name__ == '__main__':
print "PyRoC - ROC Curve Generator"
print "By Marcel Pinheiro Caraciolo (@marcelcaraciolo)"
print "http://aimotion.bogspot.com\n"
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-f', '--file', dest='origFile', help="Path to a file with the class and decision function. The first column of each row is the class, and the second the decision score.")
parser.add_option("-n", "--max fp", dest = "fp_n", default=0, help= "Maximum false positives to calculate up to (for partial AUC).")
parser.add_option("-p","--plot", action="store_true",dest='plotFlag', default=False, help="Plot the ROC curve (matplotlib required)")
parser.add_option("-t",'--title', dest= 'ptitle' , default='' , help = 'Title of plot.')
(options,args) = parser.parse_args()
if (not options.origFile):
parser.print_help()
exit()
df_data = load_decision_function(options.origFile)
roc = ROCData(df_data)
roc_n = int(options.fp_n)
print "ROC AUC: %s" % (str(roc.auc(roc_n)),)
print 'Standard Error: %s' % (str(roc.calculateStandardError(roc_n)),)
print ''
for pt in roc.derived_points:
print pt[0],pt[1]
if options.plotFlag:
roc.plot(options.ptitle,True,True)
| gpl-3.0 |
dpsfotocestou/SkyDrop | skydrop/utils/serial_chart/chart_3D.py | 5 | 2332 | import serial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def add_line(name, x, index):
item = {}
item["name"] = name
item["data"] = np.zeros(len(x))
item["index"] = index
item["axis"] = False
return item
time = np.arange(2)
y = []
y.append(add_line("x", time, 3))
y.append(add_line("y", time, 4))
y.append(add_line("z", time, 5))
index = 1
s = serial.Serial("/dev/ttyUSB0", 921600)
fig = plt.figure(1, figsize=(15,13))
ax = fig.add_subplot(111, projection='3d')
axis_x, = ax.plot(time, y[0]["data"], y[1]["data"], "r")
axis_y, = ax.plot(time, y[0]["data"], y[1]["data"], "g")
axis_z, = ax.plot(time, y[0]["data"], y[1]["data"], "b")
single, = ax.plot(time, y[0]["data"], y[1]["data"], "k-o", lw=5, zs = y[2]["data"])
ax.set_autoscale_on(True)
leg = ["x", "y", "z", "sum"]
plt.legend(leg)
plt.ion()
plt.show()
v_min = 100000
v_max = -100000
skip = 0
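# Main loop (comment added for clarity): read one semicolon-separated sample
# per line from the serial port, keep the columns listed in y (indices 3-5,
# i.e. x, y, z), track the value range for the axis limits and redraw the 3D
# axes every 20th sample.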
while True:
line = s.readline()
data = line.split(";")
# print data
for line in y:
val = 0
try:
tmp = data[line["index"]]
val = float(tmp)
except:
print "Err"
line["data"][index] = val
if val > v_max:
v_max = val
if val < v_min:
v_min = val
'''draw'''
if skip % 20 == 0:
single.set_xdata(y[0]["data"])
single.set_ydata(y[1]["data"])
single.set_3d_properties(zs = y[2]["data"])
x_data = [0, y[0]["data"][1]]
y_data = [0, 0]
axis_x.set_xdata(x_data)
axis_x.set_ydata(y_data)
axis_x.set_3d_properties(zs = [0,0])
x_data = [y[0]["data"][1], y[0]["data"][1]]
y_data = [0, y[1]["data"][1]]
axis_y.set_xdata(x_data)
axis_y.set_ydata(y_data)
axis_y.set_3d_properties(zs = [0,0])
x_data = [y[0]["data"][1], y[0]["data"][1]]
y_data = [y[1]["data"][1], y[1]["data"][1]]
axis_z.set_xdata(x_data)
axis_z.set_ydata(y_data)
axis_z.set_3d_properties(zs = y[2]["data"])
ax.set_ylim([-v_max, v_max])
ax.set_xlim([-v_max, v_max])
ax.set_zlim([-v_max, v_max])
ax.figure.canvas.draw()
skip += 1
# index = 1
| gpl-2.0 |
dominiktomicevic/pedestrian | classifier/extractor.py | 1 | 6723 | from itertools import product, repeat, chain, ifilter, imap
from multiprocessing import Pool, cpu_count
from sklearn.preprocessing import binarize
from utils.profiling import profile
from numpy.random import randint
from functools import partial
from random import sample
import numpy as np
import logging
logger = logging.getLogger(__name__)
def on_edge(mask, x, y):
""" checks if the point defined with coordinates x and y lies on the edge
of a pedestrian on the pedestrian mask
to lie on an edge is defined by having at least one non-pedestrian
pixel in the surroundings of the given pixel (left, right, up or down)
e.g. we're checking if the middle pixel is an edge one
0 1 1
... 0 1 1 ... -> on_edge? True
0 0 1
0 1 1
... 1 1 1 ... -> on_edge? False
0 1 1
mask: array-like
two dimensional array of binarized pixels
x: int
point coordinate on the x-axis
y: int
point coordinate on the y-axis
returns: bool
a boolean stating whether the point lies on the edge of the
pedestrian
"""
return mask[x, y] and not all([mask[x + dx, y + dy] for dx, dy in
zip([-1, 1, 0, 0], [0, 0, -1, 1])])
def window(image, d, x, y):
""" extracts a window of size d pixels in each direction from the point
defined by x and y coordinates
total number of pixels in the window is (2 * d + 1)^2
window is binarized if not already binary and reshaped to a vector
e.g. d = 2, P(x, y)
0 0 0 0 0 0 0 0 0 0
-----------
0 | 0 0 0 0 0 | 0 0 1 1
0 | 0 0 0 0 0 | 1 1 1 1
0 | 0 0 P 0 1 | 1 1 1 1
0 | 0 0 0 0 1 | 1 1 1 1
0 | 0 0 0 1 1 | 1 1 1 1
-----------
0 0 0 1 1 1 1 1 1 1
0 0 0 0 1 1 1 1 1 1
0 0 0 1 1 1 1 1 1 1
image: array-like
two dimensional array
d: int
number of pixels to take in each direction from the center
x: int
x coordinate of the window center
y: int
y coordinate of the window center
returns: array
a binary array with 3 * (2 * d + 1)^2 elements (the window from each of the b, g, r channels)
"""
b, g, r = image
w = lambda img: img[(x - d):(x + d + 1), (y - d):(y + d + 1)].reshape(-1)
return binarize(np.hstack([w(b), w(g), w(r)]), 0.5)
def samples(s, d):
""" generates classifier input samples from a dataset sample
s: object
compatible with the sample class interface
d: int
number of pixels to take in each direction from the center when
generating an input sample via the moving window
return: list of tuples
tuples contain two elements, an input vector and a target vector
extracted from a given sample
"""
shape = s.proc[0].shape
# generate a cartesian product of all possible point coordinates given
# image shape and offset d
# filter out all points not representing pedestrian mask edges
# compute input vectors for all remaining points from their respective
# windows
positive = imap(lambda xy: (window(s.proc, d, *xy), 1),
ifilter(lambda xy: on_edge(s.mask, *xy),
product(*map(lambda x: xrange(d, x - d),
shape))))
# create an infinite uniform random sampling list of point coordinates
# inside the given image
# filter out all points representing positive examples to get an infinite
# list of point coordinates representing negative examples
# compute input vectors for all points from their respective windows
negative = imap(lambda xy: (window(s.proc, d, *xy), 0),
ifilter(lambda xy: not s.mask.item(*xy),
imap(lambda o: map(lambda x: randint(d, x - d), o),
repeat(shape))))
# zip a finite list of positive examples and an infinite list of negative
# examples to get an equal amount of positive and negative examples and has
# a length of len(positive)
# chain all the zipped elements to get a flattened list of examples
# containing both positive and negative examples in one list
return list(chain(*zip(positive, negative)))
def generate(dataset, w, threaded=True):
""" generate a list of classifier data samples from all dataset samples
with a parallel implementation using a thread pool
dataset: object
object containing samples list compatible with the sample class
interface
w: int
size of the window used to extract features from an image
must be an odd number
returns: iterator
iterator contains all the positive and negative data samples
generated from the dataset
"""
if not threaded:
logger.info('extracting samples using 1 thread')
return chain(*map(partial(samples, d=(w - 1) / 2), dataset.samples))
logger.info('extracting samples using {0} threads'.format(cpu_count()))
return chain(*pool.map(partial(samples, d=(w - 1) / 2), dataset.samples))
@profile
def extract(dataset, w=11, N=25000, threaded=True):
""" extracts the training inputs and targets from the dataset
dataset: object
object containing samples list compatible with the sample class
interface
w: int
size of the window used to extract features from an image
must be an odd number
N: int
the number of samples to extract from the dataset. samples are
extracted randomly from the list of all possible samples
must be positive
returns: tuple of numpy arrays
the tuple contains two numpy arrays, one represents an input two
dimensional array and the other one represents a target vector
"""
assert(w % 2 == 1)
assert(N > 0)
# generates a list of data samples used in the model training
#
# randomly samples the list of samples and returns a maximum of
# N data samples as tuples of (input, target) vectors
#
# zips the sample tuples to divide input vectors in a separate tuple and
# target vectors in a separate tuple
ins, ts = zip(*sample(list(generate(dataset, w, threaded=threaded)), N))
# vertically concatenates list of numpy arrays and concatenates a list
# of target vectors to a numpy array
return (np.vstack(ins), np.array(ts))
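# A minimal usage sketch (illustrative; ``dataset`` is assumed to be any object
# exposing a ``samples`` list whose items provide ``proc`` (b, g, r images) and
# a binary ``mask``):
#
#     inputs, targets = extract(dataset, w=11, N=25000, threaded=False)
#     # inputs has shape (N, 3 * w * w), targets has shape (N,)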
# process pool for concurrent sample generation
pool = Pool(cpu_count())
| mit |
shyamalschandra/scikit-learn | sklearn/model_selection/_split.py | 8 | 55300 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>,
# Raghav R V <rvraghav93@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import inspect
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..base import _pprint
from ..gaussian_process.kernels import Kernel as GPKernel
__all__ = ['BaseCrossValidator',
'KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'LabelShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, labels):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, labels=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, labels)
"""
for test_index in self._iter_test_indices(X, y, labels):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, labels=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_folds=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def _iter_test_indices(self, X, y=None, labels=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_folds=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, labels=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold and StratifiedKFold"""
@abstractmethod
def __init__(self, n_folds, shuffle, random_state):
if not isinstance(n_folds, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_folds, type(n_folds)))
n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,), optional
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
n_samples = _num_samples(X)
if self.n_folds > n_samples:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(self.n_folds,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, labels):
yield train, test
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_folds
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_folds=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_folds=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_folds`` folds have size
``n_samples // n_folds + 1``, other folds have size
``n_samples // n_folds``, where ``n_samples`` is the number of samples.
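For example, with ``n_samples=10`` and ``n_folds=3`` the folds have sizes
4, 3 and 3.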
See also
--------
StratifiedKFold
Takes label information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_folds, shuffle, random_state)
self.shuffle = shuffle
def _iter_test_indices(self, X, y=None, labels=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_folds = self.n_folds
fold_sizes = (n_samples // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n_samples % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
Parameters
----------
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(n_folds=2)
>>> label_kfold.get_n_splits(X, y, labels)
2
>>> print(label_kfold)
LabelKFold(n_folds=2)
>>> for train_index, test_index in label_kfold.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_folds=3):
super(LabelKFold, self).__init__(n_folds, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if self.n_folds > n_labels:
raise ValueError("Cannot have number of folds n_folds=%d greater"
" than the number of labels: %d."
% (self.n_folds, n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
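# (e.g. with label sizes [4, 3, 2, 1] and n_folds=2 the greedy pass assigns
#  4 -> fold 0, 3 -> fold 1, 2 -> fold 1, 1 -> fold 0, i.e. 5 samples per fold)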
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
indices = label_to_fold[labels]
for f in range(self.n_folds):
yield np.where(indices == f)[0]
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_folds=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_folds=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size ``trunc(n_samples / n_folds)``; the last one holds
the remaining samples.
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_folds, shuffle, random_state)
self.shuffle = shuffle
def _make_test_folds(self, X, y=None, labels=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_labels = np.min(y_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
# So we pass np.zeros(max(c, n_folds)) as data to the KFold
per_cls_cvs = [
KFold(self.n_folds, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_folds)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_folds)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, labels=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_folds):
yield test_folds == i
class LeaveOneLabelOut(BaseCrossValidator):
"""Leave One Label Out cross-validator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneLabelOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = LeaveOneLabelOut()
>>> lol.get_n_splits(X, y, labels)
2
>>> print(lol)
LeaveOneLabelOut()
>>> for train_index, test_index in lol.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
# We make a copy of labels to avoid side-effects during iteration
labels = np.array(labels, copy=True)
unique_labels = np.unique(labels)
for i in unique_labels:
yield labels == i
def get_n_splits(self, X, y, labels):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if labels is None:
raise ValueError("The labels parameter should not be None")
return len(np.unique(labels))
class LeavePLabelOut(BaseCrossValidator):
"""Leave P Labels Out cross-validator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_labels : int
Number of labels (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePLabelOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = LeavePLabelOut(n_labels=2)
>>> lpl.get_n_splits(X, y, labels)
3
>>> print(lpl)
LeavePLabelOut(n_labels=2)
>>> for train_index, test_index in lpl.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n_labels):
self.n_labels = n_labels
def _iter_test_masks(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
labels = np.array(labels, copy=True)
unique_labels = np.unique(labels)
combi = combinations(range(len(unique_labels)), self.n_labels)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_labels[np.array(indices)]:
test_index[labels == l] = True
yield test_index
def get_n_splits(self, X, y, labels):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if labels is None:
raise ValueError("The labels parameter should not be None")
return int(comb(len(np.unique(labels)), self.n_labels, exact=True))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
for train, test in self._iter_indices(X, y, labels):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, labels=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_iter
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_iter=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_iter=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_iter=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, labels=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(p=10)`` would be
``LabelShuffleSplit(test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_iter : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
super(LabelShuffleSplit, self).__init__(
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
classes, label_indices = np.unique(labels, return_inverse=True)
for label_train, label_test in super(
LabelShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(label_indices, label_train))
test = np.flatnonzero(np.in1d(label_indices, label_test))
yield train, test
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_iter=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_iter=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_iter, test_size, train_size, random_state)
def _iter_indices(self, X, y, labels=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
p_i = class_counts / float(n_samples)
n_i = np.round(n_train * p_i).astype(int)
t_i = np.minimum(class_counts - n_i,
np.round(n_test * p_i).astype(int))
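# Worked example (added for illustration): with class_counts = [6, 4],
# n_samples = 10, n_train = 7 and n_test = 3, p_i = [0.6, 0.4], so
# n_i = round(7 * p_i) = [4, 3] training samples per class and
# t_i = min(class_counts - n_i, round(3 * p_i)) = [2, 1] test samples per class.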
for _ in range(self.n_iter):
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < n_train or len(test) < n_test:
# We complete by randomly assigning the missing indices
missing_indices = np.where(bincount(train + test,
minlength=len(y)) == 0)[0]
missing_indices = rng.permutation(missing_indices)
train.extend(missing_indices[:(n_train - len(train))])
test.extend(missing_indices[-(n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
NOTE This does not take into account the number of samples which is known
only at split
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
Validation helper to check if the test/test sizes are meaningful wrt to the
size of the data (n_samples)
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i'
and test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i'
and train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
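# Illustrative sketch (editor's addition, not part of the original module) of how
# the validation helper above resolves float and int sizes; it relies only on
# names already imported in this module (``ceil``/``floor`` and numpy).
def _example_validate_shuffle_split():
    # 100 samples with test_size=0.25 -> 25 test samples, the rest for training
    assert _validate_shuffle_split(100, 0.25, None) == (75, 25)
    # explicit integer sizes are passed through unchanged
    assert _validate_shuffle_split(100, 10, 60) == (60, 10)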
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
        self.test_fold = np.array(test_fold, dtype=int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
            test_mask = np.zeros(len(self.test_fold), dtype=bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = cv
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv) # Both iterables and old-cv objects support len
def split(self, X=None, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if classifier is True and ``y`` is either
        binary or multiclass, :class:`StratifiedKFold` is used. In all other
        cases, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or and iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
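# Illustrative sketch (editor's addition, not part of the original module) of
# typical ``check_cv`` usage; it relies only on names already imported here.
def _example_check_cv():
    y = np.array([0, 0, 1, 1, 0, 1])
    cv = check_cv(3, y, classifier=True)  # binary target -> StratifiedKFold(3)
    assert cv.get_n_splits(np.zeros((6, 2)), y) == 3
    # an explicit iterable of (train, test) index pairs is wrapped untouched
    pairs = [(np.array([0, 1, 2, 3]), np.array([4, 5]))]
    assert check_cv(pairs).get_n_splits() == 1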
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
    Quick utility that wraps input validation,
    ``next(ShuffleSplit().split(X, y))`` and application to input data
    into a single call for splitting (and optionally subsampling) data in a
    one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
        Allowed inputs are lists, numpy arrays, scipy-sparse
        matrices or pandas dataframes.
.. versionadded:: 0.16
preserves input type instead of always casting to numpy array.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
Output type is the same as the input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if (hasattr(estimator, 'kernel') and callable(estimator.kernel) and
not isinstance(estimator.kernel, GPKernel)):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[index] for index in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
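# Illustrative sketch (editor's addition, not part of the original module) of how
# ``_safe_split`` slices a precomputed square kernel matrix on both axes. The
# estimator stub and all values below are hypothetical.
def _example_safe_split():
    class _PairwiseStub(object):
        _pairwise = True  # tells _safe_split that X is a precomputed kernel
    K = np.arange(16.).reshape(4, 4)
    y = np.array([0, 1, 0, 1])
    K_test, y_test = _safe_split(_PairwiseStub(), K, y,
                                 indices=np.array([2, 3]),
                                 train_indices=np.array([0, 1]))
    # rows correspond to test samples, columns to training samples
    assert K_test.shape == (2, 2) and list(y_test) == [0, 1]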
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
if init is object.__init__:
# No explicit constructor to introspect
args = []
else:
args = sorted(inspect.getargspec(init)[0])
if 'self' in args:
args.remove('self')
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
StingraySoftware/stingray | stingray/deadtime/model.py | 1 | 4595 | from stingray.utils import njit, prange
import numpy as np
import matplotlib.pyplot as plt
from astropy import log
try:
from scipy.special import factorial
except ImportError:
from scipy.misc import factorial
__FACTORIALS = factorial(np.arange(160))
def r_in(td, r_0):
"""Calculate incident countrate given dead time and detected countrate."""
tau = 1 / r_0
return 1. / (tau - td)
def r_det(td, r_i):
"""Calculate detected countrate given dead time and incident countrate."""
tau = 1 / r_i
return 1. / (tau + td)
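# Illustrative sketch (editor's addition, not part of the original module): the two
# rate helpers above are inverses of each other. The numbers are arbitrary.
def _example_rates():
    td = 2.5e-3           # dead time, seconds
    r_i = 300.0           # incident count rate, counts/s
    r_0 = r_det(td, r_i)  # detected rate is lower than the incident one
    assert r_0 < r_i
    assert np.isclose(r_in(td, r_0), r_i)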
@njit()
def Gn(x, n):
"""Term in Eq. 34 in Zhang+95."""
s = 0
for l in range(0, n):
s += (n - l) / __FACTORIALS[l] * x**l
return np.exp(-x) * s
@njit()
def heaviside(x):
"""Heaviside function. Returns 1 if x>0, and 0 otherwise.
Examples
--------
>>> heaviside(2)
1
>>> heaviside(-1)
0
"""
if x >= 0:
return 1
else:
return 0
@njit()
def h(k, n, td, tb, tau):
"""Term in Eq. 35 in Zhang+95."""
# Typo in Zhang+95 corrected. k * tb, not k * td
if k * tb < n * td:
return 0
return (k - n*(td + tau) / tb +
tau / tb * Gn((k * tb - n * td)/tau, n))
INFINITE = 100
@njit()
def A0(r0, td, tb, tau):
"""Term in Eq. 38 in Zhang+95."""
s = 0
for n in range(1, INFINITE):
s += h(1, n, td, tb, tau)
return r0 * tb * (1 + 2 * s)
@njit()
def A(k, r0, td, tb, tau):
"""Term in Eq. 39 in Zhang+95."""
if k == 0:
return A0(r0, td, tb, tau)
# Equation 39
s = 0
for n in range(1, INFINITE):
s += h(k + 1, n, td, tb, tau) - 2 * h(k, n, td, tb, tau) + h(k - 1, n, td, tb, tau)
return r0 * tb * s
def check_A(rate, td, tb, max_k=100, save_to=None):
"""Test that A is well-behaved.
    Check that A_k -> r0**2 * tb**2 for k -> infinity, as per Eq. 43 in
    Zhang+95.
"""
tau = 1 / rate
r0 = r_det(td, rate)
value = r0 ** 2 * tb**2
fig = plt.figure()
for k in range(max_k):
plt.scatter(k, A(k, r0, td, tb, tau), color='k')
plt.axhline(value, ls='--', color='k')
plt.xlabel('$k$')
plt.ylabel('$A_k$')
if save_to is not None:
plt.savefig(save_to)
plt.close(fig)
@njit()
def B(k, r0, td, tb, tau):
"""Term in Eq. 45 in Zhang+95."""
if k == 0:
return 2 * (A(0, r0, td, tb, tau) - r0**2 * tb**2) / (r0*tb)
return 4 * (A(k, r0, td, tb, tau) - r0**2 * tb**2) / (r0*tb)
@njit()
def safe_B(k, r0, td, tb, tau, limit_k=60):
"""Term in Eq. 39 in Zhang+95, with a cut in the maximum k.
This can be risky. Only use if B is really 0 for high k.
"""
if k > limit_k:
return 0
return B(k, r0, td, tb, tau)
def check_B(rate, td, tb, max_k=100, save_to=None):
"""Check that B->0 for k->infty."""
tau = 1 / rate
r0 = r_det(td, rate)
fig = plt.figure()
for k in range(max_k):
plt.scatter(k, B(k, r0, td, tb, tau), color='k')
plt.axhline(0, ls='--', color='k')
plt.xlabel('$k$')
plt.ylabel('$B_k$')
if save_to is not None:
plt.savefig(save_to)
plt.close(fig)
@njit(parallel=True)
def _inner_loop_pds_zhang(N, tau, r0, td, tb, limit_k=60):
"""Calculate the power spectrum, as per Eq. 44 in Zhang+95."""
P = np.zeros(N // 2)
for j in prange(N//2):
eq8_sum = 0
for k in range(1, N):
eq8_sum += (N - k) / N * safe_B(
k, r0, td, tb, tau,
limit_k=limit_k) * np.cos(2 * np.pi * j * k / N)
P[j] = safe_B(0, r0, td, tb, tau) + eq8_sum
return P
def pds_model_zhang(N, rate, td, tb, limit_k=60):
"""Calculate the dead-time-modified power spectrum.
Parameters
----------
N : int
The number of spectral bins
rate : float
Incident count rate
td : float
Dead time
tb : float
Bin time of the light curve
Other Parameters
----------------
limit_k : int
Limit to this value the number of terms in the inner loops of
calculations. Check the plots returned by the `check_B` and
`check_A` functions to test that this number is adequate.
Returns
-------
freqs : array of floats
Frequency array
power : array of floats
Power spectrum
"""
tau = 1 / rate
r0 = r_det(td, rate)
# Nph = N / tau
log.info("Calculating PDS model (update)")
P = _inner_loop_pds_zhang(N, tau, r0, td, tb, limit_k=limit_k)
maxf = 0.5 / tb
df = maxf / len(P)
freqs = np.arange(0, maxf, df)
return freqs, P
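# Illustrative sketch (editor's addition, not part of the original module): a
# minimal call to ``pds_model_zhang``. The values of N, rate, td and tb are
# arbitrary; for real data, check the convergence of the A_k/B_k terms with
# ``check_A``/``check_B`` before trusting ``limit_k``.
def _example_pds_model():
    freqs, power = pds_model_zhang(64, rate=1000., td=2.5e-3, tb=1e-3,
                                   limit_k=40)
    assert power.size == 64 // 2  # one power value per positive-frequency bin
    return freqs, power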
| mit |
bsipocz/statsmodels | statsmodels/examples/ex_multivar_kde.py | 34 | 1504 |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import statsmodels.api as sm
"""
This example illustrates the nonparametric estimation of a
bivariate bi-modal distribution that is a mixture of two normal
distributions.
author: George Panterov
"""
if __name__ == '__main__':
np.random.seed(123456)
# generate the data
nobs = 500
BW = 'cv_ml'
mu1 = [3, 4]
mu2 = [6, 1]
cov1 = np.asarray([[1, 0.7], [0.7, 1]])
cov2 = np.asarray([[1, -0.7], [-0.7, 1]])
ix = np.random.uniform(size=nobs) > 0.5
V = np.random.multivariate_normal(mu1, cov1, size=nobs)
V[ix, :] = np.random.multivariate_normal(mu2, cov2, size=nobs)[ix, :]
x = V[:, 0]
y = V[:, 1]
dens = sm.nonparametric.KDEMultivariate(data=[x, y], var_type='cc', bw=BW,
defaults=sm.nonparametric.EstimatorSettings(efficient=True))
supportx = np.linspace(min(x), max(x), 60)
supporty = np.linspace(min(y), max(y), 60)
X, Y = np.meshgrid(supportx, supporty)
edat = np.column_stack([X.ravel(), Y.ravel()])
Z = dens.pdf(edat).reshape(X.shape)
# plot
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.figure(2)
plt.imshow(Z)
plt.show()
| bsd-3-clause |
waddell/urbansim | urbansim/models/regression.py | 5 | 33858 | """
Use the ``RegressionModel`` class to fit a model using statsmodels'
OLS capability and then do subsequent prediction.
"""
from __future__ import print_function
import logging
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from patsy import dmatrix
from prettytable import PrettyTable
from zbox import toolz as tz
from . import util
from ..exceptions import ModelEvaluationError
from ..utils import yamlio
from ..utils.logutil import log_start_finish
logger = logging.getLogger(__name__)
def fit_model(df, filters, model_expression):
"""
Use statsmodels OLS to construct a model relation.
Parameters
----------
df : pandas.DataFrame
Data to use for fit. Should contain all the columns
referenced in the `model_expression`.
filters : list of str
Any filters to apply before doing the model fit.
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
Returns
-------
fit : statsmodels.regression.linear_model.OLSResults
"""
df = util.apply_filter_query(df, filters)
model = smf.ols(formula=model_expression, data=df)
if len(model.exog) != len(df):
raise ModelEvaluationError(
'Estimated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
with log_start_finish('statsmodels OLS fit', logger):
return model.fit()
def predict(df, filters, model_fit, ytransform=None):
"""
Apply model to new data to predict new dependent values.
Parameters
----------
df : pandas.DataFrame
filters : list of str
Any filters to apply before doing prediction.
model_fit : statsmodels.regression.linear_model.OLSResults
Result of model estimation.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `df`
after applying filters.
"""
df = util.apply_filter_query(df, filters)
with log_start_finish('statsmodels predict', logger):
sim_data = model_fit.predict(df)
if len(sim_data) != len(df):
raise ModelEvaluationError(
'Predicted data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
if ytransform:
sim_data = ytransform(sim_data)
return pd.Series(sim_data, index=df.index)
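# Illustrative sketch (editor's addition, not part of the original module): the two
# module-level helpers above are typically chained -- fit on one table, predict on
# another. The column names and empty filter lists are hypothetical choices.
def _example_fit_and_predict():
    training = pd.DataFrame({'price': [100., 210., 290., 405.],
                             'sqft': [10., 20., 30., 40.]})
    fit = fit_model(training, [], 'price ~ sqft')
    new_data = pd.DataFrame({'sqft': [15., 25.]})
    return predict(new_data, [], fit)  # pandas Series indexed like new_data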
def _rhs(model_expression):
"""
Get only the right-hand side of a patsy model expression.
Parameters
----------
model_expression : str
Returns
-------
rhs : str
"""
if '~' not in model_expression:
return model_expression
else:
return model_expression.split('~')[1].strip()
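# Illustrative sketch (editor's addition, not part of the original module) of what
# ``_rhs`` returns for a two-sided and a one-sided patsy expression.
def _example_rhs():
    assert _rhs('np.log1p(price) ~ sqft + dist_to_cbd') == 'sqft + dist_to_cbd'
    assert _rhs('sqft + dist_to_cbd') == 'sqft + dist_to_cbd'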
class _FakeRegressionResults(object):
"""
This can be used in place of a statsmodels RegressionResults
for limited purposes when it comes to model prediction.
Intended for use when loading a model from a YAML representation;
we can do model evaluation using the stored coefficients, but can't
recreate the original statsmodels fit result.
Parameters
----------
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
fit_parameters : pandas.DataFrame
Stats results from fitting `model_expression` to data.
Should include columns 'Coefficient', 'Std. Error', and 'T-Score'.
rsquared : float
rsquared_adj : float
"""
def __init__(self, model_expression, fit_parameters, rsquared,
rsquared_adj):
self.model_expression = model_expression
self.params = fit_parameters['Coefficient']
self.bse = fit_parameters['Std. Error']
self.tvalues = fit_parameters['T-Score']
self.rsquared = rsquared
self.rsquared_adj = rsquared_adj
@property
def _rhs(self):
"""
Get only the right-hand side of `model_expression`.
"""
return _rhs(self.model_expression)
def predict(self, data):
"""
Predict new values by running data through the fit model.
Parameters
----------
data : pandas.DataFrame
Table with columns corresponding to the RHS of `model_expression`.
Returns
-------
predicted : ndarray
Array of predicted values.
"""
with log_start_finish('_FakeRegressionResults prediction', logger):
model_design = dmatrix(
self._rhs, data=data, return_type='dataframe')
return model_design.dot(self.params).values
def _model_fit_to_table(fit):
"""
Produce a pandas DataFrame of model fit results from a statsmodels
fit result object.
Parameters
----------
fit : statsmodels.regression.linear_model.RegressionResults
Returns
-------
fit_parameters : pandas.DataFrame
Will have columns 'Coefficient', 'Std. Error', and 'T-Score'.
Index will be model terms.
This frame will also have non-standard attributes
.rsquared and .rsquared_adj with the same meaning and value
as on `fit`.
"""
fit_parameters = pd.DataFrame(
{'Coefficient': fit.params,
'Std. Error': fit.bse,
'T-Score': fit.tvalues})
fit_parameters.rsquared = fit.rsquared
fit_parameters.rsquared_adj = fit.rsquared_adj
return fit_parameters
YTRANSFORM_MAPPING = {
None: None,
np.exp: 'np.exp',
'np.exp': np.exp,
np.log: 'np.log',
'np.log': np.log,
np.log1p: 'np.log1p',
'np.log1p': np.log1p,
np.expm1: 'np.expm1',
'np.expm1': np.expm1
}
class RegressionModel(object):
"""
A hedonic (regression) model with the ability to store an
estimated model and predict new data based on the model.
statsmodels' OLS implementation is used.
Parameters
----------
fit_filters : list of str
Filters applied before fitting the model.
predict_filters : list of str
Filters applied before calculating new data points.
model_expression : str or dict
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
name : optional
Optional descriptive name for this model that may be used
in output.
"""
def __init__(self, fit_filters, predict_filters, model_expression,
ytransform=None, name=None):
self.fit_filters = fit_filters
self.predict_filters = predict_filters
self.model_expression = model_expression
self.ytransform = ytransform
self.name = name or 'RegressionModel'
self.model_fit = None
self.fit_parameters = None
self.est_data = None
@classmethod
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a RegressionModel instance from a saved YAML configuration.
Arguments are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
RegressionModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
model = cls(
cfg['fit_filters'],
cfg['predict_filters'],
cfg['model_expression'],
YTRANSFORM_MAPPING[cfg['ytransform']],
cfg['name'])
if 'fitted' in cfg and cfg['fitted']:
fit_parameters = pd.DataFrame(cfg['fit_parameters'])
fit_parameters.rsquared = cfg['fit_rsquared']
fit_parameters.rsquared_adj = cfg['fit_rsquared_adj']
model.model_fit = _FakeRegressionResults(
model.str_model_expression,
fit_parameters,
cfg['fit_rsquared'], cfg['fit_rsquared_adj'])
model.fit_parameters = fit_parameters
logger.debug('loaded regression model {} from YAML'.format(model.name))
return model
@property
def str_model_expression(self):
"""
Model expression as a string suitable for use with patsy/statsmodels.
"""
return util.str_model_expression(
self.model_expression, add_constant=True)
def fit(self, data, debug=False):
"""
Fit the model to data and store/return the results.
Parameters
----------
data : pandas.DataFrame
Data to use for fitting the model. Must contain all the
columns referenced by the `model_expression`.
debug : bool
If debug is set to true, this sets the attribute "est_data"
to a dataframe with the actual data used for estimation of
this model.
Returns
-------
fit : statsmodels.regression.linear_model.OLSResults
This is returned for inspection, but also stored on the
class instance for use during prediction.
"""
with log_start_finish('fitting model {}'.format(self.name), logger):
fit = fit_model(data, self.fit_filters, self.str_model_expression)
self.model_fit = fit
self.fit_parameters = _model_fit_to_table(fit)
if debug:
index = util.apply_filter_query(data, self.fit_filters).index
assert len(fit.model.exog) == len(index), (
"The estimate data is unequal in length to the original "
"dataframe, usually caused by nans")
df = pd.DataFrame(
fit.model.exog, columns=fit.model.exog_names, index=index)
df[fit.model.endog_names] = fit.model.endog
df["fittedvalues"] = fit.fittedvalues
df["residuals"] = fit.resid
self.est_data = df
return fit
@property
def fitted(self):
"""
True if the model is ready for prediction.
"""
return self.model_fit is not None
def assert_fitted(self):
"""
Raises a RuntimeError if the model is not ready for prediction.
"""
if not self.fitted:
raise RuntimeError('Model has not been fit.')
def report_fit(self):
"""
Print a report of the fit results.
"""
if not self.fitted:
print('Model not yet fit.')
return
print('R-Squared: {0:.3f}'.format(self.model_fit.rsquared))
print('Adj. R-Squared: {0:.3f}'.format(self.model_fit.rsquared_adj))
print('')
        tbl = PrettyTable()
tbl.add_column('Component', self.fit_parameters.index.values)
for col in ('Coefficient', 'Std. Error', 'T-Score'):
tbl.add_column(col, self.fit_parameters[col].values)
tbl.align['Component'] = 'l'
tbl.float_format = '.3'
print(tbl)
def predict(self, data):
"""
Predict a new data set based on an estimated model.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must contain all the columns
referenced by the right-hand side of the `model_expression`.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `data`
after applying filters.
"""
self.assert_fitted()
with log_start_finish('predicting model {}'.format(self.name), logger):
return predict(
data, self.predict_filters, self.model_fit, self.ytransform)
def to_dict(self):
"""
Returns a dictionary representation of a RegressionModel instance.
"""
d = {
'model_type': 'regression',
'name': self.name,
'fit_filters': self.fit_filters,
'predict_filters': self.predict_filters,
'model_expression': self.model_expression,
'ytransform': YTRANSFORM_MAPPING[self.ytransform],
'fitted': self.fitted,
'fit_parameters': None,
'fit_rsquared': None,
'fit_rsquared_adj': None
}
if self.fitted:
d['fit_parameters'] = yamlio.frame_to_yaml_safe(
self.fit_parameters)
d['fit_rsquared'] = float(self.model_fit.rsquared)
d['fit_rsquared_adj'] = float(self.model_fit.rsquared_adj)
return d
def to_yaml(self, str_or_buffer=None):
"""
        Save a model representation to YAML.
Parameters
----------
str_or_buffer : str or file like, optional
By default a YAML string is returned. If a string is
given here the YAML will be written to that file.
If an object with a ``.write`` method is given the
YAML will be written to that object.
Returns
-------
j : str
YAML string if `str_or_buffer` is not given.
"""
logger.debug(
'serializing regression model {} to YAML'.format(self.name))
return yamlio.convert_to_yaml(self.to_dict(), str_or_buffer)
def columns_used(self):
"""
Returns all the columns used in this model for filtering
and in the model expression.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.model_expression))))
@classmethod
def fit_from_cfg(cls, df, cfgname, debug=False):
"""
Parameters
----------
df : DataFrame
The dataframe which contains the columns to use for the estimation.
cfgname : string
The name of the yaml config file which describes the hedonic model.
debug : boolean, optional (default False)
Whether to generate debug information on the model.
Returns
-------
RegressionModel which was used to fit
"""
logger.debug('start: fit from configuration {}'.format(cfgname))
hm = cls.from_yaml(str_or_buffer=cfgname)
ret = hm.fit(df, debug=debug)
print(ret.summary())
hm.to_yaml(str_or_buffer=cfgname)
        logger.debug('finish: fit from configuration {}'.format(cfgname))
return hm
@classmethod
def predict_from_cfg(cls, df, cfgname):
"""
Parameters
----------
df : DataFrame
The dataframe which contains the columns to use for the estimation.
cfgname : string
The name of the yaml config file which describes the hedonic model.
Returns
-------
predicted : pandas.Series
Predicted data in a pandas Series. Will have the index of `data`
after applying filters and minus any groups that do not have
models.
hm : RegressionModel which was used to predict
"""
logger.debug('start: predict from configuration {}'.format(cfgname))
hm = cls.from_yaml(str_or_buffer=cfgname)
price_or_rent = hm.predict(df)
print(price_or_rent.describe())
        logger.debug('finish: predict from configuration {}'.format(cfgname))
return price_or_rent, hm
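# Illustrative sketch (editor's addition, not part of the original module): minimal
# end-to-end use of ``RegressionModel``. The column names, empty filters and the
# ``np.exp`` ytransform are hypothetical choices.
def _example_regression_model():
    df = pd.DataFrame({'price': [100., 200., 300., 400.],
                       'sqft': [10., 20., 30., 40.]})
    rm = RegressionModel(fit_filters=[], predict_filters=[],
                         model_expression='np.log(price) ~ sqft',
                         ytransform=np.exp, name='example_hedonic')
    rm.fit(df)
    return rm.predict(pd.DataFrame({'sqft': [15., 25.]}))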
class RegressionModelGroup(object):
"""
Manages a group of regression models that refer to different segments
within a single table.
Model names must match the segment names after doing a Pandas groupby.
Parameters
----------
segmentation_col
Name of the column on which to segment.
name
Optional name used to identify the model in places.
"""
def __init__(self, segmentation_col, name=None):
self.segmentation_col = segmentation_col
self.name = name if name is not None else 'RegressionModelGroup'
self.models = {}
def add_model(self, model):
"""
Add a `RegressionModel` instance.
Parameters
----------
model : `RegressionModel`
Should have a ``.name`` attribute matching one of
the groupby segments.
"""
logger.debug(
'adding model {} to group {}'.format(model.name, self.name))
self.models[model.name] = model
def add_model_from_params(self, name, fit_filters, predict_filters,
model_expression, ytransform=None):
"""
Add a model by passing arguments through to `RegressionModel`.
Parameters
----------
name : any
Must match a groupby segment name.
fit_filters : list of str
Filters applied before fitting the model.
predict_filters : list of str
Filters applied before calculating new data points.
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
"""
logger.debug(
'adding model {} to group {}'.format(name, self.name))
model = RegressionModel(
fit_filters, predict_filters, model_expression, ytransform, name)
self.models[name] = model
def _iter_groups(self, data):
"""
Iterate over the groups in `data` after grouping by
`segmentation_col`. Skips any groups for which there
is no model stored.
Yields tuples of (name, df) where name is the group key
and df is the group DataFrame.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
"""
groups = data.groupby(self.segmentation_col)
for name in self.models:
yield name, groups.get_group(name)
def fit(self, data, debug=False):
"""
Fit each of the models in the group.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true (default false) will pass the debug parameter
to model estimation.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
with log_start_finish(
'fitting models in group {}'.format(self.name), logger):
return {name: self.models[name].fit(df, debug=debug)
for name, df in self._iter_groups(data)}
@property
def fitted(self):
"""
Whether all models in the group have been fitted.
"""
return (all(m.fitted for m in self.models.values())
if self.models else False)
def predict(self, data):
"""
Predict new data for each group in the segmentation.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must have a column with the
same name as `segmentation_col`.
Returns
-------
predicted : pandas.Series
Predicted data in a pandas Series. Will have the index of `data`
after applying filters and minus any groups that do not have
models.
"""
with log_start_finish(
'predicting models in group {}'.format(self.name), logger):
results = [self.models[name].predict(df)
for name, df in self._iter_groups(data)]
return pd.concat(results)
def columns_used(self):
"""
Returns all the columns used across all models in the group
for filtering and in the model expression.
"""
return list(tz.unique(tz.concat(
m.columns_used() for m in self.models.values())))
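# Illustrative sketch (editor's addition, not part of the original module): building
# a two-segment group. Segment names must match the values found in the
# segmentation column of the data; all names below are hypothetical.
def _example_regression_model_group():
    grp = RegressionModelGroup('building_type')
    grp.add_model_from_params('residential', None, None,
                              'np.log(price) ~ sqft', ytransform=np.exp)
    grp.add_model_from_params('commercial', None, None,
                              'np.log(price) ~ sqft', ytransform=np.exp)
    return grp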
class SegmentedRegressionModel(object):
"""
A regression model group that allows segments to have different
model expressions and ytransforms but all have the same filters.
Parameters
----------
segmentation_col
Name of column in the data table on which to segment. Will be used
with a pandas groupby on the data table.
fit_filters : list of str, optional
Filters applied before fitting the model.
predict_filters : list of str, optional
Filters applied before calculating new data points.
default_model_expr : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
default_ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
    min_segment_size : int, optional
        Segments with ``min_segment_size`` observations or fewer will be
        skipped; a very small number of observations (e.g. 1) will cause
        an error during estimation.
name : str, optional
A name used in places to identify the model.
"""
def __init__(
self, segmentation_col, fit_filters=None, predict_filters=None,
default_model_expr=None, default_ytransform=None,
min_segment_size=0, name=None):
self.segmentation_col = segmentation_col
self._group = RegressionModelGroup(segmentation_col)
self.fit_filters = fit_filters
self.predict_filters = predict_filters
self.default_model_expr = default_model_expr
self.default_ytransform = default_ytransform
self.min_segment_size = min_segment_size
self.name = name if name is not None else 'SegmentedRegressionModel'
@classmethod
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a SegmentedRegressionModel instance from a saved YAML
        configuration. Arguments are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
SegmentedRegressionModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
default_model_expr = cfg['default_config']['model_expression']
default_ytransform = cfg['default_config']['ytransform']
seg = cls(
cfg['segmentation_col'], cfg['fit_filters'],
cfg['predict_filters'], default_model_expr,
YTRANSFORM_MAPPING[default_ytransform], cfg['min_segment_size'],
cfg['name'])
if "models" not in cfg:
cfg["models"] = {}
for name, m in cfg['models'].items():
m['model_expression'] = m.get(
'model_expression', default_model_expr)
m['ytransform'] = m.get('ytransform', default_ytransform)
m['fit_filters'] = None
m['predict_filters'] = None
reg = RegressionModel.from_yaml(yamlio.convert_to_yaml(m, None))
seg._group.add_model(reg)
logger.debug(
'loaded segmented regression model {} from yaml'.format(seg.name))
return seg
def add_segment(self, name, model_expression=None, ytransform='default'):
"""
Add a new segment with its own model expression and ytransform.
Parameters
----------
name :
Segment name. Must match a segment in the groupby of the data.
model_expression : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
If not given the default model will be used, which must not be
None.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
If not given the default ytransform will be used.
"""
if not model_expression:
if self.default_model_expr is None:
raise ValueError(
'No default model available, '
                    'you must supply a model expression.')
model_expression = self.default_model_expr
if ytransform == 'default':
ytransform = self.default_ytransform
        # No fit or predict filters here; filtering is handled by this class.
self._group.add_model_from_params(
name, None, None, model_expression, ytransform)
logger.debug('added segment {} to model {}'.format(name, self.name))
def fit(self, data, debug=False):
"""
Fit each segment. Segments that have not already been explicitly
added will be automatically added with default model and ytransform.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true will pass debug to the fit method of each model.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
data = util.apply_filter_query(data, self.fit_filters)
unique = data[self.segmentation_col].unique()
value_counts = data[self.segmentation_col].value_counts()
# Remove any existing segments that may no longer have counterparts
# in the data. This can happen when loading a saved model and then
# calling this method with data that no longer has segments that
# were there the last time this was called.
gone = set(self._group.models) - set(unique)
for g in gone:
del self._group.models[g]
for x in unique:
if x not in self._group.models and \
value_counts[x] > self.min_segment_size:
self.add_segment(x)
with log_start_finish(
'fitting models in segmented model {}'.format(self.name),
logger):
return self._group.fit(data, debug=debug)
@property
def fitted(self):
"""
Whether models for all segments have been fit.
"""
return self._group.fitted
def predict(self, data):
"""
Predict new data for each group in the segmentation.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must have a column with the
same name as `segmentation_col`.
Returns
-------
predicted : pandas.Series
Predicted data in a pandas Series. Will have the index of `data`
after applying filters.
"""
with log_start_finish(
'predicting models in segmented model {}'.format(self.name),
logger):
data = util.apply_filter_query(data, self.predict_filters)
return self._group.predict(data)
def _process_model_dict(self, d):
"""
Remove redundant items from a model's configuration dict.
Parameters
----------
d : dict
Modified in place.
Returns
-------
dict
Modified `d`.
"""
del d['model_type']
del d['fit_filters']
del d['predict_filters']
if d['model_expression'] == self.default_model_expr:
del d['model_expression']
if YTRANSFORM_MAPPING[d['ytransform']] == self.default_ytransform:
del d['ytransform']
d["name"] = yamlio.to_scalar_safe(d["name"])
return d
def to_dict(self):
"""
Returns a dict representation of this instance suitable for
conversion to YAML.
"""
return {
'model_type': 'segmented_regression',
'name': self.name,
'segmentation_col': self.segmentation_col,
'fit_filters': self.fit_filters,
'predict_filters': self.predict_filters,
'min_segment_size': self.min_segment_size,
'default_config': {
'model_expression': self.default_model_expr,
'ytransform': YTRANSFORM_MAPPING[self.default_ytransform]
},
'fitted': self.fitted,
'models': {
yamlio.to_scalar_safe(name):
self._process_model_dict(m.to_dict())
for name, m in self._group.models.items()}
}
def to_yaml(self, str_or_buffer=None):
"""
        Save a model representation to YAML.
Parameters
----------
str_or_buffer : str or file like, optional
By default a YAML string is returned. If a string is
given here the YAML will be written to that file.
If an object with a ``.write`` method is given the
YAML will be written to that object.
Returns
-------
j : str
YAML string if `str_or_buffer` is not given.
"""
logger.debug(
'serializing segmented regression model {} to yaml'.format(
self.name))
return yamlio.convert_to_yaml(self.to_dict(), str_or_buffer)
def columns_used(self):
"""
Returns all the columns used across all models in the group
for filtering and in the model expression.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.default_model_expr),
self._group.columns_used(),
[self.segmentation_col])))
@classmethod
def fit_from_cfg(cls, df, cfgname, debug=False, min_segment_size=None):
"""
Parameters
----------
df : DataFrame
The dataframe which contains the columns to use for the estimation.
cfgname : string
The name of the yaml config file which describes the hedonic model.
debug : boolean, optional (default False)
Whether to generate debug information on the model.
min_segment_size : int, optional
Set attribute on the model.
Returns
-------
hm : SegmentedRegressionModel which was used to fit
"""
logger.debug('start: fit from configuration {}'.format(cfgname))
hm = cls.from_yaml(str_or_buffer=cfgname)
if min_segment_size:
hm.min_segment_size = min_segment_size
for k, v in hm.fit(df, debug=debug).items():
print("REGRESSION RESULTS FOR SEGMENT %s\n" % str(k))
print(v.summary())
hm.to_yaml(str_or_buffer=cfgname)
logger.debug('finish: fit from configuration {}'.format(cfgname))
return hm
@classmethod
def predict_from_cfg(cls, df, cfgname, min_segment_size=None):
"""
Parameters
----------
df : DataFrame
The dataframe which contains the columns to use for the estimation.
cfgname : string
The name of the yaml config file which describes the hedonic model.
min_segment_size : int, optional
Set attribute on the model.
Returns
-------
predicted : pandas.Series
Predicted data in a pandas Series. Will have the index of `data`
after applying filters and minus any groups that do not have
models.
hm : SegmentedRegressionModel which was used to predict
"""
logger.debug('start: predict from configuration {}'.format(cfgname))
hm = cls.from_yaml(str_or_buffer=cfgname)
if min_segment_size:
hm.min_segment_size = min_segment_size
price_or_rent = hm.predict(df)
print(price_or_rent.describe())
logger.debug('finish: predict from configuration {}'.format(cfgname))
return price_or_rent, hm
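# Illustrative sketch (editor's addition, not part of the original module): a
# segmented model whose segments share a default expression and ytransform.
# All names below are hypothetical.
def _example_segmented_regression_model():
    seg = SegmentedRegressionModel('building_type',
                                   default_model_expr='np.log(price) ~ sqft',
                                   default_ytransform=np.exp,
                                   min_segment_size=2)
    seg.add_segment('residential')  # further segments are added lazily by fit()
    return seg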
| bsd-3-clause |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/tools/tests/test_tools.py | 3 | 17257 | """
Test functions for models.tools
"""
import numpy as np
from numpy.random import standard_normal
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_string_equal, TestCase)
from nose.tools import (assert_true, assert_false, assert_raises)
from statsmodels.datasets import longley
from statsmodels.tools import tools
class TestTools(TestCase):
def test_add_constant_list(self):
x = range(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_1d(self):
x = np.arange(1,5)
x = tools.add_constant(x)
y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
assert_equal(x, y)
def test_add_constant_has_constant1d(self):
x = np.ones(5)
x = tools.add_constant(x)
assert_equal(x, np.ones(5))
def test_add_constant_has_constant2d(self):
x = np.asarray([[1,1,1,1],[1,2,3,4.]])
y = tools.add_constant(x)
assert_equal(x,y)
def test_recipr(self):
X = np.array([[2,1],[-1,0]])
Y = tools.recipr(X)
assert_almost_equal(Y, np.array([[0.5,1],[0,0]]))
def test_recipr0(self):
X = np.array([[2,1],[-4,0]])
Y = tools.recipr0(X)
assert_almost_equal(Y, np.array([[0.5,1],[-0.25,0]]))
def test_rank(self):
X = standard_normal((40,10))
self.assertEquals(tools.rank(X), 10)
X[:,0] = X[:,1] + X[:,2]
self.assertEquals(tools.rank(X), 9)
def test_fullrank(self):
X = standard_normal((40,10))
X[:,0] = X[:,1] + X[:,2]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,9))
self.assertEquals(tools.rank(Y), 9)
X[:,5] = X[:,3] + X[:,4]
Y = tools.fullrank(X)
self.assertEquals(Y.shape, (40,8))
self.assertEquals(tools.rank(Y), 8)
def test_estimable():
rng = np.random.RandomState(20120713)
N, P = (40, 10)
X = rng.normal(size=(N, P))
C = rng.normal(size=(1, P))
isestimable = tools.isestimable
assert_true(isestimable(C, X))
assert_true(isestimable(np.eye(P), X))
for row in np.eye(P):
assert_true(isestimable(row, X))
X = np.ones((40, 2))
assert_true(isestimable([1, 1], X))
assert_false(isestimable([1, 0], X))
assert_false(isestimable([0, 1], X))
assert_false(isestimable(np.eye(2), X))
halfX = rng.normal(size=(N, 5))
X = np.hstack([halfX, halfX])
assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X))
assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X))
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X))
# Test array-like for design
XL = X.tolist()
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL))
# Test ValueError for incorrect number of columns
X = rng.normal(size=(N, 5))
for n in range(1, 4):
assert_raises(ValueError, isestimable, np.ones((n,)), X)
assert_raises(ValueError, isestimable, np.eye(4), X)
class TestCategoricalNumerical(object):
#TODO: use assert_raises to check that bad inputs are taken care of
def __init__(self):
#import string
stringabc = 'abcdefghijklmnopqrstuvwxy'
self.des = np.random.randn(25,2)
self.instr = np.floor(np.arange(10,60, step=2)/10)
x=np.zeros((25,5))
x[:5,0]=1
x[5:10,1]=1
x[10:15,2]=1
x[15:20,3]=1
x[20:25,4]=1
self.dummy = x
structdes = np.zeros((25,1),dtype=[('var1', 'f4'),('var2', 'f4'),
('instrument','f4'),('str_instr','a10')])
structdes['var1'] = self.des[:,0][:,None]
structdes['var2'] = self.des[:,1][:,None]
structdes['instrument'] = self.instr[:,None]
string_var = [stringabc[0:5], stringabc[5:10],
stringabc[10:15], stringabc[15:20],
stringabc[20:25]]
string_var *= 5
self.string_var = np.array(sorted(string_var))
structdes['str_instr'] = self.string_var[:,None]
self.structdes = structdes
self.recdes = structdes.view(np.recarray)
def test_array2d(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],10)
def test_array1d(self):
des = tools.categorical(self.instr)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],6)
def test_array2d_drop(self):
des = np.column_stack((self.des, self.instr, self.des))
des = tools.categorical(des, col=2, drop=True)
assert_array_equal(des[:,-5:], self.dummy)
assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.instr, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='instrument')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['instrument'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='instrument')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=2)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='instrument', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
# def test_arraylike2d(self):
# des = tools.categorical(self.structdes.tolist(), col=2)
# test_des = des[:,-5:]
# assert_array_equal(test_des, self.dummy)
# assert_equal(des.shape[1], 9)
# def test_arraylike1d(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr)
# test_dum = dum[:,-5:]
# assert_array_equal(test_dum, self.dummy)
# assert_equal(dum.shape[1], 6)
# def test_arraylike2d_drop(self):
# des = tools.categorical(self.structdes.tolist(), col=2, drop=True)
# test_des = des[:,-5:]
# assert_array_equal(test__des, self.dummy)
# assert_equal(des.shape[1], 8)
# def test_arraylike1d_drop(self):
# instr = self.structdes['instrument'].tolist()
# dum = tools.categorical(instr, drop=True)
# assert_array_equal(dum, self.dummy)
# assert_equal(dum.shape[1], 5)
class TestCategoricalString(TestCategoricalNumerical):
# comment out until we have type coercion
# def test_array2d(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],10)
# def test_array1d(self):
# des = tools.categorical(self.instr)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],6)
# def test_array2d_drop(self):
# des = np.column_stack((self.des, self.instr, self.des))
# des = tools.categorical(des, col=2, drop=True)
# assert_array_equal(des[:,-5:], self.dummy)
# assert_equal(des.shape[1],9)
def test_array1d_drop(self):
des = tools.categorical(self.string_var, drop=True)
assert_array_equal(des, self.dummy)
assert_equal(des.shape[1],5)
def test_recarray2d(self):
des = tools.categorical(self.recdes, col='str_instr')
# better way to do this?
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray2dint(self):
des = tools.categorical(self.recdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_recarray1d(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_recarray1d_drop(self):
instr = self.structdes['str_instr'].view(np.recarray)
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_recarray2d_drop(self):
des = tools.categorical(self.recdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray2d(self):
des = tools.categorical(self.structdes, col='str_instr')
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray2dint(self):
des = tools.categorical(self.structdes, col=3)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 9)
def test_structarray1d(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 6)
def test_structarray2d_drop(self):
des = tools.categorical(self.structdes, col='str_instr', drop=True)
test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
assert_array_equal(test_des, self.dummy)
assert_equal(len(des.dtype.names), 8)
def test_structarray1d_drop(self):
instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
dum = tools.categorical(instr, drop=True)
test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
assert_array_equal(test_dum, self.dummy)
assert_equal(len(dum.dtype.names), 5)
def test_arraylike2d(self):
pass
def test_arraylike1d(self):
pass
def test_arraylike2d_drop(self):
pass
def test_arraylike1d_drop(self):
pass
def test_rec_issue302():
arr = np.rec.fromrecords([[10], [11]], names='group')
actual = tools.categorical(arr)
expected = np.rec.array([(10, 1.0, 0.0), (11, 0.0, 1.0)],
dtype=[('group', int), ('group_10', float), ('group_11', float)])
assert_array_equal(actual, expected)
def test_issue302():
arr = np.rec.fromrecords([[10, 12], [11, 13]], names=['group', 'whatever'])
actual = tools.categorical(arr, col=['group'])
expected = np.rec.array([(10, 12, 1.0, 0.0), (11, 13, 0.0, 1.0)],
dtype=[('group', int), ('whatever', int), ('group_10', float),
('group_11', float)])
assert_array_equal(actual, expected)
def test_pandas_const_series():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=False)
assert_string_equal('const', series.columns[1])
assert_equal(series.var(0)[1], 0)
def test_pandas_const_series_prepend():
dta = longley.load_pandas()
series = dta.exog['GNP']
series = tools.add_constant(series, prepend=True)
assert_string_equal('const', series.columns[0])
assert_equal(series.var(0)[0], 0)
def test_pandas_const_df():
dta = longley.load_pandas().exog
dta = tools.add_constant(dta, prepend=False)
assert_string_equal('const', dta.columns[-1])
assert_equal(dta.var(0)[-1], 0)
def test_pandas_const_df_prepend():
dta = longley.load_pandas().exog
# regression test for #1025
dta['UNEMP'] /= dta['UNEMP'].std()
dta = tools.add_constant(dta, prepend=True)
assert_string_equal('const', dta.columns[0])
assert_equal(dta.var(0)[0], 0)
def test_chain_dot():
A = np.arange(1,13).reshape(3,4)
B = np.arange(3,15).reshape(4,3)
C = np.arange(5,8).reshape(3,1)
assert_equal(tools.chain_dot(A,B,C), np.array([[1820],[4300],[6780]]))
class TestNanDot(object):
@classmethod
def setupClass(cls):
nan = np.nan
cls.mx_1 = np.array([[nan, 1.], [2., 3.]])
cls.mx_2 = np.array([[nan, nan], [2., 3.]])
cls.mx_3 = np.array([[0., 0.], [0., 0.]])
cls.mx_4 = np.array([[1., 0.], [1., 0.]])
cls.mx_5 = np.array([[0., 1.], [0., 1.]])
cls.mx_6 = np.array([[1., 2.], [3., 4.]])
def test_11(self):
test_res = tools.nan_dot(self.mx_1, self.mx_1)
expected_res = np.array([[ np.nan, np.nan], [ np.nan, 11.]])
assert_array_equal(test_res, expected_res)
def test_12(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_2)
expected_res = np.array([[ nan, nan], [ nan, nan]])
assert_array_equal(test_res, expected_res)
def test_13(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_14(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_1, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_41(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_4, self.mx_1)
expected_res = np.array([[ nan, 1.], [ nan, 1.]])
assert_array_equal(test_res, expected_res)
def test_23(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_3)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_32(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_3, self.mx_2)
expected_res = np.array([[ 0., 0.], [ 0., 0.]])
assert_array_equal(test_res, expected_res)
def test_24(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_4)
expected_res = np.array([[ nan, 0.], [ 5., 0.]])
assert_array_equal(test_res, expected_res)
def test_25(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_2, self.mx_5)
expected_res = np.array([[ 0., nan], [ 0., 5.]])
assert_array_equal(test_res, expected_res)
def test_66(self):
nan = np.nan
test_res = tools.nan_dot(self.mx_6, self.mx_6)
expected_res = np.array([[ 7., 10.], [ 15., 22.]])
assert_array_equal(test_res, expected_res)
| apache-2.0 |
HHammond/PrettyPandas | test/test_pretty_pandas.py | 1 | 2421 | import copy
import pytest
import numpy as np
import pandas as pd
from prettypandas import PrettyPandas
@pytest.fixture()
def dataframe():
np.random.seed(24)
df = pd.DataFrame({'A': np.linspace(1, 10, 10)})
df = pd.concat([df, pd.DataFrame(np.random.randn(10, 4),
columns=list('BCDE'))],
axis=1)
return df
@pytest.fixture()
def prettyframe(dataframe):
return PrettyPandas(dataframe)
def test_creation(dataframe):
PrettyPandas(dataframe)
try:
PrettyPandas(None)
except TypeError:
assert True
p1 = PrettyPandas(dataframe)
assert p1.summary_rows == []
assert p1.summary_cols == []
assert p1.formatters == []
p2 = PrettyPandas(dataframe, summary_rows=['test'])
assert p2.summary_rows == ['test']
assert p1.summary_cols == []
assert p1.formatters == []
def test_data_safety(dataframe):
df1 = copy.deepcopy(dataframe)
df = PrettyPandas(dataframe)
df.total()._apply_summaries()
assert all(dataframe == df1)
assert all(df.data == df1)
def test_summary(dataframe):
p1 = PrettyPandas(dataframe).total()
actual = list(p1.data.sum())
r = p1._apply_summaries()
row = r.iloc[-1]
assert (row == actual).all()
def test_summary_fns(dataframe):
PrettyPandas(dataframe).total()
PrettyPandas(dataframe).average()
PrettyPandas(dataframe).median()
PrettyPandas(dataframe).max()
PrettyPandas(dataframe).min()
out = PrettyPandas(dataframe).total()
assert len(out.summary_rows) == 1
assert len(out.summary_cols) == 0
out = PrettyPandas(dataframe).total(axis=1)
assert len(out.summary_rows) == 0
assert len(out.summary_cols) == 1
out = PrettyPandas(dataframe).total(axis=None)
assert len(out.summary_rows) == 1
assert len(out.summary_cols) == 1
out = PrettyPandas(dataframe).min().max()
assert len(out.summary_rows) == 2
assert len(out.summary_cols) == 0
out = PrettyPandas(dataframe).min().max(axis=1)
assert len(out.summary_rows) == 1
assert len(out.summary_cols) == 1
def test_mulitindex():
df = pd.DataFrame({'A': [1, 2],
'B': [3, 4],
'D': [4, 3],
'C': [6, 7]})
with pytest.raises(ValueError):
output = PrettyPandas(df.set_index(['A', 'B'])).total(axis=1)._apply_summaries()
| mit |
VladimirTyrin/urbansim | urbansim/models/util.py | 5 | 9254 | """
Utilities used within the ``urbansim.models`` package.
"""
import collections
import logging
import numbers
from StringIO import StringIO
from tokenize import generate_tokens, NAME
import numpy as np
import pandas as pd
import patsy
from zbox import toolz as tz
from ..utils.logutil import log_start_finish
logger = logging.getLogger(__name__)
def apply_filter_query(df, filters=None):
"""
Use the DataFrame.query method to filter a table down to the
desired rows.
Parameters
----------
df : pandas.DataFrame
filters : list of str or str, optional
List of filters to apply. Will be joined together with
' and ' and passed to DataFrame.query. A string will be passed
straight to DataFrame.query.
If not supplied no filtering will be done.
Returns
-------
filtered_df : pandas.DataFrame
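    Examples
    --------
    A minimal sketch; the frame and the filter strings are arbitrary.
    >>> import pandas as pd
    >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    >>> apply_filter_query(df, ['a > 1', 'b < 6'])
       a  b
    1  2  5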
"""
with log_start_finish('apply filter query: {!r}'.format(filters), logger):
if filters:
if isinstance(filters, str):
query = filters
else:
query = ' and '.join(filters)
return df.query(query)
else:
return df
def _filterize(name, value):
"""
    Turn a `name` and `value` into a string expression compatible with
    the ``DataFrame.query`` method.
Parameters
----------
name : str
Should be the name of a column in the table to which the
filter will be applied.
A suffix of '_max' will result in a "less than" filter,
a suffix of '_min' will result in a "greater than or equal to" filter,
and no recognized suffix will result in an "equal to" filter.
value : any
Value side of filter for comparison to column values.
Returns
-------
filter_exp : str
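    Examples
    --------
    Illustrative calls; the column names are arbitrary.
    >>> _filterize('sqft_min', 1000)
    'sqft >= 1000'
    >>> _filterize('sqft_max', 2000)
    'sqft < 2000'
    >>> _filterize('county', 'Alameda')
    "county == 'Alameda'"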
"""
if name.endswith('_min'):
name = name[:-4]
comp = '>='
elif name.endswith('_max'):
name = name[:-4]
comp = '<'
else:
comp = '=='
result = '{} {} {!r}'.format(name, comp, value)
logger.debug(
'converted name={} and value={} to filter {}'.format(
name, value, result))
return result
def filter_table(table, filter_series, ignore=None):
"""
    Filter a table based on a set of restrictions given in a
    Series of column name / filter parameter pairs. The column
names can have suffixes `_min` and `_max` to indicate
"less than" and "greater than" constraints.
Parameters
----------
table : pandas.DataFrame
Table to filter.
filter_series : pandas.Series
Series of column name / value pairs of filter constraints.
        Columns that end with '_max' will be used to create
        "less than" filters, columns that end with '_min' will be
        used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
ignore : sequence of str, optional
List of column names that should not be used for filtering.
Returns
-------
filtered : pandas.DataFrame
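    Examples
    --------
    A minimal sketch with arbitrary column names; only the surviving index
    labels are shown so the output stays stable across pandas versions.
    >>> import pandas as pd
    >>> df = pd.DataFrame({'sqft': [500, 1500, 2500], 'county': ['a', 'b', 'b']})
    >>> filters = pd.Series({'sqft_min': 1000, 'county': 'b'})
    >>> filter_table(df, filters).index.tolist()
    [1, 2]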
"""
with log_start_finish('filter table', logger):
ignore = ignore if ignore else set()
filters = [_filterize(name, val)
for name, val in filter_series.iteritems()
if not (name in ignore or
(isinstance(val, numbers.Number) and
np.isnan(val)))]
return apply_filter_query(table, filters)
def concat_indexes(indexes):
"""
Concatenate a sequence of pandas Indexes.
Parameters
----------
indexes : sequence of pandas.Index
Returns
-------
pandas.Index
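    Examples
    --------
    A minimal sketch; ``tolist`` is used so the output does not depend on
    the pandas Index repr.
    >>> import pandas as pd
    >>> concat_indexes([pd.Index([1, 2]), pd.Index([3, 4])]).tolist()
    [1, 2, 3, 4]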
"""
return pd.Index(np.concatenate(indexes))
def has_constant_expr(expr):
"""
    Report whether a model expression has a constant-specific term.
    That is, a term explicitly specifying whether the model should or
    should not include a constant (e.g. '+ 1' or '- 1').
Parameters
----------
expr : str
Model expression to check.
Returns
-------
has_constant : bool
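    Examples
    --------
    Illustrative only; assumes patsy can parse the given expressions.
    >>> has_constant_expr('y ~ x')
    False
    >>> has_constant_expr('y ~ x + 1')
    True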
"""
def has_constant(node):
if node.type == 'ONE':
return True
for n in node.args:
if has_constant(n):
return True
return False
return has_constant(patsy.parse_formula.parse_formula(expr))
def str_model_expression(expr, add_constant=True):
"""
We support specifying model expressions as strings, lists, or dicts;
but for use with patsy and statsmodels we need a string.
This function will take any of those as input and return a string.
Parameters
----------
expr : str, iterable, or dict
A string will be returned unmodified except to add or remove
a constant.
An iterable sequence will be joined together with ' + '.
A dictionary should have ``right_side`` and, optionally,
``left_side`` keys. The ``right_side`` can be a list or a string
and will be handled as above. If ``left_side`` is present it will
be joined with ``right_side`` with ' ~ '.
add_constant : bool, optional
Whether to add a ' + 1' (if True) or ' - 1' (if False) to the model.
If the expression already has a '+ 1' or '- 1' this option will be
ignored.
Returns
-------
model_expression : str
A string model expression suitable for use with statsmodels and patsy.
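    Examples
    --------
    Illustrative only; the variable names are arbitrary.
    >>> str_model_expression(['x1', 'x2'])
    'x1 + x2 + 1'
    >>> str_model_expression({'left_side': 'y', 'right_side': ['x1', 'x2']})
    'y ~ x1 + x2 + 1'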
"""
if not isinstance(expr, str):
if isinstance(expr, collections.Mapping):
left_side = expr.get('left_side')
right_side = str_model_expression(expr['right_side'], add_constant)
else:
# some kind of iterable like a list
left_side = None
right_side = ' + '.join(expr)
if left_side:
model_expression = ' ~ '.join((left_side, right_side))
else:
model_expression = right_side
else:
model_expression = expr
if not has_constant_expr(model_expression):
if add_constant:
model_expression += ' + 1'
else:
model_expression += ' - 1'
logger.debug(
'converted expression: {!r} to model: {!r}'.format(
expr, model_expression))
return model_expression
def sorted_groupby(df, groupby):
"""
Perform a groupby on a DataFrame using a specific column
and assuming that that column is sorted.
Parameters
----------
df : pandas.DataFrame
groupby : object
Column name on which to groupby. This column must be sorted.
Returns
-------
generator
Yields pairs of group_name, DataFrame.
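    Examples
    --------
    A minimal sketch; note that the groupby column is already sorted.
    >>> import pandas as pd
    >>> df = pd.DataFrame({'g': ['a', 'a', 'b'], 'x': [10, 20, 30]})
    >>> [(name, len(grp)) for name, grp in sorted_groupby(df, 'g')]
    [('a', 2), ('b', 1)]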
"""
start = 0
prev = df[groupby].iloc[start]
for i, x in enumerate(df[groupby]):
if x != prev:
yield prev, df.iloc[start:i]
prev = x
start = i
# need to send back the last group
yield prev, df.iloc[start:]
def columns_in_filters(filters):
"""
Returns a list of the columns used in a set of query filters.
Parameters
----------
filters : list of str or str
        List of the filters as passed to ``apply_filter_query``.
Returns
-------
columns : list of str
List of all the strings mentioned in the filters.
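    Examples
    --------
    Illustrative only; the column names are arbitrary.
    >>> columns_in_filters(['sqft > 1000', "county == 'Alameda'"])
    ['sqft', 'county']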
"""
if not filters:
return []
if not isinstance(filters, str):
filters = ' '.join(filters)
columns = []
reserved = {'and', 'or', 'in', 'not'}
for toknum, tokval, _, _, _ in generate_tokens(StringIO(filters).readline):
if toknum == NAME and tokval not in reserved:
columns.append(tokval)
return list(tz.unique(columns))
def _tokens_from_patsy(node):
"""
Yields all the individual tokens from within a patsy formula
as parsed by patsy.parse_formula.parse_formula.
Parameters
----------
node : patsy.parse_formula.ParseNode
"""
for n in node.args:
for t in _tokens_from_patsy(n):
yield t
if node.token:
yield node.token
def columns_in_formula(formula):
"""
Returns the names of all the columns used in a patsy formula.
Parameters
----------
formula : str, iterable, or dict
Any formula construction supported by ``str_model_expression``.
Returns
-------
columns : list of str
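    Examples
    --------
    Illustrative only; assumes patsy is available to parse the formula.
    >>> columns_in_formula('y ~ x1 + np.log(x2)')
    ['y', 'x1', 'x2']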
"""
if formula is None:
return []
formula = str_model_expression(formula, add_constant=False)
columns = []
tokens = map(
lambda x: x.extra,
tz.remove(
lambda x: x.extra is None,
_tokens_from_patsy(patsy.parse_formula.parse_formula(formula))))
for tok in tokens:
# if there are parentheses in the expression we
# want to drop them and everything outside
# and start again from the top
if '(' in tok:
start = tok.find('(') + 1
fin = tok.rfind(')')
columns.extend(columns_in_formula(tok[start:fin]))
else:
for toknum, tokval, _, _, _ in generate_tokens(
StringIO(tok).readline):
if toknum == NAME:
columns.append(tokval)
return list(tz.unique(columns))
| bsd-3-clause |
andrebrener/crypto_predictor | backtest.py | 1 | 2740 | # =============================================================================
# File: backtest.py
# Author: Andre Brener
# Created: 12 Jun 2017
# Last Modified: 14 Jun 2017
# Description: description
# =============================================================================
from datetime import date, timedelta
import pandas as pd
from main import get_coin_decisions, get_daily_recommendations
from constants import BTC_AVAILABLE, COIN_DATA_DF, PRICE_PERIODS
from get_coin_data import get_price_history
def get_final_coin_position(total_decisions_df, total_prices_df):
day_cols = [
col for col in total_decisions_df.columns if 'index' not in str(col)
]
btc_available = BTC_AVAILABLE
new_coin_data_df = COIN_DATA_DF
for col in day_cols:
day_decision_df = total_decisions_df[['index', col]]
day_price_df = total_prices_df[['index', col]]
total_df, new_coin_data_df, btc_available = get_daily_recommendations(
new_coin_data_df, day_decision_df, day_price_df, btc_available)
final_position = new_coin_data_df
final_date = pd.to_datetime(col) + timedelta(PRICE_PERIODS)
return final_position, final_date, btc_available
def get_earnings(df, total_decisions_df, total_prices_df):
final_position, final_date, btc_available = get_final_coin_position(
total_decisions_df, total_prices_df)
df['date'] = pd.to_datetime(df['date'])
new_prices = df[df['date'] == final_date].T.reset_index()
new_prices.columns = ['coin', 'new_price']
final_df = pd.merge(final_position, new_prices)
final_df['btc_position'] = final_df['coin_position'] * final_df[
'new_price']
btc_invested = final_df['btc_position'].sum()
final_btc_position = final_df['btc_position'].sum() + btc_available
print('BTC Start: ', BTC_AVAILABLE)
print('BTC Invested: ', btc_invested)
print('BTC END: ', btc_available)
print('BTC Total: ', final_btc_position)
print('Earnings: ', (final_btc_position - BTC_AVAILABLE) / BTC_AVAILABLE)
def main_backtest():
print("Getting Coin Data")
coin_list = list(COIN_DATA_DF['coin'].unique())
end_date = date.today() - timedelta(1)
print("Getting predictions")
df = get_price_history(coin_list, end_date)
total_decisions_df, total_prices_df = get_coin_decisions(df)
print("Predictions done")
get_earnings(df, total_decisions_df, total_prices_df)
if __name__ == '__main__':
# df = pd.read_csv('historical_data.csv')
# total_decisions_df = pd.read_csv('backtest_decisions.csv')
# total_prices_df = pd.read_csv('backtest_prices.csv')
# get_earnings(df, total_decisions_df, total_prices_df)
main_backtest()
| mit |
suiyuan2009/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py | 116 | 5164 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff._enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff._enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
kernc/scikit-learn | sklearn/neighbors/approximate.py | 40 | 22369 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
    LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
    n_candidates : int (default = 50)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause |
pyspace/pyspace | pySPACE/run/launch.py | 2 | 16052 | #!/usr/bin/env python
""" Main program to run pySPACE
For further instructions take a look at the pySPACE documentation and the tutorials
in there!
.. note::
Due to errors in configuration files, data or the software, the software may
crash. Because of internal parallelization and threading, it is currently
not possible to use ''ctrl + c''. So you should kill the processes manually
e.g.::
ctrl + z
kill -9 %1
fg
**Profiling**
For profiling the software you should use the option ``--profile``
when running pySPACE. Furthermore, you should use the
:class:`~pySPACE.environments.backends.serial.SerialBackend`.
Otherwise the started subprocesses cannot be examined.
The result is called `profile.pstat` and saved in your result folder.
To get a readable printout of this profiling, you can use gprof2dot.py in the
library folder. The following is a (partial) copy from the documentation:
General usage::
python gprof2dot.py -f pstats profiling_file -r pySPACE | dot -Tpng -o output.png
or, more simply, from the result folder when the pyspace folder is on the same
level as the general storage folder::
python ../../../pyspace/pySPACE/tools/gprof2dot.py -f pstats profile.pstat | dot -Tpng -o output.png
where profiling_file is the file that is generated by the cProfile module
and output.png is the filename of the resulting picture.
The option '-r', '--restrict' is there to eliminate functions in the
profiling, that do not contain this string in
their path name [default: None].
.. note:: For creating the graphic, which is done with the 'dot' command,
the GraphViz package needs to be installed.
"""
# general imports
import os
import shutil
import sys
import logging
from optparse import OptionParser
import cProfile
import yaml
import warnings
# adding pySPACE to system path for import
file_path = os.path.dirname(os.path.realpath(__file__))
pyspace_path = file_path[:file_path.rfind('pySPACE')-1]
if not pyspace_path in sys.path:
sys.path.append(pyspace_path)
import pySPACE
import_path = os.path.realpath(os.path.join(os.path.dirname(pySPACE.__file__),
os.path.pardir))
if not import_path == pyspace_path:
warnings.warn("Check your Python path! "+
"'%s' is the expected pySPACE path," % pyspace_path +
" but '%s' is used." % import_path)
# pySPACE imports
from pySPACE.missions.operations.base import Operation, create_operation_from_file
from pySPACE.tools.filesystem import get_relative_path, create_source_archive
from pySPACE import create_backend
from pySPACE.environments.chains.operation_chain import create_operation_chain
from pySPACE.environments.big_bang import LOGGER
#import matplotlib
#matplotlib.use("MacOSX") #MacOSX")
class LaunchParser(OptionParser):
""" Overwrite epilog printout
Code taken from:
http://stackoverflow.com/questions/5961160/displaying-newlines-in-the-help-message-when-using-pythons-optparse
"""
def format_epilog(self, formatter):
""" Simply do not change the format of the string """
return self.epilog
epilog=\
"""
This is the script to launch pySPACE.
For detailed documentation on pySPACE refer to the online documentation at
http://pyspace.github.io/pyspace/index.html,
the __init__ file in the pySPACE folder, or the index.rst in the docs folder.
This script shall start pySPACE in the standard benchmark flow.
If you used setup.py or another installation program before,
all relevant files should be found in the folder `pySPACEcenter`
in your home directory. Otherwise it will be searched for in your
`PYSPACE_CONF_DIR`.
The main configuration is specified in the <config.yaml>. If you run pySPACE
for the first time, have a look at it and the therein specified environment
parameters.
Due to errors in configuration files, data or the software, the software may
crash. Because of internal parallelization and threading, it is currently
not possible to use ''ctrl + c''. So you should kill the processes manually
e.g.::
ctrl + z
kill -9 %1
fg
"""
def run_operation(default_backend, operation, ex_timeout=1e6, re_timeout=1e6):
""" Runs the given operation on the backend
Runs the given operation *operation* either on the backend specified in the
operation' spec file or (if none is specified) on the backend
passed as *default_backend*.
Different timeouts are required, because for the execute function get is
called which does not accept to high timeouts without proper error handling
on a Mac OS X whereas Linux systems are fine with larger timeouts.
"""
# Check if default backend can be used or if we have to run on a separate
# backend
if "backend" in operation.operation_spec:
backend = create_backend(operation.operation_spec["backend"])
LOGGER.info(" --> For current operation using backend: \n\t\t %s."%str(backend))
else:
backend = default_backend
    # In case an operation chain is executed, the queue needs to be reset,
    # since the first terminated operation cleans and closes the queue.
if backend.__str__() == "MulticoreBackend":
backend.reset_queue()
backend.stage_in(operation)
try:
backend.execute(timeout=ex_timeout)
backend.retrieve(timeout=re_timeout)
backend.consolidate()
return operation.get_output_directory()
finally:
backend.cleanup()
def run_operation_chain(default_backend, operation_chain):
""" Runs the given operation chain on the backend
Runs the given operation chain *operation_chain* on the backend passed as
*default_backend*.
.. todo:: document override mode here and in tutorial
.. todo:: documentation needed for prepare operation and hidden params
.. todo:: parameter settings missing instead of parameter ranges?
"""
base_result_dir = operation_chain.get_output_directory()
input_path = operation_chain["input_path"]
prepare_operation = operation_chain["prepare_operation"] \
if "prepare_operation" in operation_chain else None
operations = operation_chain["operations"]
runs = operation_chain["runs"] if "runs" in operation_chain else 1
# Run prepare operation if requested
if prepare_operation is not None:
LOGGER.info("Running prepare operation of the operation chain")
# Create operation object for specified prepare operation
operation = create_operation_from_file(prepare_operation,
base_result_dir)
output_directory = run_operation(default_backend, operation)
# Rename output_directory
preparation_directory = os.sep.join(output_directory.split(os.sep)[:-1]) + \
os.sep + "prepare_operation"
shutil.move(output_directory, preparation_directory)
# Execute all operations of the operation chain sequentially
for index, operation in enumerate(operations):
overridden_params_dict = {}
if isinstance(operation, str):
op_spec_relative_filename = operation
else: # it should be a dictionary...
if 'operation_spec' in operation:
op_spec_relative_filename = operation['operation_spec']
else:
op_spec_relative_filename = None
try:
overridden_params_dict = operation["overridden_params"]
except KeyError:
pass
if op_spec_relative_filename is not None:
LOGGER.info("Running operation %s of the operation chain (%s/%s)" % \
(op_spec_relative_filename, index + 1, len(operations)))
spec_file_name = os.path.join(pySPACE.configuration.spec_dir,
"operations",
op_spec_relative_filename)
operation_spec = yaml.load(open(spec_file_name, "r"))
else:
# we expect to get everything from overridden params
operation_spec = {}
try:
operation_name = overridden_params_dict['operation_name']
except KeyError:
operation_name = "<unnamed>"
LOGGER.info("Running operation %s of the operation chain (%s/%s)" % \
(operation_name, index + 1, len(operations)))
operation_spec["input_path"] = input_path
operation_spec["runs"] = runs
# Add pseudo parameter "__PREPARE_OPERATION__" to parameter ranges
# if there was a prepare operation
if prepare_operation is not None :
if not "parameter_ranges" in operation_spec:
operation_spec["parameter_ranges"] = {}
operation_spec["parameter_ranges"]["__PREPARE_OPERATION__"] = [preparation_directory]
if not "hide_parameters" in operation_spec:
operation_spec["hide_parameters"] = []
operation_spec["hide_parameters"].append("__PREPARE_OPERATION__")
# override params with any explicitly specified params in the operation chain
# spec.
operation_spec.update(overridden_params_dict)
# use the operation factory method to create operation
operation = Operation.create(operation_spec,
base_result_dir = base_result_dir)
# Run the operation
output_directory = run_operation(default_backend, operation)
# The output acts as input for the next operation of the operation chain
input_path = get_relative_path(pySPACE.configuration.storage,
output_directory)
def main():
#### Find pySPACE package and import it ####
# Determine path of current file
path = os.path.realpath(__file__)
# Move up to parent directory that contains the pySPACE tree
suffix = []
for i in range(3):
path, tail = os.path.split(path)
suffix.append(tail)
parent_dir = path
# Check proper directory structure
if suffix != ['launch.py', 'run', 'pySPACE']:
raise RuntimeError, "Encountered incorrect directory structure. "\
"launch.py needs to reside in $PARENT_DIR/pySPACE/run"
# Workaround for eegserver crashing after 255 open ports
# - Now it crashes after 4096 open ports ;-)
#import resource
#(fd1, fd2) = resource.getrlimit(resource.RLIMIT_NOFILE)
#fd1 = 4096 if fd2 == resource.RLIM_INFINITY else fd2-1
#resource.setrlimit(resource.RLIMIT_NOFILE, (fd1,fd2))
# ------------------------------------------------------
#########################################
### Parsing of command line arguments
usage = "Usage: %prog [BACKEND_SPECIFICATION] [--config <conf.yaml>] "\
"[--operation <operation.yaml> | --operation_chain <operation_chain.yaml>] "\
"[--profile]"\
" where BACKEND_SPECIFICATION can be --serial, --mcore, --loadl or --mpi"
parser = LaunchParser(usage=usage, epilog=epilog)
# Configuration
parser.add_option("-c", "--configuration",
default="config.yaml",
help="Choose the configuration file, which is looked up in PYSPACE_CONF_DIR",
action="store")
# Backends
parser.add_option("-s", "--serial", action="store_true", default=False,
help="Enables execution on the SerialBackend (one local process)")
parser.add_option("-m", "--mcore", action="store_true", default=False,
help="Enables execution on the MulticoreBackend (one process per CPU core)")
parser.add_option("-l", "--local", action="store_true", default=False,
help="Enables execution on the MulticoreBackend (one process per CPU core)")
parser.add_option("-i", "--mpi", action="store_true", default=False,
help="Enables execution via MPI")
parser.add_option("-L", "--loadl", action="store_true", default=False,
help="Enables execution via LoadLeveler.")
# Operation / operation chain
parser.add_option("-o", "--operation",
help="Chooses the operation that will be executed. The "
"operation specification file is looked up in "
"$SPEC_DIR/operations",
action="store")
parser.add_option("-O", "-C", "--operation_chain",
help="Chooses the operation chain that will be executed. "
"The operation chain specification file is looked up "
"in $SPEC_DIR/operation_chains",
action="store")
# Profiling
parser.add_option("-p", "--profile",
help="Profiles execution.",
action="store_true", default=False,)
(options, args) = parser.parse_args()
# Load configuration file
pySPACE.load_configuration(options.configuration)
    # Use the configured EEG acquisition dir if available, otherwise fall
    # back to the "eeg_modules" folder next to the pySPACE installation.
    if hasattr(pySPACE.configuration, "eeg_acquisition_dir"):
        eeg_parent_dir =\
            os.sep.join(pySPACE.configuration.eeg_acquisition_dir.split(os.sep)[:-1])
        pySPACE.configuration.eeg_module_path = eeg_parent_dir
    else:
        eeg_parent_dir, tail = os.path.split(parent_dir)
        eeg_parent_dir = os.path.join(eeg_parent_dir, "eeg_modules")
        pySPACE.configuration.eeg_module_path = eeg_parent_dir
sys.path.append(eeg_parent_dir)
# Create backend
if options.serial:
default_backend = create_backend("serial")
elif options.mcore or options.local:
default_backend = create_backend("mcore")
elif options.mpi:
default_backend = create_backend("mpi")
elif options.loadl:
default_backend = create_backend("loadl")
else: # Falling back to serial backend
default_backend = create_backend("serial")
LOGGER.info(" --> Using backend: \n\t\t %s."%str(default_backend))
if not options.operation is None:
# Create operation for the given name
operation = create_operation_from_file(options.operation)
# Store current source code for later inspection
create_source_archive(archive_path=operation.get_output_directory())
if not options.profile:
# Execute the current operation
run_operation(default_backend, operation)
else:
# Execute and profile operation
cProfile.runctx('pySPACE.run_operation(default_backend, operation)',
globals(), locals(),
filename = operation.get_output_directory()\
+ os.sep + "profile.pstat")
elif not options.operation_chain is None:
# Create operation chain for the given name
operation_chain = create_operation_chain(options.operation_chain)
# Store current source code for later inspection
create_source_archive(archive_path=operation_chain.get_output_directory())
if not options.profile:
# Execute the current operation_chain
run_operation_chain(default_backend, operation_chain)
else:
# Execute and profile operation
cProfile.runctx('pySPACE.run_operation_chain(default_backend, operation_chain)',
globals(), locals(),
filename=operation_chain.get_output_directory()\
+ os.sep + "profile.pstat")
else:
parser.error("Neither operation chain nor operation specification file given!")
logging.shutdown()
# Stop logger thread in backend
default_backend._stop_logging()
del default_backend
if __name__ == "__main__":
# run main with soft finishing
sys.exit(main())
# hard finish
os._exit(0)
| bsd-3-clause |
airbnb/superset | tests/model_tests.py | 1 | 14095 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import textwrap
import unittest
import pandas
from sqlalchemy.engine.url import make_url
import tests.test_app
from superset import app, db as metadata_db
from superset.models.core import Database
from superset.models.slice import Slice
from superset.utils.core import get_example_database, QueryStatus
from .base_tests import SupersetTestCase
class TestDatabaseModel(SupersetTestCase):
@unittest.skipUnless(
SupersetTestCase.is_module_installed("requests"), "requests not installed"
)
def test_database_schema_presto(self):
sqlalchemy_uri = "presto://presto.airbnb.io:8080/hive/default"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("hive/default", db)
db = make_url(model.get_sqla_engine(schema="core_db").url).database
self.assertEqual("hive/core_db", db)
sqlalchemy_uri = "presto://presto.airbnb.io:8080/hive"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("hive", db)
db = make_url(model.get_sqla_engine(schema="core_db").url).database
self.assertEqual("hive/core_db", db)
def test_database_schema_postgres(self):
sqlalchemy_uri = "postgresql+psycopg2://postgres.airbnb.io:5439/prod"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("prod", db)
db = make_url(model.get_sqla_engine(schema="foo").url).database
self.assertEqual("prod", db)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("thrift"), "thrift not installed"
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pyhive"), "pyhive not installed"
)
def test_database_schema_hive(self):
sqlalchemy_uri = "hive://hive@hive.airbnb.io:10000/default?auth=NOSASL"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("default", db)
db = make_url(model.get_sqla_engine(schema="core_db").url).database
self.assertEqual("core_db", db)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("MySQLdb"), "mysqlclient not installed"
)
def test_database_schema_mysql(self):
sqlalchemy_uri = "mysql://root@localhost/superset"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("superset", db)
db = make_url(model.get_sqla_engine(schema="staging").url).database
self.assertEqual("staging", db)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("MySQLdb"), "mysqlclient not installed"
)
def test_database_impersonate_user(self):
uri = "mysql://root@localhost"
example_user = "giuseppe"
model = Database(database_name="test_database", sqlalchemy_uri=uri)
model.impersonate_user = True
user_name = make_url(model.get_sqla_engine(user_name=example_user).url).username
self.assertEqual(example_user, user_name)
model.impersonate_user = False
user_name = make_url(model.get_sqla_engine(user_name=example_user).url).username
self.assertNotEqual(example_user, user_name)
def test_select_star(self):
db = get_example_database()
table_name = "energy_usage"
sql = db.select_star(table_name, show_cols=False, latest_partition=False)
quote = db.inspector.engine.dialect.identifier_preparer.quote_identifier
expected = (
textwrap.dedent(
f"""\
SELECT *
FROM {quote(table_name)}
LIMIT 100"""
)
if db.backend in {"presto", "hive"}
else textwrap.dedent(
f"""\
SELECT *
FROM {table_name}
LIMIT 100"""
)
)
assert expected in sql
sql = db.select_star(table_name, show_cols=True, latest_partition=False)
# TODO(bkyryliuk): unify sql generation
if db.backend == "presto":
assert (
textwrap.dedent(
"""\
SELECT "source" AS "source",
"target" AS "target",
"value" AS "value"
FROM "energy_usage"
LIMIT 100"""
)
== sql
)
elif db.backend == "hive":
assert (
textwrap.dedent(
"""\
SELECT `source`,
`target`,
`value`
FROM `energy_usage`
LIMIT 100"""
)
== sql
)
else:
assert (
textwrap.dedent(
"""\
SELECT source,
target,
value
FROM energy_usage
LIMIT 100"""
)
in sql
)
def test_select_star_fully_qualified_names(self):
db = get_example_database()
schema = "schema.name"
table_name = "table/name"
sql = db.select_star(
table_name, schema=schema, show_cols=False, latest_partition=False
)
fully_qualified_names = {
"sqlite": '"schema.name"."table/name"',
"mysql": "`schema.name`.`table/name`",
"postgres": '"schema.name"."table/name"',
}
fully_qualified_name = fully_qualified_names.get(db.db_engine_spec.engine)
if fully_qualified_name:
expected = textwrap.dedent(
f"""\
SELECT *
FROM {fully_qualified_name}
LIMIT 100"""
)
assert sql.startswith(expected)
def test_single_statement(self):
main_db = get_example_database()
if main_db.backend == "mysql":
df = main_db.get_df("SELECT 1", None)
self.assertEqual(df.iat[0, 0], 1)
df = main_db.get_df("SELECT 1;", None)
self.assertEqual(df.iat[0, 0], 1)
def test_multi_statement(self):
main_db = get_example_database()
if main_db.backend == "mysql":
df = main_db.get_df("USE superset; SELECT 1", None)
self.assertEqual(df.iat[0, 0], 1)
df = main_db.get_df("USE superset; SELECT ';';", None)
self.assertEqual(df.iat[0, 0], ";")
class TestSqlaTableModel(SupersetTestCase):
def test_get_timestamp_expression(self):
tbl = self.get_table_by_name("birth_names")
ds_col = tbl.get_column("ds")
sqla_literal = ds_col.get_timestamp_expression(None)
self.assertEqual(str(sqla_literal.compile()), "ds")
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(ds)")
prev_ds_expr = ds_col.expression
ds_col.expression = "DATE_ADD(ds, 1)"
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(DATE_ADD(ds, 1))")
ds_col.expression = prev_ds_expr
def test_get_timestamp_expression_epoch(self):
tbl = self.get_table_by_name("birth_names")
ds_col = tbl.get_column("ds")
ds_col.expression = None
ds_col.python_date_format = "epoch_s"
sqla_literal = ds_col.get_timestamp_expression(None)
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "from_unixtime(ds)")
ds_col.python_date_format = "epoch_s"
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(from_unixtime(ds))")
prev_ds_expr = ds_col.expression
ds_col.expression = "DATE_ADD(ds, 1)"
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(from_unixtime(DATE_ADD(ds, 1)))")
ds_col.expression = prev_ds_expr
def query_with_expr_helper(self, is_timeseries, inner_join=True):
tbl = self.get_table_by_name("birth_names")
ds_col = tbl.get_column("ds")
ds_col.expression = None
ds_col.python_date_format = None
spec = self.get_database_by_id(tbl.database_id).db_engine_spec
if not spec.allows_joins and inner_join:
            # if the db does not support inner joins, we cannot force one, so skip
return None
old_inner_join = spec.allows_joins
spec.allows_joins = inner_join
arbitrary_gby = "state || gender || '_test'"
arbitrary_metric = dict(
label="arbitrary", expressionType="SQL", sqlExpression="SUM(sum_boys)"
)
query_obj = dict(
groupby=[arbitrary_gby, "name"],
metrics=[arbitrary_metric],
filter=[],
is_timeseries=is_timeseries,
columns=[],
granularity="ds",
from_dttm=None,
to_dttm=None,
extras=dict(time_grain_sqla="P1Y"),
)
qr = tbl.query(query_obj)
self.assertEqual(qr.status, QueryStatus.SUCCESS)
sql = qr.query
self.assertIn(arbitrary_gby, sql)
self.assertIn("name", sql)
if inner_join and is_timeseries:
self.assertIn("JOIN", sql.upper())
else:
self.assertNotIn("JOIN", sql.upper())
spec.allows_joins = old_inner_join
self.assertFalse(qr.df.empty)
return qr.df
def test_query_with_expr_groupby_timeseries(self):
if get_example_database().backend == "presto":
# TODO(bkyryliuk): make it work for presto.
return
def cannonicalize_df(df):
ret = df.sort_values(by=list(df.columns.values), inplace=False)
ret.reset_index(inplace=True, drop=True)
return ret
df1 = self.query_with_expr_helper(is_timeseries=True, inner_join=True)
name_list1 = cannonicalize_df(df1).name.values.tolist()
df2 = self.query_with_expr_helper(is_timeseries=True, inner_join=False)
        name_list2 = cannonicalize_df(df2).name.values.tolist()
self.assertFalse(df2.empty)
expected_namelist = [
"Anthony",
"Brian",
"Christopher",
"Daniel",
"David",
"Eric",
"James",
"Jeffrey",
"John",
"Joseph",
"Kenneth",
"Kevin",
"Mark",
"Michael",
"Paul",
]
assert name_list2 == expected_namelist
assert name_list1 == expected_namelist
def test_query_with_expr_groupby(self):
self.query_with_expr_helper(is_timeseries=False)
def test_sql_mutator(self):
tbl = self.get_table_by_name("birth_names")
query_obj = dict(
groupby=[],
metrics=[],
filter=[],
is_timeseries=False,
columns=["name"],
granularity=None,
from_dttm=None,
to_dttm=None,
extras={},
)
sql = tbl.get_query_str(query_obj)
self.assertNotIn("-- COMMENT", sql)
def mutator(*args):
return "-- COMMENT\n" + args[0]
app.config["SQL_QUERY_MUTATOR"] = mutator
sql = tbl.get_query_str(query_obj)
self.assertIn("-- COMMENT", sql)
app.config["SQL_QUERY_MUTATOR"] = None
def test_query_with_non_existent_metrics(self):
tbl = self.get_table_by_name("birth_names")
query_obj = dict(
groupby=[],
metrics=["invalid"],
filter=[],
is_timeseries=False,
columns=["name"],
granularity=None,
from_dttm=None,
to_dttm=None,
extras={},
)
with self.assertRaises(Exception) as context:
tbl.get_query_str(query_obj)
self.assertTrue("Metric 'invalid' does not exist", context.exception)
def test_data_for_slices(self):
tbl = self.get_table_by_name("birth_names")
slc = (
metadata_db.session.query(Slice)
.filter_by(datasource_id=tbl.id, datasource_type=tbl.type)
.first()
)
data_for_slices = tbl.data_for_slices([slc])
self.assertEqual(len(data_for_slices["columns"]), 0)
self.assertEqual(len(data_for_slices["metrics"]), 1)
self.assertEqual(len(data_for_slices["verbose_map"].keys()), 2)
| apache-2.0 |
duerrp/pyexperiment | docs/conf.py | 4 | 12427 | # -*- coding: utf-8 -*-
#
# pyexperiment documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 25 13:55:57 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import importlib
import inspect
try:
from mock import MagicMock
except ImportError:
from unittest.mock import MagicMock
ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['pygtk',
'gtk',
'gobject',
'numpy',
'pandas',
'h5py',
'matplotlib',
'configobj',
'validate',
'argparse',
]
# if ON_RTD:
# MOCK_MODULES += ['six',
# 'six.moves',
# ]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
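# Note on the mocking above: the third-party modules listed in MOCK_MODULES are
# runtime dependencies that may not be installed on the documentation builders
# (e.g. Read the Docs, detected via ON_RTD). Registering MagicMock-based stand-ins
# in sys.modules lets Sphinx autodoc import the pyexperiment package anyway; any
# attribute access on a mocked module simply returns another Mock object instead
# of raising ImportError.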
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from pyexperiment import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.linkcode',
]
if ON_RTD:
extensions += ['sphinxcontrib.napoleon',]
else:
extensions += ['sphinx.ext.napoleon',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyexperiment'
copyright = u'2015, Peter Duerr'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**setup**', '../**tests**', '_templates']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Enable autosummary
autosummary_generate = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not ON_RTD:
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyexperimentdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyexperiment.tex', u'pyexperiment Documentation',
u'Peter Duerr', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyexperiment', u'pyexperiment Documentation',
[u'Peter Duerr'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyexperiment', u'pyexperiment Documentation',
u'Peter Duerr', 'pyexperiment', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pyexperiment'
epub_author = u'Peter Duerr'
epub_publisher = u'Peter Duerr'
epub_copyright = u'2015, Peter Duerr'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'pyexperiment'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
def linkcode_resolve(domain, info):
"""Link source code to github
"""
project = u'pyexperiment'
github_user = u'duerrp'
if domain != 'py' or not info['module']:
return None
filename = info['module'].replace('.', '/')
mod = importlib.import_module(info['module'])
basename = os.path.splitext(mod.__file__)[0]
if basename.endswith('__init__'):
filename += '/__init__'
item = mod
lineno = ''
for piece in info['fullname'].split('.'):
item = getattr(item, piece)
try:
lineno = '#L%d' % inspect.getsourcelines(item)[1]
except (TypeError, IOError):
pass
return ("https://github.com/%s/%s/blob/%s/%s.py%s" %
(github_user, project, release, filename, lineno))
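# For illustration only (hypothetical names, not real pyexperiment objects):
# given domain='py' and info={'module': 'pyexperiment.some_module',
# 'fullname': 'some_function'}, linkcode_resolve returns a URL of the form
#   https://github.com/duerrp/pyexperiment/blob/<release>/pyexperiment/some_module.py#L<lineno>
# where <release> is the package version and <lineno> is the first source line
# of the resolved object (empty if the source lines cannot be determined).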
| mit |
tranlyvu/kaggle | Digit Recognizer/main/second_attempt.py | 1 | 2058 | """
The script generates the submission file for the Kaggle contest "Digit Recognizer", an
image recognition contest whose challenge is to classify handwritten single digits.
Dimensionality reduction: PCA + random forest feature selection
Predictive model: SVM
"""
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
import decimal
def main():
decimal.getcontext().prec=4
#loading original train dataset
train_data=pd.read_csv('...Digit_Recognizer/train.csv')
#features from training set
	train_features=train_data.values[:,1:]
#target from training set
	train_target=train_data.iloc[:,0]
#pre-processing train features
my_pca=PCA(n_components=0.90,whiten=True)
	pca_train_features=my_pca.fit_transform(train_features)
#selecting feature using Random Forest
rfc=RandomForestClassifier()
rfc.fit(pca_train_features,train_target)
final_train_features=rfc.transform(pca_train_features)
#training SVM model
model=SVC(kernel='rbf')
#Grid search for model evaluation
C_power=[decimal.Decimal(x) for x in list(range(-5,17,2))]
gamma_power=[decimal.Decimal(x) for x in list(range(-15,5,2))]
grid_search_params={'C':list(np.power(2, C_power)),'gamma':list(np.power(2, gamma_power))}
	gs=GridSearchCV(estimator=model,param_grid=grid_search_params,scoring='accuracy',n_jobs=-1,cv=3)
#fitting the model
gs.fit(final_train_features,train_target)
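	# A sketch of an equivalent scikit-learn Pipeline (not part of the original
	# script; assumes scikit-learn >= 0.17 for SelectFromModel):
	#   from sklearn.pipeline import Pipeline
	#   from sklearn.feature_selection import SelectFromModel
	#   pipe = Pipeline([('pca', PCA(n_components=0.90, whiten=True)),
	#                    ('select', SelectFromModel(RandomForestClassifier())),
	#                    ('svm', SVC(kernel='rbf'))])
	#   pipe.fit(train_features, train_target)
	# A pipeline keeps the PCA/selection steps fitted on training data only and
	# reuses them automatically when predicting on the test set.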
#loading original test dataset
test_data=pd.read_csv('...Digit_Recognizer/test.csv')
	#pre-processing test features
test_features=test_data.values
pca_test_features=my_pca.transform(test_features)
final_test_features=rfc.transform(pca_test_features)
#predicting from test set
prediction=gs.predict(final_test_features)
#preparing submission file
pd.DataFrame({"ImageId": range(1,len(prediction)+1), "Label": prediction}).to_csv('second_attempt.csv', index=False, header=True)
if __name__ == '__main__':
main() | apache-2.0 |
kjung/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 57 | 2195 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
Alexoner/mooc | cs231n/assignment2/cs231n/activation_statistics.py | 1 | 1817 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def activation_statistics(init_func=lambda fan_in, fan_out: np.random.randn(fan_in, fan_out) * 0.001, nonlinearity='tanh'):
"""TODO: Docstring for activation_statistics.
Demonstrate activation statistics with different weight initialization
:init_func: TODO
:returns: TODO
"""
    # assume some unit-gaussian 500-D input data
D = np.random.randn(1000, 500)
hidden_layer_sizes = [500]*10
nonlinearities = [nonlinearity]*len(hidden_layer_sizes)
act = {'relu': lambda x: np.maximum(0,x), 'tanh': lambda x: np.tanh(x)}
Hs = {}
for i in range(len(hidden_layer_sizes)):
X = D if i == 0 else Hs[i-1] # input at this layer
fan_in = X.shape[1]
fan_out = hidden_layer_sizes[i]
W = init_func(fan_in, fan_out) # layer initialization
H = np.dot(X, W) # matrix multiply
H = act[nonlinearities[i]](H) # nonlinearities
Hs[i] = H # cache result on this layer
# look at the distribution at each layer
print('input layer had mean %f and std %f' %(np.mean(D), np.std(D)))
layer_means = [np.mean(H) for i, H in Hs.iteritems()]
layer_stds = [np.std(H) for i, H in Hs.iteritems()]
for i, H in Hs.iteritems():
print('hidden layer %d had mean %f and std %f' % (i+1, layer_means[i], layer_stds[i]))
# plot the means and standard deviations
plt.figure()
plt.subplot(121)
plt.plot(Hs.keys(), layer_means, 'ob-')
plt.title('layer mean')
plt.subplot(122)
plt.plot(Hs.keys(), layer_stds, 'or-')
plt.title('layer std')
# plot the raw distribution
plt.figure()
for i,H in Hs.iteritems():
plt.subplot(2, len(Hs)/2, i+1)
plt.hist(H.ravel(), 30, range=(-1,1,))
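# Usage sketch (not part of the original file); the two scales below follow the
# common "Xavier" (1/sqrt(fan_in)) and "He" (sqrt(2/fan_in)) initialization recipes:
#   activation_statistics(lambda fi, fo: np.random.randn(fi, fo) / np.sqrt(fi), 'tanh')
#   activation_statistics(lambda fi, fo: np.random.randn(fi, fo) * np.sqrt(2.0 / fi), 'relu')
#   plt.show()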
| apache-2.0 |
Carralex/landlab | landlab/ca/examples/turbulent_suspension_with_settling_and_bleaching.py | 4 | 16830 | #!/usr/env/python
"""
isotropic_turbulent_suspension_with_settling_and_bleaching.py
Example of a continuous-time, stochastic, pair-based cellular automaton model,
which simulates the diffusion of suspended particles in a turbulent fluid.
Particles start with an accumulated luminescence signal L = 1, and are bleached
by exposure to light at a rate that depends on distance below the upper surface.
Written by Greg Tucker, July 2015
"""
from __future__ import print_function # for both python 2 and 3 compability
import time
import matplotlib
from pylab import figure, show, clf
from numpy import where, exp, amin
from landlab import RasterModelGrid, ModelParameterDictionary
from landlab.plot.imshow import imshow_node_grid
from landlab.components.cellular_automata.celllab_cts import Transition, CAPlotter
from landlab.components.cellular_automata.oriented_raster_cts import OrientedRasterCTS
class TurbulentSuspensionAndBleachingModel(OrientedRasterCTS):
"""
Examples
---------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 4
... model_grid_column__count: number of columns in grid
... 4
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 2.0
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.node_state
array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
>>> tsbm.grid.at_node['osl']
array([ 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.,
0., 0., 0.])
>>> tsbm.n_xn
array([0, 1, 1, 0, 0, 1, 1, 0])
>>> tsbm.fluid_surface_height
3.5
"""
def __init__(self, input_stream):
"""
Reads in parameters and initializes the model.
Examples
--------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 4
... model_grid_column__count: number of columns in grid
... 4
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 2.0
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.node_state
array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
>>> tsbm.grid.at_node['osl']
array([ 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.,
0., 0., 0.])
>>> tsbm.n_xn
array([0, 1, 1, 0, 0, 1, 1, 0])
>>> tsbm.fluid_surface_height
3.5
"""
# Get a source for input parameters.
params = ModelParameterDictionary(input_stream)
# Read user-defined parameters
nr = params.read_int('model_grid_row__count') # number of rows (CSDMS Standard Name [CSN])
nc = params.read_int('model_grid_column__count') # number of cols (CSN)
self.plot_interval = params.read_float('plot_interval') # interval for plotting output, s
self.run_duration = params.read_float('model__run_time') # duration of run, sec (CSN)
self.report_interval = params.read_float('model__report_interval') # report interval, in real-time seconds
self.bleach_T0 = params.read_float('surface_bleaching_time_scale') # time scale for bleaching at fluid surface, s
self.zstar = params.read_float('light_attenuation_length') # length scale for light attenuation in fluid, CELLS
# Derived parameters
self.fluid_surface_height = nr-0.5
# Calculate when we next want to report progress.
self.next_report = time.time() + self.report_interval
# Create grid
mg = RasterModelGrid(nr, nc, 1.0)
# Make the boundaries be walls
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
# Set up the states and pair transitions.
ns_dict = { 0 : 'fluid', 1 : 'particle' }
xn_list = self.setup_transition_list()
# Create the node-state array and attach it to the grid
node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=int)
# For visual display purposes, set all boundary nodes to fluid
node_state_grid[mg.closed_boundary_nodes] = 0
# Initialize the node-state array: here, the initial condition is a pile of
# resting grains at the bottom of a container.
bottom_rows = where(mg.node_y<0.4*nr)[0]
node_state_grid[bottom_rows] = 1
# Create a data array for bleaching.
# Here, osl=optically stimulated luminescence, normalized to the original
# signal (hence, initially all unity). Over time this signal may get
# bleached out due to exposure to light.
self.osl = mg.add_zeros('node', 'osl')
self.osl[bottom_rows] = 1.0
self.osl_display = mg.add_zeros('node', 'osl_display')
self.osl_display[bottom_rows] = 1.0
# We'll need an array to track the last time any given node was
# updated, so we can figure out the duration of light exposure between
# update events
self.last_update_time = mg.add_zeros('node','last_update_time')
# Call the base class (RasterCTS) init method
super(TurbulentSuspensionAndBleachingModel, \
self).__init__(mg, ns_dict, xn_list, node_state_grid, prop_data=self.osl)
# Set up plotting (if plotting desired)
if self.plot_interval <= self.run_duration:
self.initialize_plotting()
def initialize_plotting(self):
"""
Creates a CA plotter object, sets its colormap, and plots the initial
model state.
"""
# Set up some plotting information
grain = '#5F594D'
bleached_grain = '#CC0000'
fluid = '#D0E4F2'
clist = [fluid,bleached_grain,grain]
my_cmap = matplotlib.colors.ListedColormap(clist)
# Create a CAPlotter object for handling screen display
self.ca_plotter = CAPlotter(self, cmap=my_cmap)
# Plot the initial grid
self.ca_plotter.update_plot()
# Make a colormap for use in showing the bleaching of each grain
clist = [(0.0, (1.0, 1.0, 1.0)), (0.49, (0.8, 0.8, 0.8)), (1.0, (0.0, 0.0, 0.0))]
self.cmap_for_osl = matplotlib.colors.LinearSegmentedColormap.from_list('osl_cmap', clist)
def setup_transition_list(self):
"""
Creates and returns a list of Transition() objects to represent state
transitions for a biased random walk, in which the rate of downward
motion is greater than the rate in the other three directions.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
State 0 represents fluid and state 1 represents a particle (such as a
sediment grain, tea leaf, or dissolved heavy particle).
The states and transitions are as follows:
Pair state Transition to Process Rate (cells/s)
========== ============= ======= ==============
0 (0-0) (none) - -
1 (0-1) 2 (1-0) left motion 10.0
2 (1-0) 1 (0-1) right motion 10.0
3 (1-1) (none) - -
4 (0-0) (none) - -
5 (0-1) 2 (1-0) down motion 10.55
6 (1-0) 1 (0-1) up motion 9.45
7 (1-1) (none) - -
"""
# Create an empty transition list
xn_list = []
# Append four transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left cell, right cell, orientation [0=horizontal])
# - Tuple representing new pair state
# (bottom cell, top cell, orientation [1=vertical])
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
# - Flag indicating that the transition involves an exchange of properties
# - Function to be called after each transition, to update a property
# (in this case, to simulate bleaching of the luminescence signal)
xn_list.append( Transition((0,1,0), (1,0,0), 10., 'left motion', True, self.update_bleaching) )
xn_list.append( Transition((1,0,0), (0,1,0), 10., 'right motion', True, self.update_bleaching) )
xn_list.append( Transition((0,1,1), (1,0,1), 10.55, 'down motion', True, self.update_bleaching) )
xn_list.append( Transition((1,0,1), (0,1,1), 9.45, 'up motion', True, self.update_bleaching) )
return xn_list
def bleach_grain(self, node, dt):
"""
Updates the luminescence signal at node.
Examples
--------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.bleach_grain(10, 1.0)
>>> int(tsbm.prop_data[tsbm.propid[10]]*1000)
858
"""
depth = self.fluid_surface_height - self.grid.node_y[node]
T_bleach = self.bleach_T0*exp( depth/self.zstar)
self.prop_data[self.propid[node]] *= exp( -dt/T_bleach )
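        # In other words, the luminescence decays as
        #   L(t + dt) = L(t) * exp(-dt / T_bleach),  with  T_bleach = bleach_T0 * exp(depth / zstar),
        # so grains near the fluid surface (small depth) bleach fastest.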
def update_bleaching(self, ca_unused, node1, node2, time_now):
"""
Updates the luminescence signal at a pair of nodes that have just
undergone a transition, if either or both nodes is a grain.
Examples
--------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.update_bleaching(tsbm, 10, 13, 1.0)
>>> int(tsbm.prop_data[tsbm.propid[10]]*1000)
858
>>> tsbm.prop_data[tsbm.propid[13]]
0.0
"""
if self.node_state[node1]==1:
dt = time_now - self.last_update_time[self.propid[node1]]
self.bleach_grain(node1, dt)
self.last_update_time[self.propid[node1]] = time_now
if self.node_state[node2]==1:
dt = time_now - self.last_update_time[self.propid[node2]]
self.bleach_grain(node2, dt)
self.last_update_time[self.propid[node2]] = time_now
def synchronize_bleaching(self, sync_time):
"""
Brings all nodes up to the same time, sync_time, by applying bleaching
up to this time, and updating last_update_time.
Notes
-----
In a CellLab-CTS model, the "time" is usually different for each node:
some will have only just recently undergone a transition and had their
properties (in this case, OSL bleaching) updated, while others will
have last been updated a long time ago, and some may never have had a
transition. If we want to plot the properties at a consistent time, we
need to bring all node properties (again, in this case, OSL) up to
date. This method does so.
We multiply elapsed time (between last update and "sync time") by
the node state, because we only want to update the solid particles---
because the state of a particle is 1 and fluid 0, this multiplication
masks out the fluid nodes.
We don't call bleach_grain(), because we want to take advantage of
numpy array operations rather than calling a method for each node.
Examples
--------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.synchronize_bleaching(1.0)
>>> int(tsbm.osl[10]*100000)
85897
"""
dt = (sync_time - self.last_update_time[self.propid])*self.node_state
assert (amin(dt)>=0.0), 'sync_time must be >= 0 everywhere'
depth = self.fluid_surface_height - self.grid.node_y
T_bleach = self.bleach_T0*exp( depth/self.zstar)
self.prop_data[self.propid] *= exp( -dt/T_bleach )
self.last_update_time[self.propid] = sync_time*self.node_state
def go(self):
"""
Runs the model.
"""
# RUN
while self.current_time < self.run_duration:
# Once in a while, print out simulation and real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= self.next_report:
print('Current sim time',self.current_time,'(',100*self.current_time/self.run_duration,'%)')
self.next_report = current_real_time + self.report_interval
# Run the model forward in time until the next output step
self.run(self.current_time+self.plot_interval, self.node_state,
plot_each_transition=False)
self.current_time += self.plot_interval
self.synchronize_bleaching(self.current_time)
if self.plot_interval <= self.run_duration:
# Plot the current grid
self.ca_plotter.update_plot()
# Display the OSL content of grains
figure(3)
clf()
self.osl_display[:] = self.osl[self.propid]+self.node_state
imshow_node_grid(self.grid, 'osl_display', limits=(0.0, 2.0),
cmap=self.cmap_for_osl)
show()
figure(1)
def finalize(self):
# FINALIZE
# Plot
self.ca_plotter.finalize()
# If user runs this file, activate the main() function.
if __name__ == "__main__":
# Parse command-line argument, if any
import sys
if len(sys.argv)>1:
input_file_name = sys.argv[1]
else:
input_file_name = 'tsbm_inputs.txt'
# Instantiate the model
ca_model = TurbulentSuspensionAndBleachingModel(input_file_name)
# Run the model
ca_model.go()
# Clean up
ca_model.finalize()
| mit |
patrick-winter-knime/deep-learning-on-molecules | autoencoder_features/util/random_forest.py | 2 | 1688 | from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
import numpy
from progressbar import ProgressBar
def train(train_data_input, train_data_output, model_path, nr_trees=1000):
train_data_input = numerical_to_features(train_data_input)
random_forest = RandomForestClassifier(n_estimators=nr_trees, min_samples_leaf=1000, n_jobs=-1,
class_weight="balanced", verbose=1, criterion='gini')
random_forest.fit(train_data_input, train_data_output)
joblib.dump(random_forest, model_path)
def predict(test_data_input, model_path):
test_data_input = numerical_to_features(test_data_input)
random_forest = joblib.load(model_path)
probabilities = random_forest.predict_proba(test_data_input)
probabilities = numpy.array(probabilities)
return probabilities
def numerical_to_classes(numerical_data):
classes = numpy.ndarray((numerical_data.shape[0],), dtype='S1')
print('Converting training output')
with ProgressBar(max_value=len(numerical_data)) as progress:
for i in range(len(numerical_data)):
if numerical_data[i][0] >= numerical_data[i][1]:
classes[i] = 'a'
else:
classes[i] = 'i'
progress.update(i+1)
return classes
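# Note: an equivalent vectorized form of numerical_to_classes (a sketch, assuming
# numerical_data is a 2-D array whose first two columns hold the class scores) is:
#   classes = numpy.where(numerical_data[:, 0] >= numerical_data[:, 1], b'a', b'i')
# which avoids the per-row Python loop entirely.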
def numerical_to_features(numerical_data):
features = numpy.ndarray((numerical_data.shape[0], numerical_data.shape[1]))
print('Converting features')
with ProgressBar(max_value=len(numerical_data)) as progress:
for i in range(len(numerical_data)):
features[i] = numerical_data[i]
progress.update(i+1)
return features
| gpl-3.0 |
alexmojaki/odo | odo/backends/tests/test_sparksql.py | 2 | 6058 | from __future__ import print_function, absolute_import, division
import pytest
pyspark = pytest.importorskip('pyspark')
py4j = pytest.importorskip('py4j')
import os
import shutil
import json
import tempfile
from contextlib import contextmanager
import toolz
from toolz.compatibility import map
from pyspark.sql import Row, SchemaRDD
try:
from pyspark.sql.types import (ArrayType, StructField, StructType,
IntegerType)
from pyspark.sql.types import StringType
except ImportError:
from pyspark.sql import ArrayType, StructField, StructType, IntegerType
from pyspark.sql import StringType
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import datashape
from datashape import dshape
from odo import odo, discover, Directory, JSONLines
from odo.utils import tmpfile, ignoring
from odo.backends.sparksql import schema_to_dshape, dshape_to_schema
from odo.backends.sparksql import SparkDataFrame
data = [['Alice', 100.0, 1],
['Bob', 200.0, 2],
['Alice', 50.0, 3]]
df = pd.DataFrame(data, columns=['name', 'amount', 'id'])
@pytest.yield_fixture(scope='module')
def people(sc):
with tmpfile('.txt') as fn:
df.to_csv(fn, header=False, index=False)
raw = sc.textFile(fn)
parts = raw.map(lambda line: line.split(','))
yield parts.map(lambda person: Row(name=person[0],
amount=float(person[1]),
id=int(person[2])))
@pytest.fixture(scope='module')
def ctx(sqlctx, people):
try:
df = sqlctx.createDataFrame(people)
except AttributeError:
schema = sqlctx.inferSchema(people)
schema.registerTempTable('t')
schema.registerTempTable('t2')
else:
df2 = sqlctx.createDataFrame(people)
sqlctx.registerDataFrameAsTable(df, 't')
sqlctx.registerDataFrameAsTable(df2, 't2')
return sqlctx
def test_pyspark_to_sparksql(ctx, people):
sdf = odo(data, ctx, dshape=discover(df))
assert isinstance(sdf, (SparkDataFrame, SchemaRDD))
assert (list(map(set, odo(people, list))) ==
list(map(set, odo(sdf, list))))
def test_pyspark_to_sparksql_raises_on_tuple_dshape(ctx, people):
with pytest.raises(TypeError):
odo(data, ctx)
def test_dataframe_to_sparksql(ctx):
sdf = odo(df, ctx)
assert isinstance(sdf, (SparkDataFrame, SchemaRDD))
assert odo(sdf, list) == odo(df, list)
def test_sparksql_to_frame(ctx):
result = odo(ctx.table('t'), pd.DataFrame)
np.testing.assert_array_equal(result.sort_index(axis=1).values,
df.sort_index(axis=1).values)
def test_reduction_to_scalar(ctx):
result = odo(ctx.sql('select sum(amount) from t'), float)
assert isinstance(result, float)
assert result == sum(map(toolz.second, data))
def test_discover_context(ctx):
result = discover(ctx)
assert result is not None
def test_schema_to_dshape():
assert schema_to_dshape(IntegerType()) == datashape.int32
assert schema_to_dshape(
ArrayType(IntegerType(), False)) == dshape("var * int32")
assert schema_to_dshape(
ArrayType(IntegerType(), True)) == dshape("var * ?int32")
assert schema_to_dshape(StructType([
StructField('name', StringType(), False),
StructField('amount', IntegerType(), True)])) \
== dshape("{name: string, amount: ?int32}")
def test_dshape_to_schema():
assert dshape_to_schema('int32') == IntegerType()
assert dshape_to_schema('5 * int32') == ArrayType(IntegerType(), False)
assert dshape_to_schema('5 * ?int32') == ArrayType(IntegerType(), True)
assert dshape_to_schema('{name: string, amount: int32}') == \
StructType([StructField('name', StringType(), False),
StructField('amount', IntegerType(), False)])
assert dshape_to_schema('10 * {name: string, amount: ?int32}') == \
ArrayType(StructType(
[StructField('name', StringType(), False),
StructField('amount', IntegerType(), True)]),
False)
def test_append_spark_df_to_json_lines(ctx):
out = os.linesep.join(map(json.dumps, df.to_dict('records')))
sdf = ctx.table('t')
expected = pd.concat([df, df]).sort('amount').reset_index(drop=True).sort_index(axis=1)
with tmpfile('.json') as fn:
with open(fn, mode='w') as f:
f.write(out + os.linesep)
uri = 'jsonlines://%s' % fn
odo(sdf, uri)
result = odo(uri, pd.DataFrame).sort('amount').reset_index(drop=True).sort_index(axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=py4j.protocol.Py4JJavaError,
reason='bug in sparksql')
def test_append(ctx):
"""Add support for odo(SparkDataFrame, SparkDataFrame) when this is fixed.
"""
a = ctx.table('t2')
a.insertInto('t2')
result = odo(odo(a, pd.DataFrame), set)
expected = odo(pd.concat([odo(a, pd.DataFrame)]) * 2, set)
assert result == expected
def test_load_from_jsonlines(ctx):
with tmpfile('.json') as fn:
js = odo(df, 'jsonlines://%s' % fn)
result = odo(js, ctx, name='r')
assert (list(map(set, odo(result, list))) ==
list(map(set, odo(df, list))))
@contextmanager
def jslines(n=3):
d = tempfile.mkdtemp()
files = []
dfc = df.copy()
for i in range(n):
_, fn = tempfile.mkstemp(suffix='.json', dir=d)
dfc['id'] += i
odo(dfc, 'jsonlines://%s' % fn)
files.append(fn)
yield d
with ignoring(OSError):
shutil.rmtree(d)
def test_load_from_dir_of_jsonlines(ctx):
dfs = []
dfc = df.copy()
for i in range(3):
dfc['id'] += i
dfs.append(dfc.copy())
expected = pd.concat(dfs, axis=0, ignore_index=True)
with jslines() as d:
result = odo(Directory(JSONLines)(d), ctx)
assert (set(map(frozenset, odo(result, list))) ==
set(map(frozenset, odo(expected, list))))
| bsd-3-clause |
petrbel/PscKonvertor | psc_konvertor/__init__.py | 2 | 1828 | # -*- coding: utf-8 -*-
import os
import pandas
__author__ = 'Petr Belohlavek <me@petrbel.cz>'
class PscKonvertor:
"""Konvertuje postovni smerovaci cisla na prislusne okresy a kraje.
Vyhledavani je pro maximalni rychlost indexovane."""
_MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
_DATA_PATH = os.path.join(_MODULE_PATH, 'data')
def __init__(self, psc2okres_f=os.path.join(_DATA_PATH, 'psc2okres.csv'),
okres2kraj_f=os.path.join(_DATA_PATH, 'okres2kraj.csv')):
"""CSV tabulky se sloupci PSC,Okres a Okres,Kraj. Volitelne mohou obsahovat i dalsi sloupce."""
self.psc2okres_ = pandas.read_csv(psc2okres_f, header=0, encoding='utf-8')
self.psc2okres_ = self.psc2okres_.set_index(['PSC'])
self.okres2kraj_ = pandas.read_csv(okres2kraj_f, header=0, encoding='utf-8')
self.okres2kraj_ = self.okres2kraj_.set_index(['Okres'])
def psc2okres(self, psc):
"""Prevede `pcs` na okres, ve kterem dana obec lezi."""
psc_zaznamy = self.psc2okres_.loc[psc]
zaznam = {}
if type(psc_zaznamy) == pandas.core.frame.DataFrame:
zaznam = psc_zaznamy.iloc[0]
elif type(psc_zaznamy) == pandas.core.series.Series:
zaznam = psc_zaznamy
else:
raise KeyError('Unexpected type')
return zaznam['Okres']
def okres2kraj(self, okres):
"""Prevede `okres` na kraj, ve kterem dany okres lezi."""
if 'Praha' in okres:
return 'Hlavní město Praha'
else:
kraj_zaznam = self.okres2kraj_.loc[okres]
return kraj_zaznam['Kraj']
def psc2kraj(self, psc):
"""Prevede `pcs` na kraj, ve kterem dana obec lezi."""
okres = self.psc2okres(psc)
return self.okres2kraj(okres)
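# Usage sketch (not part of the original file; assumes the bundled CSV tables and
# that the PSC lookup key matches the dtype of the table's index, typically int):
#   konvertor = PscKonvertor()
#   konvertor.psc2okres(60200)   # -> district for the given postal code
#   konvertor.psc2kraj(60200)    # -> region for the given postal code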
| mit |
devanshdalal/scikit-learn | examples/applications/plot_out_of_core_classification.py | 51 | 13651 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
# --------------------------------
#
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
# ------------
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
plotly/plotly.py | packages/python/plotly/plotly/tests/test_optional/test_px/test_px_input.py | 1 | 19349 | import plotly.express as px
import plotly.graph_objects as go
import numpy as np
import pandas as pd
import pytest
from plotly.express._core import build_dataframe
from pandas.testing import assert_frame_equal
def test_numpy():
fig = px.scatter(x=[1, 2, 3], y=[2, 3, 4], color=[1, 3, 9])
assert np.all(fig.data[0].x == np.array([1, 2, 3]))
assert np.all(fig.data[0].y == np.array([2, 3, 4]))
assert np.all(fig.data[0].marker.color == np.array([1, 3, 9]))
def test_numpy_labels():
fig = px.scatter(
x=[1, 2, 3], y=[2, 3, 4], labels={"x": "time"}
) # other labels will be kw arguments
assert fig.data[0]["hovertemplate"] == "time=%{x}<br>y=%{y}<extra></extra>"
def test_with_index():
tips = px.data.tips()
fig = px.scatter(tips, x=tips.index, y="total_bill")
assert (
fig.data[0]["hovertemplate"] == "index=%{x}<br>total_bill=%{y}<extra></extra>"
)
fig = px.scatter(tips, x=tips.index, y=tips.total_bill)
assert (
fig.data[0]["hovertemplate"] == "index=%{x}<br>total_bill=%{y}<extra></extra>"
)
fig = px.scatter(tips, x=tips.index, y=tips.total_bill, labels={"index": "number"})
assert (
fig.data[0]["hovertemplate"] == "number=%{x}<br>total_bill=%{y}<extra></extra>"
)
# We do not allow "x=index"
with pytest.raises(ValueError) as err_msg:
fig = px.scatter(tips, x="index", y="total_bill")
assert "To use the index, pass it in directly as `df.index`." in str(err_msg.value)
tips = px.data.tips()
tips.index.name = "item"
fig = px.scatter(tips, x=tips.index, y="total_bill")
assert fig.data[0]["hovertemplate"] == "item=%{x}<br>total_bill=%{y}<extra></extra>"
def test_pandas_series():
tips = px.data.tips()
before_tip = tips.total_bill - tips.tip
fig = px.bar(tips, x="day", y=before_tip)
assert fig.data[0].hovertemplate == "day=%{x}<br>y=%{y}<extra></extra>"
fig = px.bar(tips, x="day", y=before_tip, labels={"y": "bill"})
assert fig.data[0].hovertemplate == "day=%{x}<br>bill=%{y}<extra></extra>"
# lock down that we can pass df.col to facet_*
fig = px.bar(tips, x="day", y="tip", facet_row=tips.day, facet_col=tips.day)
assert fig.data[0].hovertemplate == "day=%{x}<br>tip=%{y}<extra></extra>"
def test_several_dataframes():
df = pd.DataFrame(dict(x=[0, 1], y=[1, 10], z=[0.1, 0.8]))
df2 = pd.DataFrame(dict(time=[23, 26], money=[100, 200]))
fig = px.scatter(df, x="z", y=df2.money, size="x")
assert (
fig.data[0].hovertemplate
== "z=%{x}<br>y=%{y}<br>x=%{marker.size}<extra></extra>"
)
fig = px.scatter(df2, x=df.z, y=df2.money, size=df.z)
assert (
fig.data[0].hovertemplate
== "x=%{x}<br>money=%{y}<br>size=%{marker.size}<extra></extra>"
)
# Name conflict
with pytest.raises(NameError) as err_msg:
fig = px.scatter(df, x="z", y=df2.money, size="y")
assert "A name conflict was encountered for argument 'y'" in str(err_msg.value)
with pytest.raises(NameError) as err_msg:
fig = px.scatter(df, x="z", y=df2.money, size=df.y)
assert "A name conflict was encountered for argument 'y'" in str(err_msg.value)
# No conflict when the dataframe is not given, fields are used
df = pd.DataFrame(dict(x=[0, 1], y=[3, 4]))
df2 = pd.DataFrame(dict(x=[3, 5], y=[23, 24]))
fig = px.scatter(x=df.y, y=df2.y)
assert np.all(fig.data[0].x == np.array([3, 4]))
assert np.all(fig.data[0].y == np.array([23, 24]))
assert fig.data[0].hovertemplate == "x=%{x}<br>y=%{y}<extra></extra>"
df = pd.DataFrame(dict(x=[0, 1], y=[3, 4]))
df2 = pd.DataFrame(dict(x=[3, 5], y=[23, 24]))
df3 = pd.DataFrame(dict(y=[0.1, 0.2]))
fig = px.scatter(x=df.y, y=df2.y, size=df3.y)
assert np.all(fig.data[0].x == np.array([3, 4]))
assert np.all(fig.data[0].y == np.array([23, 24]))
assert (
fig.data[0].hovertemplate
== "x=%{x}<br>y=%{y}<br>size=%{marker.size}<extra></extra>"
)
df = pd.DataFrame(dict(x=[0, 1], y=[3, 4]))
df2 = pd.DataFrame(dict(x=[3, 5], y=[23, 24]))
df3 = pd.DataFrame(dict(y=[0.1, 0.2]))
fig = px.scatter(x=df.y, y=df2.y, hover_data=[df3.y])
assert np.all(fig.data[0].x == np.array([3, 4]))
assert np.all(fig.data[0].y == np.array([23, 24]))
assert (
fig.data[0].hovertemplate
== "x=%{x}<br>y=%{y}<br>hover_data_0=%{customdata[0]}<extra></extra>"
)
def test_name_heuristics():
df = pd.DataFrame(dict(x=[0, 1], y=[3, 4], z=[0.1, 0.2]))
fig = px.scatter(df, x=df.y, y=df.x, size=df.y)
assert np.all(fig.data[0].x == np.array([3, 4]))
assert np.all(fig.data[0].y == np.array([0, 1]))
assert fig.data[0].hovertemplate == "y=%{marker.size}<br>x=%{y}<extra></extra>"
def test_repeated_name():
iris = px.data.iris()
fig = px.scatter(
iris,
x="sepal_width",
y="sepal_length",
hover_data=["petal_length", "petal_width", "species_id"],
custom_data=["species_id", "species"],
)
assert len(fig.data[0].customdata[0]) == 4
def test_arrayattrable_numpy():
tips = px.data.tips()
fig = px.scatter(
tips, x="total_bill", y="tip", hover_data=[np.random.random(tips.shape[0])]
)
assert (
fig.data[0]["hovertemplate"]
== "total_bill=%{x}<br>tip=%{y}<br>hover_data_0=%{customdata[0]}<extra></extra>"
)
tips = px.data.tips()
fig = px.scatter(
tips,
x="total_bill",
y="tip",
hover_data=[np.random.random(tips.shape[0])],
labels={"hover_data_0": "suppl"},
)
assert (
fig.data[0]["hovertemplate"]
== "total_bill=%{x}<br>tip=%{y}<br>suppl=%{customdata[0]}<extra></extra>"
)
def test_wrong_column_name():
with pytest.raises(ValueError) as err_msg:
px.scatter(px.data.tips(), x="bla", y="wrong")
assert "Value of 'x' is not the name of a column in 'data_frame'" in str(
err_msg.value
)
def test_missing_data_frame():
with pytest.raises(ValueError) as err_msg:
px.scatter(x="arg1", y="arg2")
assert "String or int arguments are only possible" in str(err_msg.value)
def test_wrong_dimensions_of_array():
with pytest.raises(ValueError) as err_msg:
px.scatter(x=[1, 2, 3], y=[2, 3, 4, 5])
assert "All arguments should have the same length." in str(err_msg.value)
def test_wrong_dimensions_mixed_case():
with pytest.raises(ValueError) as err_msg:
df = pd.DataFrame(dict(time=[1, 2, 3], temperature=[20, 30, 25]))
px.scatter(df, x="time", y="temperature", color=[1, 3, 9, 5])
assert "All arguments should have the same length." in str(err_msg.value)
def test_wrong_dimensions():
with pytest.raises(ValueError) as err_msg:
px.scatter(px.data.tips(), x="tip", y=[1, 2, 3])
assert "All arguments should have the same length." in str(err_msg.value)
# the order matters
with pytest.raises(ValueError) as err_msg:
px.scatter(px.data.tips(), x=[1, 2, 3], y="tip")
assert "All arguments should have the same length." in str(err_msg.value)
with pytest.raises(ValueError) as err_msg:
px.scatter(px.data.tips(), x=px.data.iris().index, y="tip")
assert "All arguments should have the same length." in str(err_msg.value)
def test_multiindex_raise_error():
index = pd.MultiIndex.from_product(
[[1, 2, 3], ["a", "b"]], names=["first", "second"]
)
df = pd.DataFrame(np.random.random((6, 3)), index=index, columns=["A", "B", "C"])
# This is ok
px.scatter(df, x="A", y="B")
with pytest.raises(TypeError) as err_msg:
px.scatter(df, x=df.index, y="B")
assert "pandas MultiIndex is not supported by plotly express" in str(err_msg.value)
def test_build_df_from_lists():
# Just lists
args = dict(x=[1, 2, 3], y=[2, 3, 4], color=[1, 3, 9])
output = {key: key for key in args}
df = pd.DataFrame(args)
args["data_frame"] = None
out = build_dataframe(args, go.Scatter)
assert_frame_equal(df.sort_index(axis=1), out["data_frame"].sort_index(axis=1))
out.pop("data_frame")
assert out == output
# Arrays
args = dict(x=np.array([1, 2, 3]), y=np.array([2, 3, 4]), color=[1, 3, 9])
output = {key: key for key in args}
df = pd.DataFrame(args)
args["data_frame"] = None
out = build_dataframe(args, go.Scatter)
assert_frame_equal(df.sort_index(axis=1), out["data_frame"].sort_index(axis=1))
out.pop("data_frame")
assert out == output
def test_build_df_with_index():
tips = px.data.tips()
args = dict(data_frame=tips, x=tips.index, y="total_bill")
out = build_dataframe(args, go.Scatter)
assert_frame_equal(tips.reset_index()[out["data_frame"].columns], out["data_frame"])
def test_timezones():
df = pd.DataFrame({"date": ["2015-04-04 19:31:30+1:00"], "value": [3]})
df["date"] = pd.to_datetime(df["date"])
args = dict(data_frame=df, x="date", y="value")
out = build_dataframe(args, go.Scatter)
assert str(out["data_frame"]["date"][0]) == str(df["date"][0])
def test_non_matching_index():
df = pd.DataFrame(dict(y=[1, 2, 3]), index=["a", "b", "c"])
expected = pd.DataFrame(dict(index=["a", "b", "c"], y=[1, 2, 3]))
args = dict(data_frame=df, x=df.index, y="y")
out = build_dataframe(args, go.Scatter)
assert_frame_equal(expected, out["data_frame"])
expected = pd.DataFrame(dict(x=["a", "b", "c"], y=[1, 2, 3]))
args = dict(data_frame=None, x=df.index, y=df.y)
out = build_dataframe(args, go.Scatter)
assert_frame_equal(expected, out["data_frame"])
args = dict(data_frame=None, x=["a", "b", "c"], y=df.y)
out = build_dataframe(args, go.Scatter)
assert_frame_equal(expected, out["data_frame"])
def test_splom_case():
iris = px.data.iris()
fig = px.scatter_matrix(iris)
assert len(fig.data[0].dimensions) == len(iris.columns)
dic = {"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
fig = px.scatter_matrix(dic)
assert np.all(fig.data[0].dimensions[0].values == np.array(dic["a"]))
ar = np.arange(9).reshape((3, 3))
fig = px.scatter_matrix(ar)
assert np.all(fig.data[0].dimensions[0].values == ar[:, 0])
def test_int_col_names():
# DataFrame with int column names
lengths = pd.DataFrame(np.random.random(100))
fig = px.histogram(lengths, x=0)
assert np.all(np.array(lengths).flatten() == fig.data[0].x)
# Numpy array
ar = np.arange(100).reshape((10, 10))
fig = px.scatter(ar, x=2, y=8)
assert np.all(fig.data[0].x == ar[:, 2])
def test_data_frame_from_dict():
fig = px.scatter({"time": [0, 1], "money": [1, 2]}, x="time", y="money")
assert fig.data[0].hovertemplate == "time=%{x}<br>money=%{y}<extra></extra>"
assert np.all(fig.data[0].x == [0, 1])
def test_arguments_not_modified():
iris = px.data.iris()
petal_length = iris.petal_length
hover_data = [iris.sepal_length]
px.scatter(iris, x=petal_length, y="petal_width", hover_data=hover_data)
assert iris.petal_length.equals(petal_length)
assert iris.sepal_length.equals(hover_data[0])
def test_pass_df_columns():
tips = px.data.tips()
fig = px.histogram(
tips,
x="total_bill",
y="tip",
color="sex",
marginal="rug",
hover_data=tips.columns,
)
# the "- 2" is because we re-use x and y in the hovertemplate where possible
assert fig.data[1].hovertemplate.count("customdata") == len(tips.columns) - 2
tips_copy = px.data.tips()
assert tips_copy.columns.equals(tips.columns)
def test_size_column():
df = px.data.tips()
fig = px.scatter(df, x=df["size"], y=df.tip)
assert fig.data[0].hovertemplate == "size=%{x}<br>tip=%{y}<extra></extra>"
def test_identity_map():
fig = px.scatter(
x=[1, 2],
y=[1, 2],
symbol=["a", "b"],
color=["red", "blue"],
color_discrete_map=px.IdentityMap(),
)
assert fig.data[0].marker.color == "red"
assert fig.data[1].marker.color == "blue"
assert "color=" not in fig.data[0].hovertemplate
assert "symbol=" in fig.data[0].hovertemplate
assert fig.layout.legend.title.text == "symbol"
fig = px.scatter(
x=[1, 2],
y=[1, 2],
symbol=["a", "b"],
color=["red", "blue"],
color_discrete_map="identity",
)
assert fig.data[0].marker.color == "red"
assert fig.data[1].marker.color == "blue"
assert "color=" not in fig.data[0].hovertemplate
assert "symbol=" in fig.data[0].hovertemplate
assert fig.layout.legend.title.text == "symbol"
def test_constants():
fig = px.scatter(x=px.Constant(1), y=[1, 2])
assert fig.data[0].x[0] == 1
assert fig.data[0].x[1] == 1
assert "x=" in fig.data[0].hovertemplate
fig = px.scatter(x=px.Constant(1, label="time"), y=[1, 2])
assert fig.data[0].x[0] == 1
assert fig.data[0].x[1] == 1
assert "x=" not in fig.data[0].hovertemplate
assert "time=" in fig.data[0].hovertemplate
fig = px.scatter(
x=[1, 2],
y=[1, 2],
symbol=["a", "b"],
color=px.Constant("red", label="the_identity_label"),
hover_data=[px.Constant("data", label="the_data")],
color_discrete_map=px.IdentityMap(),
)
assert fig.data[0].marker.color == "red"
assert fig.data[0].customdata[0][0] == "data"
assert fig.data[1].marker.color == "red"
assert "color=" not in fig.data[0].hovertemplate
assert "the_identity_label=" not in fig.data[0].hovertemplate
assert "symbol=" in fig.data[0].hovertemplate
assert "the_data=" in fig.data[0].hovertemplate
assert fig.layout.legend.title.text == "symbol"
def test_ranges():
fig = px.scatter(x=px.Range(), y=[1, 2], hover_data=[px.Range()])
assert fig.data[0].x[0] == 0
assert fig.data[0].x[1] == 1
assert fig.data[0].customdata[0][0] == 0
assert fig.data[0].customdata[1][0] == 1
assert "x=" in fig.data[0].hovertemplate
fig = px.scatter(x=px.Range(label="time"), y=[1, 2])
assert fig.data[0].x[0] == 0
assert fig.data[0].x[1] == 1
assert "x=" not in fig.data[0].hovertemplate
assert "time=" in fig.data[0].hovertemplate
@pytest.mark.parametrize(
"fn",
[px.scatter, px.line, px.area, px.violin, px.box, px.strip]
+ [px.bar, px.funnel, px.histogram],
)
@pytest.mark.parametrize(
"x,y,result",
[
("numerical", "categorical", "h"),
("categorical", "numerical", "v"),
("categorical", "categorical", "v"),
("numerical", "numerical", "v"),
("numerical", "none", "h"),
("categorical", "none", "h"),
("none", "categorical", "v"),
("none", "numerical", "v"),
],
)
def test_auto_orient_x_and_y(fn, x, y, result):
series = dict(categorical=["a", "a", "b", "b"], numerical=[1, 2, 3, 4], none=None)
if "none" not in [x, y]:
assert fn(x=series[x], y=series[y]).data[0].orientation == result
else:
if fn == px.histogram or (fn == px.bar and "categorical" in [x, y]):
assert fn(x=series[x], y=series[y]).data[0].orientation != result
else:
assert fn(x=series[x], y=series[y]).data[0].orientation == result
def test_histogram_auto_orient():
numerical = [1, 2, 3, 4]
assert px.histogram(x=numerical, nbins=5).data[0].nbinsx == 5
assert px.histogram(y=numerical, nbins=5).data[0].nbinsy == 5
assert px.histogram(x=numerical, y=numerical, nbins=5).data[0].nbinsx == 5
def test_auto_histfunc():
a = [1, 2]
assert px.histogram(x=a).data[0].histfunc is None
assert px.histogram(y=a).data[0].histfunc is None
assert px.histogram(x=a, y=a).data[0].histfunc == "sum"
assert px.histogram(x=a, y=a, histfunc="avg").data[0].histfunc == "avg"
assert px.density_heatmap(x=a, y=a).data[0].histfunc is None
assert px.density_heatmap(x=a, y=a, z=a).data[0].histfunc == "sum"
assert px.density_heatmap(x=a, y=a, z=a, histfunc="avg").data[0].histfunc == "avg"
@pytest.mark.parametrize(
"fn,mode", [(px.violin, "violinmode"), (px.box, "boxmode"), (px.strip, "boxmode")]
)
@pytest.mark.parametrize(
"x,y,color,result",
[
("categorical1", "numerical", None, "group"),
("categorical1", "numerical", "categorical2", "group"),
("categorical1", "numerical", "categorical1", "overlay"),
("numerical", "categorical1", None, "group"),
("numerical", "categorical1", "categorical2", "group"),
("numerical", "categorical1", "categorical1", "overlay"),
],
)
def test_auto_boxlike_overlay(fn, mode, x, y, color, result):
df = pd.DataFrame(
dict(
categorical1=["a", "a", "b", "b"],
categorical2=["a", "a", "b", "b"],
numerical=[1, 2, 3, 4],
)
)
assert fn(df, x=x, y=y, color=color).layout[mode] == result
@pytest.mark.parametrize("fn", [px.scatter, px.line, px.area, px.bar])
def test_x_or_y(fn):
categorical = ["a", "a", "b", "b"]
numerical = [1, 2, 3, 4]
constant = [1, 1, 1, 1]
range_4 = [0, 1, 2, 3]
index = [11, 12, 13, 14]
numerical_df = pd.DataFrame(dict(col=numerical), index=index)
categorical_df = pd.DataFrame(dict(col=categorical), index=index)
fig = fn(x=numerical)
assert list(fig.data[0].x) == numerical
assert list(fig.data[0].y) == range_4
assert fig.data[0].orientation == "h"
fig = fn(y=numerical)
assert list(fig.data[0].x) == range_4
assert list(fig.data[0].y) == numerical
assert fig.data[0].orientation == "v"
fig = fn(numerical_df, x="col")
assert list(fig.data[0].x) == numerical
assert list(fig.data[0].y) == index
assert fig.data[0].orientation == "h"
fig = fn(numerical_df, y="col")
assert list(fig.data[0].x) == index
assert list(fig.data[0].y) == numerical
assert fig.data[0].orientation == "v"
if fn != px.bar:
fig = fn(x=categorical)
assert list(fig.data[0].x) == categorical
assert list(fig.data[0].y) == range_4
assert fig.data[0].orientation == "h"
fig = fn(y=categorical)
assert list(fig.data[0].x) == range_4
assert list(fig.data[0].y) == categorical
assert fig.data[0].orientation == "v"
fig = fn(categorical_df, x="col")
assert list(fig.data[0].x) == categorical
assert list(fig.data[0].y) == index
assert fig.data[0].orientation == "h"
fig = fn(categorical_df, y="col")
assert list(fig.data[0].x) == index
assert list(fig.data[0].y) == categorical
assert fig.data[0].orientation == "v"
else:
fig = fn(x=categorical)
assert list(fig.data[0].x) == categorical
assert list(fig.data[0].y) == constant
assert fig.data[0].orientation == "v"
fig = fn(y=categorical)
assert list(fig.data[0].x) == constant
assert list(fig.data[0].y) == categorical
assert fig.data[0].orientation == "h"
fig = fn(categorical_df, x="col")
assert list(fig.data[0].x) == categorical
assert list(fig.data[0].y) == constant
assert fig.data[0].orientation == "v"
fig = fn(categorical_df, y="col")
assert list(fig.data[0].x) == constant
assert list(fig.data[0].y) == categorical
assert fig.data[0].orientation == "h"
| mit |
BoltzmannBrain/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
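# Illustrative note (added): pylab/pyplot consumes this function roughly as
#   new_figure_manager, draw_if_interactive, show = pylab_setup()
# so the three returned callables become the backend-specific entry points
# exported via __all__ above.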
| agpl-3.0 |
466152112/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 121 | 3429 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/misc/pythonic_matplotlib.py | 1 | 3300 | """
===================
Pythonic Matplotlib
===================
Some people prefer to write more pythonic, object-oriented code
rather than use the pyplot interface to matplotlib. This example shows
you how.
Unless you are an application developer, I recommend using part of the
pyplot interface, particularly the figure, close, subplot, axes, and
show commands. These hide a lot of complexity from you that you don't
need to see in normal figure creation, like instantiating DPI
instances, managing the bounding boxes of the figure elements,
creating and realizing GUI windows and embedding figures in them.
If you are an application developer and want to embed matplotlib in
your application, follow the lead of examples/embedding_in_wx.py,
examples/embedding_in_gtk.py or examples/embedding_in_tk.py. In this
case you will want to control the creation of all your figures,
embedding them in application windows, etc.
If you are a web application developer, you may want to use the
example in webapp_demo.py, which shows how to use the backend agg
figure canvas directly, with none of the globals (current figure,
current axes) that are present in the pyplot interface. Note that
there is no reason why the pyplot interface won't work for web
application developers, however.
If you see an example in the examples dir written in pyplot interface,
and you want to emulate that using the true python method calls, there
is an easy mapping. Many of those examples use 'set' to control
figure properties. Here's how to map those commands onto instance
methods
The syntax of set is::
plt.setp(object or sequence, somestring, attribute)
if called with an object, set calls::
object.set_somestring(attribute)
if called with a sequence, set does::
for object in sequence:
object.set_somestring(attribute)
So for your example, if a is your axes object, you can do::
a.set_xticklabels([])
a.set_yticklabels([])
a.set_xticks([])
a.set_yticks([])
"""
import matplotlib.pyplot as plt #import figure, show
from numpy import arange, sin, pi
# nodebox section
if __name__ == '__builtin__':
# we're running inside NodeBox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
t = arange(0.0, 1.0, 0.01)
fig = plt.figure(1)
ax1 = fig.add_subplot(211)
ax1.plot(t, sin(2*pi * t))
ax1.grid(True)
ax1.set_ylim((-2, 2))
ax1.set_ylabel('1 Hz')
ax1.set_title('A sine wave or two')
ax1.xaxis.set_tick_params(labelcolor='r')
ax2 = fig.add_subplot(212)
ax2.plot(t, sin(2 * 2*pi * t))
ax2.grid(True)
ax2.set_ylim((-2, 2))
l = ax2.set_xlabel('Hi mom')
l.set_color('g')
l.set_fontsize('large')
pltshow(plt)
| mit |
rs2/pandas | pandas/tests/indexes/ranges/test_range.py | 1 | 16923 | import numpy as np
import pytest
from pandas.core.dtypes.common import ensure_platform_int
import pandas as pd
from pandas import Float64Index, Index, Int64Index, RangeIndex
import pandas._testing as tm
from ..test_numeric import Numeric
# aliases to make some tests easier to read
RI = RangeIndex
I64 = Int64Index
F64 = Float64Index
OI = Index
class TestRangeIndex(Numeric):
_holder = RangeIndex
_compat_props = ["shape", "ndim", "size"]
@pytest.fixture(
params=[
RangeIndex(start=0, stop=20, step=2, name="foo"),
RangeIndex(start=18, stop=-1, step=-2, name="bar"),
],
ids=["index_inc", "index_dec"],
)
def index(self, request):
return request.param
def create_index(self) -> RangeIndex:
return RangeIndex(start=0, stop=20, step=2)
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_too_many_names(self):
index = self.create_index()
with pytest.raises(ValueError, match="^Length"):
index.names = ["roger", "harold"]
@pytest.mark.parametrize(
"index, start, stop, step",
[
(RangeIndex(5), 0, 5, 1),
(RangeIndex(0, 5), 0, 5, 1),
(RangeIndex(5, step=2), 0, 5, 2),
(RangeIndex(1, 5, 2), 1, 5, 2),
],
)
def test_start_stop_step_attrs(self, index, start, stop, step):
# GH 25710
assert index.start == start
assert index.stop == stop
assert index.step == step
@pytest.mark.parametrize("attr_name", ["_start", "_stop", "_step"])
def test_deprecated_start_stop_step_attrs(self, attr_name):
# GH 26581
idx = self.create_index()
with tm.assert_produces_warning(FutureWarning):
getattr(idx, attr_name)
def test_copy(self):
i = RangeIndex(5, name="Foo")
i_copy = i.copy()
assert i_copy is not i
assert i_copy.identical(i)
assert i_copy._range == range(0, 5, 1)
assert i_copy.name == "Foo"
def test_repr(self):
i = RangeIndex(5, name="Foo")
result = repr(i)
expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
i = RangeIndex(5, 0, -1)
result = repr(i)
expected = "RangeIndex(start=5, stop=0, step=-1)"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
def test_insert(self):
idx = RangeIndex(5, name="Foo")
result = idx[1:4]
# test 0th element
tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]))
# GH 18295 (test missing)
expected = Float64Index([0, np.nan, 1, 2, 3, 4])
for na in [np.nan, None, pd.NA]:
result = RangeIndex(5).insert(1, na)
tm.assert_index_equal(result, expected)
result = RangeIndex(5).insert(1, pd.NaT)
expected = pd.Index([0, pd.NaT, 1, 2, 3, 4], dtype=object)
tm.assert_index_equal(result, expected)
def test_delete(self):
idx = RangeIndex(5, name="Foo")
expected = idx[1:].astype(int)
result = idx.delete(0)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
expected = idx[:-1].astype(int)
result = idx.delete(-1)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
msg = "index 5 is out of bounds for axis 0 with size 5"
with pytest.raises((IndexError, ValueError), match=msg):
# either depending on numpy version
result = idx.delete(len(idx))
def test_view(self):
i = RangeIndex(0, name="Foo")
i_view = i.view()
assert i_view.name == "Foo"
i_view = i.view("i8")
tm.assert_numpy_array_equal(i.values, i_view)
i_view = i.view(RangeIndex)
tm.assert_index_equal(i, i_view)
def test_dtype(self):
index = self.create_index()
assert index.dtype == np.int64
def test_cache(self):
# GH 26565, GH26617, GH35432
# This test checks whether _cache has been set.
# Calling RangeIndex._cache["_data"] creates an int64 array of the same length
# as the RangeIndex and stores it in _cache.
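# Added note: every operation exercised below is expected to leave _cache empty;
# only the explicit idx._data access at the end materializes the int64 ndarray.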
idx = RangeIndex(0, 100, 10)
assert idx._cache == {}
repr(idx)
assert idx._cache == {}
str(idx)
assert idx._cache == {}
idx.get_loc(20)
assert idx._cache == {}
90 in idx # True
assert idx._cache == {}
91 in idx # False
assert idx._cache == {}
idx.all()
assert idx._cache == {}
idx.any()
assert idx._cache == {}
for _ in idx:
pass
assert idx._cache == {}
idx.format()
assert idx._cache == {}
df = pd.DataFrame({"a": range(10)}, index=idx)
str(df)
assert idx._cache == {}
df.loc[50]
assert idx._cache == {}
with pytest.raises(KeyError, match="51"):
df.loc[51]
assert idx._cache == {}
df.loc[10:50]
assert idx._cache == {}
df.iloc[5:10]
assert idx._cache == {}
# idx._cache should contain a _data entry after call to idx._data
idx._data
assert isinstance(idx._data, np.ndarray)
assert idx._data is idx._data # check cached value is reused
assert len(idx._cache) == 4
expected = np.arange(0, 100, 10, dtype="int64")
tm.assert_numpy_array_equal(idx._cache["_data"], expected)
def test_is_monotonic(self):
index = RangeIndex(0, 20, 2)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is False
index = RangeIndex(4, 0, -1)
assert index.is_monotonic is False
assert index._is_strictly_monotonic_increasing is False
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 2)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(2, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
def test_equals_range(self):
equiv_pairs = [
(RangeIndex(0, 9, 2), RangeIndex(0, 10, 2)),
(RangeIndex(0), RangeIndex(1, -1, 3)),
(RangeIndex(1, 2, 3), RangeIndex(1, 3, 4)),
(RangeIndex(0, -9, -2), RangeIndex(0, -10, -2)),
]
for left, right in equiv_pairs:
assert left.equals(right)
assert right.equals(left)
def test_logical_compat(self):
idx = self.create_index()
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def test_identical(self):
index = self.create_index()
i = Index(index.copy())
assert i.identical(index)
# we don't allow object dtype for RangeIndex
if isinstance(index, RangeIndex):
return
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
i = index.copy(dtype=object)
i = i.rename("foo")
same_values = Index(i, dtype=object)
assert same_values.identical(index.copy(dtype=object))
assert not i.identical(index)
assert Index(same_values, name="foo", dtype=object).identical(i)
assert not index.copy(dtype=object).identical(index.copy(dtype="int64"))
def test_nbytes(self):
# memory savings vs int index
i = RangeIndex(0, 1000)
assert i.nbytes < i._int64index.nbytes / 10
# constant memory usage
i2 = RangeIndex(0, 10)
assert i.nbytes == i2.nbytes
@pytest.mark.parametrize(
"start,stop,step",
[
# can't
("foo", "bar", "baz"),
# shouldn't
("0", "1", "2"),
],
)
def test_cant_or_shouldnt_cast(self, start, stop, step):
msg = f"Wrong type {type(start)} for value {start}"
with pytest.raises(TypeError, match=msg):
RangeIndex(start, stop, step)
def test_view_index(self):
index = self.create_index()
index.view(Index)
def test_prevent_casting(self):
index = self.create_index()
result = index.astype("O")
assert result.dtype == np.object_
def test_repr_roundtrip(self):
index = self.create_index()
tm.assert_index_equal(eval(repr(index)), index)
def test_slice_keep_name(self):
idx = RangeIndex(1, 2, name="asdf")
assert idx.name == idx[1:].name
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = RangeIndex(5)
# float conversions
arr = np.arange(5, dtype="int64") * 3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx, expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx, expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5, dtype="float64")
result = fidx - a
tm.assert_index_equal(result, expected)
expected = Float64Index(-arr)
a = np.zeros(5, dtype="float64")
result = a - fidx
tm.assert_index_equal(result, expected)
def test_has_duplicates(self, index):
assert index.is_unique
assert not index.has_duplicates
def test_extended_gcd(self):
index = self.create_index()
result = index._extended_gcd(6, 10)
assert result[0] == result[1] * 6 + result[2] * 10
assert 2 == result[0]
result = index._extended_gcd(10, 6)
assert 2 == result[1] * 10 + result[2] * 6
assert 2 == result[0]
def test_min_fitting_element(self):
result = RangeIndex(0, 20, 2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(1, 6)._min_fitting_element(1)
assert 1 == result
result = RangeIndex(18, -2, -2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(5, 0, -1)._min_fitting_element(1)
assert 1 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._min_fitting_element(big_num)
assert big_num == result
def test_max_fitting_element(self):
result = RangeIndex(0, 20, 2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(1, 6)._max_fitting_element(4)
assert 4 == result
result = RangeIndex(18, -2, -2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(5, 0, -1)._max_fitting_element(4)
assert 4 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._max_fitting_element(big_num)
assert big_num == result
def test_pickle_compat_construction(self):
# RangeIndex() is a valid constructor
pass
def test_slice_specialised(self):
index = self.create_index()
index.name = "foo"
# scalar indexing
res = index[1]
expected = 2
assert res == expected
res = index[-1]
expected = 18
assert res == expected
# slicing
# slice value completion
index_slice = index[:]
expected = index
tm.assert_index_equal(index_slice, expected)
# positive slice values
index_slice = index[7:10:2]
expected = Index(np.array([14, 18]), name="foo")
tm.assert_index_equal(index_slice, expected)
# negative slice values
index_slice = index[-1:-5:-2]
expected = Index(np.array([18, 14]), name="foo")
tm.assert_index_equal(index_slice, expected)
# stop overshoot
index_slice = index[2:100:4]
expected = Index(np.array([4, 12]), name="foo")
tm.assert_index_equal(index_slice, expected)
# reverse
index_slice = index[::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[-8::-1]
expected = Index(np.array([4, 2, 0]), name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[-40::-1]
expected = Index(np.array([], dtype=np.int64), name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[40::-1]
expected = Index(index.values[40::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[10::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
@pytest.mark.parametrize("step", set(range(-5, 6)) - {0})
def test_len_specialised(self, step):
# make sure that our len is the same as np.arange calc
start, stop = (0, 5) if step > 0 else (5, 0)
arr = np.arange(start, stop, step)
index = RangeIndex(start, stop, step)
assert len(index) == len(arr)
index = RangeIndex(stop, start, step)
assert len(index) == 0
@pytest.fixture(
params=[
([RI(1, 12, 5)], RI(1, 12, 5)),
([RI(0, 6, 4)], RI(0, 6, 4)),
([RI(1, 3), RI(3, 7)], RI(1, 7)),
([RI(1, 5, 2), RI(5, 6)], RI(1, 6, 2)),
([RI(1, 3, 2), RI(4, 7, 3)], RI(1, 7, 3)),
([RI(-4, 3, 2), RI(4, 7, 2)], RI(-4, 7, 2)),
([RI(-4, -8), RI(-8, -12)], RI(0, 0)),
([RI(-4, -8), RI(3, -4)], RI(0, 0)),
([RI(-4, -8), RI(3, 5)], RI(3, 5)),
([RI(-4, -2), RI(3, 5)], I64([-4, -3, 3, 4])),
([RI(-2), RI(3, 5)], RI(3, 5)),
([RI(2), RI(2)], I64([0, 1, 0, 1])),
([RI(2), RI(2, 5), RI(5, 8, 4)], RI(0, 6)),
([RI(2), RI(3, 5), RI(5, 8, 4)], I64([0, 1, 3, 4, 5])),
([RI(-2, 2), RI(2, 5), RI(5, 8, 4)], RI(-2, 6)),
([RI(3), I64([-1, 3, 15])], I64([0, 1, 2, -1, 3, 15])),
([RI(3), F64([-1, 3.1, 15.0])], F64([0, 1, 2, -1, 3.1, 15.0])),
([RI(3), OI(["a", None, 14])], OI([0, 1, 2, "a", None, 14])),
([RI(3, 1), OI(["a", None, 14])], OI(["a", None, 14])),
]
)
def appends(self, request):
"""Inputs and expected outputs for RangeIndex.append test"""
return request.param
def test_append(self, appends):
# GH16212
indices, expected = appends
result = indices[0].append(indices[1:])
tm.assert_index_equal(result, expected, exact=True)
if len(indices) == 2:
# Append single item rather than list
result2 = indices[0].append(indices[1])
tm.assert_index_equal(result2, expected, exact=True)
def test_engineless_lookup(self):
# GH 16685
# Standard lookup on RangeIndex should not require the engine to be
# created
idx = RangeIndex(2, 10, 3)
assert idx.get_loc(5) == 1
tm.assert_numpy_array_equal(
idx.get_indexer([2, 8]), ensure_platform_int(np.array([0, 2]))
)
with pytest.raises(KeyError, match="3"):
idx.get_loc(3)
assert "_engine" not in idx._cache
# Different types of scalars can be excluded immediately, no need to
# use the _engine
with pytest.raises(KeyError, match="'a'"):
idx.get_loc("a")
assert "_engine" not in idx._cache
def test_format_empty(self):
# GH35712
empty_idx = self._holder(0)
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
| bsd-3-clause |
michelle192837/test-infra | hack/analyze-memory-profiles.py | 9 | 6137 | #!/usr/bin/env python3
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is meant to be used to analyze memory profiles created by the Prow binaries when
# the --profile-memory-usage flag is passed. The interval of profiling can be set with the
# --memory-profile-interval flag. This tool can also be used on the output of the sidecar utility
# when the sidecar.Options.WriteMemoryProfile option has been set. The tools will write sequential
# profiles into a directory, from which this script can load the data, create time series and
# visualize them.
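# Example invocation (directory name is hypothetical):
#   ./analyze-memory-profiles.py ./memory-profiles/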
import os
import pathlib
import subprocess
import sys
from datetime import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.font_manager import FontProperties
if len(sys.argv) != 2:
print("[ERROR] Expected the directory containing profiles as the only argument.")
print("Usage: {} ./path/to/profiles/".format(sys.argv[0]))
sys.exit(1)
profile_dir = sys.argv[1]
def parse_bytes(value):
# we will either see a raw number or one with a suffix
value = value.decode("utf-8")
if not value.endswith("B"):
return float(value)
suffix = value[-2:]
multiple = 1
if suffix == "KB":
multiple = 1024
elif suffix == "MB":
multiple = 1024 * 1024
elif suffix == "GB":
multiple = 1024 * 1024 * 1024
return float(value[:-2]) * multiple
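# Added note: parse_bytes accepts the byte strings produced by pprof's -top output,
# e.g. parse_bytes(b"64MB") == 64 * 1024 * 1024 and parse_bytes(b"512") == 512.0.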
overall_name = "overall".encode("utf-8")
dates_by_name = {overall_name: []}
flat_usage_over_time = {overall_name: []}
cumulative_usage_over_time = {overall_name: []}
max_usage = 0
for subdir, dirs, files in os.walk(profile_dir):
for file in files:
full_path = os.path.join(subdir, file)
date = datetime.fromtimestamp(pathlib.Path(full_path).stat().st_mtime)
output = subprocess.run(
["go", "tool", "pprof", "-top", "-inuse_space", full_path],
check=True, stdout=subprocess.PIPE
)
# The output of go tool pprof will look like:
#
# File: sidecar
# Type: inuse_space
# Time: Mar 19, 2021 at 10:30am (PDT)
# Showing nodes accounting for 66.05MB, 100% of 66.05MB total
# flat flat% sum% cum cum%
# 64MB 96.90% 96.90% 64MB 96.90% google.golang.org/api/internal/gensupport...
#
# We want to parse all of the lines after the header and metadata.
lines = output.stdout.splitlines()
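# Added clarification: lines[3] is the "Showing nodes accounting for ..." summary
# line, whose second-to-last token is the total in-use size (e.g. b"66.05MB").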
usage = parse_bytes(lines[3].split()[-2])
if usage > max_usage:
max_usage = usage
data_index = 0
for i in range(len(lines)):
if lines[i].split()[0].decode("utf-8") == "flat":
data_index = i + 1
break
flat_overall = 0
cumulative_overall = 0
for line in lines[data_index:]:
parts = line.split()
name = parts[5]
if name not in dates_by_name:
dates_by_name[name] = []
dates_by_name[name].append(date)
if name not in flat_usage_over_time:
flat_usage_over_time[name] = []
flat_usage = parse_bytes(parts[0])
flat_usage_over_time[name].append(flat_usage)
flat_overall += flat_usage
if name not in cumulative_usage_over_time:
cumulative_usage_over_time[name] = []
cumulative_usage = parse_bytes(parts[3])
cumulative_usage_over_time[name].append(cumulative_usage)
cumulative_overall += cumulative_usage
dates_by_name[overall_name].append(date)
flat_usage_over_time[overall_name].append(flat_overall)
cumulative_usage_over_time[overall_name].append(cumulative_overall)
plt.rcParams.update({'font.size': 22})
fig = plt.figure(figsize=(30, 18))
plt.subplots_adjust(right=0.7)
ax = plt.subplot(211)
for name in dates_by_name:
dates = mdates.date2num(dates_by_name[name])
values = flat_usage_over_time[name]
# we only want to show the top couple callsites, or our legend gets noisy
if max(values) > 0.01 * max_usage:
ax.plot_date(dates, values,
label="{} (max: {:,.0f}MB)".format(name.decode("utf-8"), max(values) / (1024 * 1024)),
linestyle='solid')
else:
ax.plot_date(dates, values, linestyle='solid')
ax.set_yscale('log')
ax.set_ylim(bottom=10*1024*1024)
formatter = ticker.FuncFormatter(lambda y, pos: '{:,.0f}'.format(y / (1024 * 1024)) + 'MB')
ax.yaxis.set_major_formatter(formatter)
plt.xlabel("Time")
plt.ylabel("Flat Space In Use (bytes)")
plt.title("Space In Use By Callsite")
fontP = FontProperties()
fontP.set_size('xx-small')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left', prop=fontP)
ax = plt.subplot(212)
for name in dates_by_name:
dates = mdates.date2num(dates_by_name[name])
values = cumulative_usage_over_time[name]
# we only want to show the top couple callsites, or our legend gets noisy
if max(values) > 0.01 * max_usage:
ax.plot_date(dates, values,
label="{} (max: {:,.0f}MB)".format(name.decode("utf-8"), max(values) / (1024 * 1024)),
linestyle='solid')
else:
ax.plot_date(dates, values, linestyle='solid')
ax.set_yscale('log')
ax.set_ylim(bottom=10*1024*1024)
ax.yaxis.set_major_formatter(formatter)
plt.xlabel("Time")
plt.ylabel("Cumulative Space In Use (bytes)")
fontP = FontProperties()
fontP.set_size('xx-small')
plt.legend(bbox_to_anchor=(1, 1), loc='upper left', prop=fontP)
plt.show()
| apache-2.0 |
kjung/scikit-learn | sklearn/datasets/tests/test_kddcup99.py | 59 | 1336 | """Test kddcup99 loader. Only 'percent10' mode is tested, as the full data
is too big to use in unit-testing.
The test is skipped if the data wasn't previously fetched and saved to the
scikit-learn data folder.
"""
import errno
from sklearn.datasets import fetch_kddcup99
from sklearn.utils.testing import assert_equal, SkipTest
def test_percent10():
try:
data = fetch_kddcup99(download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("kddcup99 dataset can not be loaded.")
assert_equal(data.data.shape, (494021, 41))
assert_equal(data.target.shape, (494021,))
data_shuffled = fetch_kddcup99(shuffle=True, random_state=0)
assert_equal(data.data.shape, data_shuffled.data.shape)
assert_equal(data.target.shape, data_shuffled.target.shape)
data = fetch_kddcup99('SA')
assert_equal(data.data.shape, (100655, 41))
assert_equal(data.target.shape, (100655,))
data = fetch_kddcup99('SF')
assert_equal(data.data.shape, (73237, 4))
assert_equal(data.target.shape, (73237,))
data = fetch_kddcup99('http')
assert_equal(data.data.shape, (58725, 3))
assert_equal(data.target.shape, (58725,))
data = fetch_kddcup99('smtp')
assert_equal(data.data.shape, (9571, 3))
assert_equal(data.target.shape, (9571,))
| bsd-3-clause |
jmschrei/scikit-learn | examples/svm/plot_iris.py | 225 | 3252 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
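# As noted in the module docstring, svc and lin_svc differ in loss (hinge vs.
# squared hinge) and multiclass strategy (one-vs-one vs. one-vs-rest), which is
# why their linear decision boundaries below differ slightly.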
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| bsd-3-clause |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/pandas/io/tests/test_sql.py | 9 | 102928 | """SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL, PostgreSQL)
derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback` and `TestMySQLLegacy`)
"""
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import sys
import nose
import warnings
import numpy as np
import pandas as pd
from datetime import datetime, date, time
from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat
from pandas import date_range, to_datetime, to_timedelta, Timestamp
import pandas.compat as compat
from pandas.compat import StringIO, range, lrange, string_types
from pandas.core import common as com
from pandas.core.datetools import format as date_format
import pandas.io.sql as sql
from pandas.io.sql import read_sql_table, read_sql_query
import pandas.util.testing as tm
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': {
'query': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'mysql': {
'query': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'postgresql': {
'query': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'DateColWithTz', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
},
'read_parameters': {
'sqlite': "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
'mysql': 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
'postgresql': 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s'
},
'read_named_parameters': {
'sqlite': """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
'mysql': """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
'postgresql': """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
"""
},
'create_view': {
'sqlite': """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
}
}
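# Added note: queries above are keyed first by purpose and then by flavor, so test
# helpers look them up as, e.g., SQL_STRINGS['create_iris'][self.flavor].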
class MixInBase(object):
def tearDown(self):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % sql._get_valid_mysql_name(table_name))
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute('SHOW TABLES')
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute("DROP TABLE IF EXISTS %s" % sql._get_valid_sqlite_name(table_name))
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest(unittest.TestCase):
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
import io
iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with io.open(iris_csv_file, mode='r', newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table('iris_view')
self._get_exec().execute(SQL_STRINGS['create_view'][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(dict(A=[4, 1, 3, 6],
B=['asd', 'gsq', 'ylt', 'jkl'],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=['1990-11-22', '1991-10-26', '1993-11-26', '1995-12-12']))
df['E'] = to_datetime(df['E'])
self.test_frame2 = df
def _load_test3_data(self):
columns = ['index', 'A', 'B']
data = [(
'2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670),
('2000-01-04 00:00:00', -29, -0.0412318367011),
('2000-01-05 00:00:00', 20000, 0.731167677815),
('2000-01-06 00:00:00', -290867, 1.56762092543)]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [
{
'TextCol': 'first',
'DateCol': '2000-01-03 00:00:00',
'DateColWithTz': '2000-01-01 00:00:00-08:00',
'IntDateCol': 535852800,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': 1,
'BoolColWithNull': False,
},
{
'TextCol': 'first',
'DateCol': '2000-01-04 00:00:00',
'DateColWithTz': '2000-06-01 00:00:00-07:00',
'IntDateCol': 1356998400,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': None,
'BoolColWithNull': None,
},
]
for d in data:
self._get_exec().execute(
ins['query'],
[d[field] for field in ins['fields']]
)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS['read_parameters'][self.flavor]
params = ['Iris-setosa', 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS['read_named_parameters'][self.flavor]
params = {'name': 'Iris-setosa', 'length': 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_empty(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], 'test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_query('SELECT * FROM test_frame_roundtrip')
result.set_index('level_0', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _to_sql_save_index(self):
df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
columns=['A','B','C'], index=['A'])
self.pandasSQL.to_sql(df, 'test_to_sql_saves_index')
ix_cols = self._get_index_columns('test_to_sql_saves_index')
self.assertEqual(ix_cols, [['A',],])
def _transaction_test(self):
self.pandasSQL.execute("CREATE TABLE test_trans (A INT, B TEXT)")
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
# Make sure when transaction is rolled back, no rows get inserted
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise Exception('error')
except:
# ignore raised exception
pass
res = self.pandasSQL.read_query('SELECT * FROM test_trans')
self.assertEqual(len(res), 0)
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query('SELECT * FROM test_trans')
self.assertEqual(len(res2), 1)
#------------------------------------------------------------------------------
#--- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
Two classes are derived from this one to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode (`TestSQLiteFallbackApi`).
These tests are run with sqlite3. Specific tests for the different
SQL flavors are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed, even in SQLAlchemy mode; it should be
correctly ignored.
we don't use drop_table because that isn't part of the public API
"""
flavor = 'sqlite'
mode = None
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
with tm.assert_produces_warning(FutureWarning):
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replace')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'),
'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='append')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'),
'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, 'test_frame5',
self.conn, flavor='sqlite', index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype='int64'), name='series')
sql.to_sql(s, "test_series", self.conn, flavor='sqlite', index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_to_sql_panel(self):
panel = tm.makePanel()
self.assertRaises(NotImplementedError, sql.to_sql, panel,
'test_panel', self.conn, flavor='sqlite')
def test_legacy_write_frame(self):
# Assume that functionality is already tested above, so just do a
# quick check that it basically works
with tm.assert_produces_warning(FutureWarning):
sql.write_frame(self.test_frame1, 'test_frame_legacy', self.conn,
flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'),
'Table not written to DB')
def test_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index('level_0', inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
index=False, flavor='sqlite', chunksize=2)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
# Test date parsing in read_sql_query
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
self.assertFalse(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['DateCol'])
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
index_col='DateCol',
parse_dates=['DateCol', 'IntDateCol'])
self.assertTrue(issubclass(df.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql('test_timedelta', self.conn)
result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)
tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))
def test_complex(self):
df = DataFrame({'a':[1+1j, 2j]})
# Complex data type should raise error
self.assertRaises(ValueError, df.to_sql, 'test_complex', self.conn)
def test_to_sql_index_label(self):
temp_frame = DataFrame({'col1': range(4)})
# no index name, defaults to 'index'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
# using the index name
temp_frame.index.name = 'index_name'
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index_name',
"Index name not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame({'col1': range(4)},
index=MultiIndex.from_product([('A0', 'A1'), ('B0', 'B1')]))
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'level_0')
self.assertEqual(frame.columns[1], 'level_1')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['A', 'B'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Specified index_labels not written to database")
# using the index name
temp_frame.index.names = ['A', 'B']
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Index names not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['C', 'D'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['C', 'D'],
"Specified index_labels not written to database")
# wrong length of index_label
self.assertRaises(ValueError, sql.to_sql, temp_frame,
'test_index_label', self.conn, if_exists='replace',
index_label='C')
def test_multiindex_roundtrip(self):
df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
columns=['A','B','C'], index=['A','B'])
df.to_sql('test_multiindex_roundtrip', self.conn)
result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
self.conn, index_col=['A','B'])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn,
if_exists='replace')
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite',
con=self.conn)
self.assertTrue('CREATE' in create_sql)
def test_get_schema_dtypes(self):
float_frame = DataFrame({'a':[1.1,1.2], 'b':[2.1,2.2]})
dtype = sqlalchemy.Integer if self.mode == 'sqlalchemy' else 'INTEGER'
create_sql = sql.get_schema(float_frame, 'test', 'sqlite',
con=self.conn, dtype={'b':dtype})
self.assertTrue('CREATE' in create_sql)
self.assertTrue('INTEGER' in create_sql)
def test_get_schema_keys(self):
frame = DataFrame({'Col1':[1.1,1.2], 'Col2':[2.1,2.2]})
create_sql = sql.get_schema(frame, 'test', 'sqlite',
con=self.conn, keys='Col1')
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
self.assertTrue(constraint_sentence in create_sql)
# multiple columns as key (GH10385)
create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite',
con=self.conn, keys=['A', 'B'])
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
self.assertTrue(constraint_sentence in create_sql)
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
df.to_sql('test_chunksize', self.conn, index=False)
# reading the query in one go
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query("select * from test_chunksize",
self.conn, chunksize=5):
res2 = concat([res2, chunk], ignore_index=True)
self.assertEqual(len(chunk), sizes[i])
i += 1
tm.assert_frame_equal(res1, res2)
# reading the table in chunks with read_sql_table
if self.mode == 'sqlalchemy':
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn,
chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
self.assertEqual(len(chunk), sizes[i])
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{'person_id': [1, 2, 3],
'person_name': ['John P. Doe', 'Jane Dove', 'John P. Doe']})
df2 = df.copy()
df2['person_name'] = df2['person_name'].astype('category')
df2.to_sql('test_categorical', self.conn, index=False)
res = sql.read_sql_query('SELECT * FROM test_categorical', self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1,2],[3,4]], columns = [u'\xe9',u'b'])
df.to_sql('test_unicode', self.conn, index=False)
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = 'sqlite'
mode = 'sqlalchemy'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
raise nose.SkipTest('SQLAlchemy not installed')
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
cols = ['A', 'B']
result = sql.read_sql_table('test_frame', self.conn, columns=cols)
self.assertEqual(result.columns.tolist(), cols,
"Columns not correctly selected")
def test_read_table_index_col(self):
# test index_col argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
result = sql.read_sql_table('test_frame', self.conn, index_col="index")
self.assertEqual(result.index.names, ["index"],
"index_col not correctly set")
result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"],
columns=["C", "D"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
self.assertEqual(result.columns.tolist(), ["C", "D"],
"columns not set correctly whith index_col")
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql(
"SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table('iris', self.conn)
iris_frame2 = sql.read_sql('iris', self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table('other_table', self.conn)
sql.read_sql_query('SELECT * FROM other_table', self.conn)
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for other table")
def test_warning_case_insensitive_table_name(self):
# see GH7815.
# We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql('CaseSensitive', self.conn)
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for writing a table")
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
self.assertTrue(isinstance(table.table.c['time'].type, sqltypes.DateTime))
def test_to_sql_read_sql_with_database_uri(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
#db_uri = 'sqlite:///:memory:' # raises sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = 'sqlite:///' + name
table = 'iris'
test_frame1.to_sql(table, db_uri, if_exists='replace', index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = 'SELECT * FROM iris'
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table('iris', metadata,
sa.Column('SepalLength', sa.REAL),
sa.Column('SepalWidth', sa.REAL),
sa.Column('PetalLength', sa.REAL),
sa.Column('PetalWidth', sa.REAL),
sa.Column('Name', sa.TEXT)
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text('select * from iris where name=:name')
iris_df = sql.read_sql(name_text, self.conn, params={'name': 'Iris-versicolor'})
all_names = set(iris_df['Name'])
self.assertEqual(all_names, set(['Iris-versicolor']))
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(iris.c.Name == sqlalchemy.bindparam('name'))
iris_df = sql.read_sql(name_select, self.conn, params={'name': 'Iris-setosa'})
all_names = set(iris_df['Name'])
self.assertEqual(all_names, set(['Iris-setosa']))
class _EngineToConnMixin(object):
"""
A mixin that re-runs the tests with an SQLAlchemy Connection instead of an Engine.
"""
def setUp(self):
super(_EngineToConnMixin, self).setUp()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
def tearDown(self):
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
super(_EngineToConnMixin, self).tearDown()
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = 'sqlite'
mode = 'fallback'
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test that IO against the database still works if the connection is closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn,
flavor="sqlite", index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;",
conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
self.assertRaises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b ']) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn,
flavor="sqlite", index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards compatibility)
create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite')
self.assertTrue('CREATE' in create_sql)
def test_tquery(self):
with tm.assert_produces_warning(FutureWarning):
iris_results = sql.tquery("SELECT * FROM iris", con=self.conn)
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_uquery(self):
with tm.assert_produces_warning(FutureWarning):
rows = sql.uquery("SELECT * FROM iris LIMIT 1", con=self.conn)
self.assertEqual(rows, -1)
def _get_sqlite_column_type(self, schema, column):
for col in schema.split('\n'):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError('Column %s not found' % (column))
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLiteDatabase(self.conn, self.flavor)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
self.assertEqual(self._get_sqlite_column_type(schema, 'time'),
"TIMESTAMP")
#------------------------------------------------------------------------------
#--- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor = None
@classmethod
def setUpClass(cls):
cls.setup_import()
cls.setup_driver()
# test connection
try:
conn = cls.connect()
conn.connect()
except sqlalchemy.exc.OperationalError:
msg = "{0} - can't connect to {1} server".format(cls, cls.flavor)
raise nose.SkipTest(msg)
def setUp(self):
self.setup_connect()
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not installed')
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
raise nose.SkipTest("Can't connect to {0} server".format(self.flavor))
def test_aread_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
pandasSQL.drop_table('temp_frame')
self.assertFalse(
temp_conn.has_table('temp_frame'), 'Table not deleted from DB')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
self.assertRaises(
ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.bool_),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA values becomes object
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object),
"BoolColWithNull loaded with incorrect type")
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={'i64':[2**62]})
df.to_sql('test_bigint', self.conn, index=False)
result = sql.read_sql_table('test_bigint', self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if com.is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00"
self.assertEqual(col[0], Timestamp('2000-01-01 08:00:00'))
# "2000-06-01 00:00:00-07:00" should convert to "2000-06-01 07:00:00"
self.assertEqual(col[1], Timestamp('2000-06-01 07:00:00'))
elif com.is_datetime64tz_dtype(col.dtype):
self.assertTrue(str(col.dt.tz) == 'UTC')
# "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00"
self.assertEqual(col[0], Timestamp('2000-01-01 08:00:00', tz='UTC'))
# "2000-06-01 00:00:00-07:00" should convert to "2000-06-01 07:00:00"
self.assertEqual(col[1], Timestamp('2000-06-01 07:00:00', tz='UTC'))
else:
raise AssertionError("DateCol loaded with incorrect type -> {0}".format(col.dtype))
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df,'DateColWithTz'):
raise nose.SkipTest("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a PostgreSQL server
# version difference
col = df.DateColWithTz
self.assertTrue(com.is_object_dtype(col.dtype) or com.is_datetime64_dtype(col.dtype) \
or com.is_datetime64tz_dtype(col.dtype),
"DateCol loaded with incorrect type -> {0}".format(col.dtype))
df = pd.read_sql_query("select * from types_test_data", self.conn, parse_dates=['DateColWithTz'])
if not hasattr(df,'DateColWithTz'):
raise nose.SkipTest("no column with datetime with time zone")
check(df.DateColWithTz)
df = pd.concat(list(pd.read_sql_query("select * from types_test_data",
self.conn,chunksize=1)),ignore_index=True)
col = df.DateColWithTz
self.assertTrue(com.is_datetime64tz_dtype(col.dtype),
"DateCol loaded with incorrect type -> {0}".format(col.dtype))
self.assertTrue(str(col.dt.tz) == 'UTC')
expected = sql.read_sql_table("types_test_data", self.conn)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz.astype('datetime64[ns, UTC]'))
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates=['DateCol'])
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_table("types_test_data", self.conn, parse_dates={
'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_datetime(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.to_sql('test_datetime', self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
result = result.drop('index', axis=1)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
result = result.drop('index', axis=1)
if self.flavor == 'sqlite':
self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
result['A'] = to_datetime(result['A'])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.loc[1, 'A'] = np.nan
df.to_sql('test_datetime', self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
if self.flavor == 'sqlite':
self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
result['A'] = to_datetime(result['A'], errors='coerce')
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_table('test_date', self.conn)
# comes back as datetime64
tm.assert_series_equal(res['a'], to_datetime(df['a']))
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_table('test_time', self.conn)
tm.assert_frame_equal(res, df)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2**25 + 1,dtype=np.int32)
s2 = Series(0.0,dtype=np.float32)
df = DataFrame({'s1': s1, 's2': s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({'A':[0, 1, 2], 'B':[0.2, np.nan, 5.6]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({'A':[0, 1, 2], 'B':[np.nan, np.nan, np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type info from the table -> stays None
df['B'] = df['B'].astype('object')
df['B'] = None
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({'A':[0, 1, 2], 'B':['a', 'b', np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# NaNs are coming back as None
df.loc[2, 'B'] = None
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = 'test_get_schema_create_table'
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables['dtype_test2'].columns['B'].type
self.assertTrue(isinstance(sqltype, sqlalchemy.TEXT))
self.assertRaises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': str})
# GH9083
df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables['dtype_test3'].columns['B'].type
self.assertTrue(isinstance(sqltype, sqlalchemy.String))
self.assertEqual(sqltype.length, 10)
def test_notnull_dtype(self):
cols = {'Bool': Series([True,None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int' : Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == 'mysql':
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
self.assertTrue(isinstance(col_dict['Bool'].type, my_type))
self.assertTrue(isinstance(col_dict['Date'].type, sqltypes.DateTime))
self.assertTrue(isinstance(col_dict['Int'].type, sqltypes.Integer))
self.assertTrue(isinstance(col_dict['Float'].type, sqltypes.Float))
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame({'f32':Series([V,], dtype='float32'),
'f64':Series([V,], dtype='float64'),
'f64_as_f32':Series([V,], dtype='float64'),
'i32':Series([5,], dtype='int32'),
'i64':Series([5,], dtype='int64'),
})
df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace',
dtype={'f64_as_f32':sqlalchemy.Float(precision=23)})
res = sql.read_sql_table('test_dtypes', self.conn)
# check precision of float64
self.assertEqual(np.round(df['f64'].iloc[0],14),
np.round(res['f64'].iloc[0],14))
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables['test_dtypes'].columns
self.assertEqual(str(col_dict['f32'].type),
str(col_dict['f64_as_f32'].type))
self.assertTrue(isinstance(col_dict['f32'].type, sqltypes.Float))
self.assertTrue(isinstance(col_dict['f64'].type, sqltypes.Float))
self.assertTrue(isinstance(col_dict['i32'].type, sqltypes.Integer))
self.assertTrue(isinstance(col_dict['i64'].type, sqltypes.BigInteger))
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pydata/pandas/issues/10104
def foo(connection):
query = 'SELECT test_foo_data FROM test_foo_data'
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name='test_foo_data', con=connection, if_exists='append')
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({'test_foo_data': [0, 1, 2]}).to_sql('test_foo_data', self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = u'Hello, World!'
expected = DataFrame({'spam': [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = 'temp_test'
__table_args__ = {'prefixes': ['TEMPORARY']}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(
sql=sqlalchemy.select([Temporary.spam]),
con=conn,
)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
raise nose.SkipTest("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy(object):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlalchemy.create_engine('sqlite:///:memory:')
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# sqlite has no boolean type, so integer type is returned
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Non-native Bool column with NA values stays as float
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so it shouldn't be parsed
self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({'a':[1,2]}, dtype='int64')
df.to_sql('test_bigintwarning', self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table('test_bigintwarning', self.conn)
self.assertEqual(len(w), 0, "Warning triggered for other table")
class _TestMySQLAlchemy(object):
"""
Test the sqlalchemy backend against a MySQL database.
"""
flavor = 'mysql'
@classmethod
def connect(cls):
url = 'mysql+{driver}://root@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import pymysql
cls.driver = 'pymysql'
except ImportError:
raise nose.SkipTest('pymysql not installed')
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# MySQL has no real BOOL type (it's an alias for TINYINT)
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA = int column with NA values => becomes float
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_read_procedure(self):
# see GH7324. Although it is more of an API test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({'a': [1, 2, 3], 'b':[0.1, 0.2, 0.3]})
df.to_sql('test_procedure', self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc)
trans.commit()
except:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy(object):
"""
Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = 'postgresql'
@classmethod
def connect(cls):
url = 'postgresql+{driver}://postgres@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import psycopg2
cls.driver = 'psycopg2'
except ImportError:
raise nose.SkipTest('psycopg2 not installed')
def test_schema_support(self):
# only test this for postgresql (schemas are not supported in mysql/sqlite)
df = DataFrame({'col1':[1, 2], 'col2':[0.1, 0.2], 'col3':['a', 'n']})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schemas
df.to_sql('test_schema_public', self.conn, index=False)
df.to_sql('test_schema_public_explicit', self.conn, index=False,
schema='public')
df.to_sql('test_schema_other', self.conn, index=False, schema='other')
# read dataframes back in
res1 = sql.read_sql_table('test_schema_public', self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table('test_schema_public_explicit', self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table('test_schema_public_explicit', self.conn,
schema='public')
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table('test_schema_other', self.conn,
schema='other')
tm.assert_frame_equal(df, res4)
self.assertRaises(ValueError, sql.read_sql_table, 'test_schema_other',
self.conn, schema='public')
## different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql('test_schema_other', self.conn, schema='other', index=False)
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='replace')
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='append')
res = sql.read_sql_table('test_schema_other', self.conn, schema='other')
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
## specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema='other')
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, 'test_schema_other2', index=False)
pdsql.to_sql(df, 'test_schema_other2', index=False, if_exists='replace')
pdsql.to_sql(df, 'test_schema_other2', index=False, if_exists='append')
res1 = sql.read_sql_table('test_schema_other2', self.conn, schema='other')
res2 = pdsql.read_table('test_schema_other2')
tm.assert_frame_equal(res1, res2)
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
#------------------------------------------------------------------------------
#--- Test Sqlite / MySQL fallback
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self.pandasSQL = sql.SQLiteDatabase(self.conn, 'sqlite')
self._load_iris_data()
self._load_test1_data()
def test_invalid_flavor(self):
self.assertRaises(
NotImplementedError, sql.SQLiteDatabase, self.conn, 'oracle')
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
self.pandasSQL.to_sql(temp_frame, 'drop_test_frame')
self.assertTrue(self.pandasSQL.has_table('drop_test_frame'),
'Table not written to DB')
self.pandasSQL.drop_table('drop_test_frame')
self.assertFalse(self.pandasSQL.has_table('drop_test_frame'),
'Table not deleted from DB')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False, flavor=self.flavor)
res = read_sql_query('SELECT * FROM test_date', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == 'mysql':
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
# test that it raises an error rather than failing silently (GH8341)
if self.flavor == 'sqlite':
self.assertRaises(sqlite3.InterfaceError, sql.to_sql, df,
'test_time', self.conn)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' " +
"AND tbl_name = '%s'" % tbl_name, self.conn)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(
"PRAGMA index_info(%s)" % ix_name, self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute('PRAGMA table_info(%s)' % table)
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError('Table %s, column %s not found' % (table, column))
def test_dtype(self):
if self.flavor == 'mysql':
raise nose.SkipTest('Not applicable to MySQL legacy')
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': 'STRING'})
# sqlite stores Boolean values as INTEGER
self.assertEqual(self._get_sqlite_column_type('dtype_test', 'B'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type('dtype_test2', 'B'), 'STRING')
self.assertRaises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': bool})
def test_notnull_dtype(self):
if self.flavor == 'mysql':
raise nose.SkipTest('Not applicable to MySQL legacy')
cols = {'Bool': Series([True,None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int' : Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
self.assertEqual(self._get_sqlite_column_type(tbl, 'Bool'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Date'), 'TIMESTAMP')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Int'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Float'), 'REAL')
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# Raise error on blank
self.assertRaises(ValueError, df.to_sql, "", self.conn,
flavor=self.flavor)
for ndx, weird_name in enumerate(['test_weird_name]','test_weird_name[',
'test_weird_name`','test_weird_name"', 'test_weird_name\'',
'_b.test_weird_name_01-30', '"_b.test_weird_name_01-30"',
'99beginswithnumber', '12345', u'\xe9']):
df.to_sql(weird_name, self.conn, flavor=self.flavor)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', weird_name])
c_tbl = 'test_weird_col_name%d'%ndx
df2.to_sql(c_tbl, self.conn, flavor=self.flavor)
sql.table_exists(c_tbl, self.conn)
class TestMySQLLegacy(MySQLMixIn, TestSQLiteFallback):
"""
Test the legacy mode against a MySQL database.
"""
flavor = 'mysql'
@classmethod
def setUpClass(cls):
cls.setup_driver()
# test connection
try:
cls.connect()
except cls.driver.err.OperationalError:
raise nose.SkipTest("{0} - can't connect to MySQL server".format(cls))
@classmethod
def setup_driver(cls):
try:
import pymysql
cls.driver = pymysql
except ImportError:
raise nose.SkipTest('pymysql not installed')
@classmethod
def connect(cls):
return cls.driver.connect(host='127.0.0.1', user='root', passwd='', db='pandas_nosetest')
def _count_rows(self, table_name):
cur = self._get_exec()
cur.execute(
"SELECT count(*) AS count_1 FROM %s" % table_name)
rows = cur.fetchall()
return rows[0][0]
def setUp(self):
try:
self.conn = self.connect()
except self.driver.err.OperationalError:
raise nose.SkipTest("Can't connect to MySQL server")
self.pandasSQL = sql.SQLiteDatabase(self.conn, 'mysql')
self._load_iris_data()
self._load_test1_data()
def test_a_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn,
flavor='mysql')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='mysql'),
'Table not written to DB')
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SHOW INDEX IN %s" % tbl_name, self.conn)
ix_cols = {}
for ix_name, ix_col in zip(ixs.Key_name, ixs.Column_name):
if ix_name not in ix_cols:
ix_cols[ix_name] = []
ix_cols[ix_name].append(ix_col)
return list(ix_cols.values())
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_illegal_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# These tables and columns should be ok
for ndx, ok_name in enumerate(['99beginswithnumber','12345']):
df.to_sql(ok_name, self.conn, flavor=self.flavor, index=False,
if_exists='replace')
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', ok_name])
df2.to_sql('test_ok_col_name', self.conn, flavor=self.flavor, index=False,
if_exists='replace')
# For MySQL, these should raise ValueError
for ndx, illegal_name in enumerate(['test_illegal_name]','test_illegal_name[',
'test_illegal_name`','test_illegal_name"', 'test_illegal_name\'', '']):
self.assertRaises(ValueError, df.to_sql, illegal_name, self.conn,
flavor=self.flavor, index=False)
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', illegal_name])
self.assertRaises(ValueError, df2.to_sql, 'test_illegal_col_name%d'%ndx,
self.conn, flavor=self.flavor, index=False)
#------------------------------------------------------------------------------
#--- Old tests from 0.13.1 (before refactor using sqlalchemy)
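# The helpers below (write_frame, read_frame, tquery, uquery) predate the
# sqlalchemy refactor and are deprecated; as asserted in the API tests above,
# they now emit FutureWarning but are kept for backwards compatibility.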
_formatters = {
datetime: lambda dt: "'%s'" % date_format(dt),
str: lambda x: "'%s'" % x,
np.str_: lambda x: "'%s'" % x,
compat.text_type: lambda x: "'%s'" % x,
compat.binary_type: lambda x: "'%s'" % x,
float: lambda x: "%.8f" % x,
int: lambda x: "%s" % x,
type(None): lambda x: "NULL",
np.float64: lambda x: "%.10f" % x,
bool: lambda x: "'%s'" % x,
}
def format_query(sql, *args):
"""
"""
processed_args = []
for arg in args:
if isinstance(arg, float) and isnull(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
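# Example (illustrative only): Python floats are rendered with 8 decimals and
# None/NaN become NULL, so
#     format_query("INSERT INTO t VALUES (%s, %s)", 1.5, None)
# returns "INSERT INTO t VALUES (1.50000000, NULL)".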
def _skip_if_no_pymysql():
try:
import pymysql
except ImportError:
raise nose.SkipTest('pymysql not installed, skipping')
class TestXSQLite(SQLiteMixIn, tm.TestCase):
def setUp(self):
self.conn = sqlite3.connect(':memory:')
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
create_sql = sql.get_schema(frame, 'test', 'sqlite')
cur = self.conn.cursor()
cur.execute(create_sql)
cur = self.conn.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
sql.tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_frame("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite')
cur = self.conn.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.ix[0]
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_frame("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
self.assertTrue(tokens[1] == 'DATETIME')
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite', keys=['A', 'B'],)
lines = create_sql.splitlines()
self.assertTrue('PRIMARY KEY ("A", "B")' in create_sql)
cur = self.conn.cursor()
cur.execute(create_sql)
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.execute,
'INSERT INTO test VALUES("foo", "bar", 7)',
self.conn)
finally:
sys.stdout = sys.__stdout__
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.tquery, "select * from test",
con=self.conn)
finally:
sys.stdout = sys.__stdout__
# Initialize connection again (needed for tearDown)
self.setUp()
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.write_frame(frame, name='test_table', con=self.conn)
result = sql.read_frame("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
frame2['Idx'] = Index(lrange(len(frame2))) + 10
sql.write_frame(frame2, name='test_table2', con=self.conn)
result = sql.read_frame("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
expected.index = Index(lrange(len(frame2))) + 10
expected.index.name = 'Idx'
tm.assert_frame_equal(expected, result)
def test_tquery(self):
frame = tm.makeTimeDataFrame()
sql.write_frame(frame, name='test_table', con=self.conn)
result = sql.tquery("select A from test_table", self.conn)
expected = Series(frame.A.values, frame.index) # not to have name
result = Series(result, frame.index)
tm.assert_series_equal(result, expected)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.conn)
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.conn, retry=True)
finally:
sys.stdout = sys.__stdout__
def test_uquery(self):
frame = tm.makeTimeDataFrame()
sql.write_frame(frame, name='test_table', con=self.conn)
stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)'
self.assertEqual(sql.uquery(stmt, con=self.conn), 1)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.conn)
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.conn,
retry=True)
finally:
sys.stdout = sys.__stdout__
def test_keyword_as_column_names(self):
        '''
        A reserved SQL keyword ("From") should be usable as a column name.
        '''
df = DataFrame({'From':np.ones(5)})
sql.write_frame(df, con = self.conn, name = 'testkeywords')
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
mono_df=DataFrame([1 , 2], columns=['c0'])
sql.write_frame(mono_df, con = self.conn, name = 'mono_df')
# computing the sum via sql
con_x=self.conn
the_sum=sum([my_c0[0] for my_c0 in con_x.execute("select * from mono_df")])
# it should not fail, and gives 3 ( Issue #3628 )
self.assertEqual(the_sum , 3)
result = sql.read_frame("select * from mono_df",con_x)
tm.assert_frame_equal(result,mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
flavor='sqlite',
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='sqlite', if_exists='fail')
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
flavor='sqlite',
if_exists='fail')
# test if_exists='replace'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='sqlite', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.conn, name=table_name,
flavor='sqlite', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='sqlite', if_exists='fail')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.conn, name=table_name,
flavor='sqlite', if_exists='append')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
class TestXMySQL(MySQLMixIn, tm.TestCase):
@classmethod
def setUpClass(cls):
_skip_if_no_pymysql()
# test connection
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
except:
pass
else:
return
try:
pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError as e:
raise nose.SkipTest(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error as e:
raise nose.SkipTest(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def setUp(self):
_skip_if_no_pymysql()
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
self.conn = pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
except:
pass
else:
return
try:
self.conn = pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError as e:
raise nose.SkipTest(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error as e:
raise nose.SkipTest(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def test_basic(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql')
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
sql.tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_frame("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql')
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
row = frame.ix[0].values.tolist()
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_frame("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'mysql')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
self.assertTrue(tokens[1] == 'DATETIME')
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql', keys=['A', 'B'],)
lines = create_sql.splitlines()
self.assertTrue('PRIMARY KEY (`A`, `B`)' in create_sql)
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
def test_execute_fail(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.execute,
'INSERT INTO test VALUES("foo", "bar", 7)',
self.conn)
finally:
sys.stdout = sys.__stdout__
def test_execute_closed_connection(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.tquery, "select * from test",
con=self.conn)
finally:
sys.stdout = sys.__stdout__
# Initialize connection again (needed for tearDown)
self.setUp()
def test_na_roundtrip(self):
_skip_if_no_pymysql()
pass
def _check_roundtrip(self, frame):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.conn, flavor='mysql')
result = sql.read_frame("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
result.index.name = frame.index.name
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
index = Index(lrange(len(frame2))) + 10
frame2['Idx'] = index
drop_sql = "DROP TABLE IF EXISTS test_table2"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.write_frame(frame2, name='test_table2', con=self.conn, flavor='mysql')
result = sql.read_frame("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
# HACK! Change this once indexes are handled properly.
expected.index = index
expected.index.names = result.index.names
tm.assert_frame_equal(expected, result)
def test_tquery(self):
try:
import pymysql
except ImportError:
raise nose.SkipTest("no pymysql")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.conn, flavor='mysql')
result = sql.tquery("select A from test_table", self.conn)
expected = Series(frame.A.values, frame.index) # not to have name
result = Series(result, frame.index)
tm.assert_series_equal(result, expected)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.conn)
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.conn, retry=True)
finally:
sys.stdout = sys.__stdout__
def test_uquery(self):
try:
import pymysql
except ImportError:
raise nose.SkipTest("no pymysql")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.conn, flavor='mysql')
stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)'
self.assertEqual(sql.uquery(stmt, con=self.conn), 1)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.conn)
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.conn,
retry=True)
finally:
sys.stdout = sys.__stdout__
def test_keyword_as_column_names(self):
        '''
        A reserved SQL keyword ("From") should be usable as a column name.
        '''
_skip_if_no_pymysql()
df = DataFrame({'From':np.ones(5)})
sql.write_frame(df, con = self.conn, name = 'testkeywords',
if_exists='replace', flavor='mysql')
def test_if_exists(self):
_skip_if_no_pymysql()
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
flavor='mysql',
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='mysql', if_exists='fail')
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
flavor='mysql',
if_exists='fail')
# test if_exists='replace'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='mysql', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.conn, name=table_name,
flavor='mysql', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='mysql', if_exists='fail')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.conn, name=table_name,
flavor='mysql', if_exists='append')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
xxd3vin/spp-sdk | opt/Python27/Lib/site-packages/numpy/lib/recfunctions.py | 23 | 34483 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for matplotlib.
They have been rewritten and extended for convenience.
"""
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
_check_fill_value = np.ma.core._check_fill_value
__all__ = ['append_fields',
'drop_fields',
'find_duplicates',
'get_fieldstructure',
'join_by',
'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields',
'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested
    structures are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
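# Note on zip_descr: for arrays without named fields the individual
# descriptions are simply concatenated, so an int array and a float array
# combine into two anonymous fields that numpy names 'f0' and 'f1' once the
# list is turned into a dtype.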
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields as keys and a list of parent fields as values.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
# if (lastparent[-1] != lastname):
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if hasattr(element, '__iter__') and not isinstance(element, basestring):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
    seqarrays : sequence of arrays
        Sequence of arrays.
    fill_value : {None, integer}
        Value used to pad shorter iterables.
    flatten : {True, False}, optional
        Whether to collapse the fields of each record into a flat sequence
        of items (True) or keep the nested structure (False).
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in itertools.izip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
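# Hypothetical sketch of izip_records (values are made up): the shorter
# inputs are padded with `fill_value` so records can be zipped up to the
# length of the longest array, e.g. zipping np.array([1, 2]) with
# np.array([10., 20., 30.]) yields (1, 10.0), (2, 20.0), (None, 30.0) when
# fill_value is None.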
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
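# Summary of the combinations handled by _fix_output: a masked input with
# usemask=True is returned as a MaskedArray (or viewed as MaskedRecords when
# asrecarray=True); otherwise the data are filled and returned as a plain
# ndarray or recarray.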
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).iteritems():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays,
fill_value= -1, flatten=False, usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
    * Without a mask, each missing value is filled with a default that
      depends on its type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = map(np.asanyarray, seqarrays)
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the fields
to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : string or sequence
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype (`asrecarray=False`)
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
#
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
#
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
#
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append((newname,
_recursive_rename_fields(current, namemapper)))
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value= -1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else :
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
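# Hypothetical usage sketch for append_fields (values are made up):
#   base = np.array([(1,), (2,)], dtype=[('a', int)])
#   append_fields(base, 'b', [10., 20.], usemask=False)
#   -> records (1, 10.0) and (2, 20.0) with fields ('a', 'b')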
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field.
Parameters
----------
    arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" % \
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of string corresponding
to the fields used to join the array.
An exception is raised if the `key` field cannot be found in the two input
arrays.
Neither `r1` nor `r2` should have any duplicates along `key`: the presence
of duplicates will make the output quite unreliable. Note that duplicates
are not looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of r1
        not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1 not
in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present in r2
        but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present in r1
        but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for the
two arrays and concatenating the result. This array is then sorted, and
the common entries selected. The output is constructed by filling the fields
with the selected entries. Matching is not preserved if there are some
duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError("The 'jointype' argument should be in 'inner', "\
"'outer' or 'leftouter' (got '%s' instead)" % jointype)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
(nb1, nb2) = (len(r1), len(r2))
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = names.index(name)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
            names.append(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields : r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names:
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names:
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
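# Hypothetical usage sketch for join_by (values are made up): an 'inner'
# join keeps only the keys present in both inputs, and non-key fields that
# share a name are disambiguated with r1postfix/r2postfix.
#   r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', int), ('value', float)])
#   r2 = np.array([(2, 200.), (3, 300.)], dtype=[('key', int), ('value', float)])
#   join_by('key', r1, r2, jointype='inner', usemask=False)
#   -> a single record with key == 2 and fields 'value1' and 'value2'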
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| mit |
harisbal/pandas | pandas/tests/series/test_duplicates.py | 2 | 4227 | # coding=utf-8
import numpy as np
import pytest
from pandas import Categorical, Series
import pandas.util.testing as tm
def test_value_counts_nunique():
# basics.rst doc example
series = Series(np.random.randn(500))
series[20:500] = np.nan
series[10:20] = 5000
result = series.nunique()
assert result == 11
# GH 18051
s = Series(Categorical([]))
assert s.nunique() == 0
s = Series(Categorical([np.nan]))
assert s.nunique() == 0
def test_unique():
# GH714 also, dtype=float
s = Series([1.2345] * 100)
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
s = Series([1.2345] * 100, dtype='f4')
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
# NAs in object arrays #714
s = Series(['foo'] * 100, dtype='O')
s[::2] = np.nan
result = s.unique()
assert len(result) == 2
# decision about None
s = Series([1, 2, 3, None, None, None], dtype=object)
result = s.unique()
expected = np.array([1, 2, 3, None], dtype=object)
tm.assert_numpy_array_equal(result, expected)
# GH 18051
s = Series(Categorical([]))
tm.assert_categorical_equal(s.unique(), Categorical([]), check_dtype=False)
s = Series(Categorical([np.nan]))
tm.assert_categorical_equal(s.unique(), Categorical([np.nan]),
check_dtype=False)
def test_unique_data_ownership():
# it works! #1807
Series(Series(["a", "c", "b"]).unique()).sort_values()
def test_is_unique():
# GH11946
s = Series(np.random.randint(0, 10, size=1000))
assert s.is_unique is False
s = Series(np.arange(1000))
assert s.is_unique is True
def test_is_unique_class_ne(capsys):
# GH 20661
class Foo(object):
def __init__(self, val):
self._value = val
def __ne__(self, other):
raise Exception("NEQ not supported")
li = [Foo(i) for i in range(5)]
s = Series(li, index=[i for i in range(5)])
_, err = capsys.readouterr()
s.is_unique
_, err = capsys.readouterr()
assert len(err) == 0
@pytest.mark.parametrize(
'keep, expected',
[
('first', Series([False, False, False, False, True, True, False])),
('last', Series([False, True, True, False, False, False, False])),
(False, Series([False, True, True, False, True, True, False]))
])
def test_drop_duplicates_non_bool(any_numpy_dtype, keep, expected):
tc = Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(any_numpy_dtype))
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize('keep, expected',
[('first', Series([False, False, True, True])),
('last', Series([True, True, False, False])),
(False, Series([True, True, True, True]))])
def test_drop_duplicates_bool(keep, expected):
tc = Series([True, False, True, False])
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize('keep, expected', [
('first', Series([False, False, True, False, True], name='name')),
('last', Series([True, True, False, False, False], name='name')),
(False, Series([True, True, True, False, True], name='name'))
])
def test_duplicated_keep(keep, expected):
s = Series(['a', 'b', 'b', 'c', 'a'], name='name')
result = s.duplicated(keep=keep)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('keep, expected', [
('first', Series([False, False, True, False, True])),
('last', Series([True, True, False, False, False])),
(False, Series([True, True, True, False, True]))
])
def test_duplicated_nan_none(keep, expected):
s = Series([np.nan, 3, 3, None, np.nan], dtype=object)
result = s.duplicated(keep=keep)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
poldrack/myconnectome | myconnectome/rnaseq/predict_svm_behav_rnaseq.py | 2 | 2055 | """
use SVM to predict behavioral outcome variables based on rna-seq data
"""
import numpy
import sklearn.preprocessing
import sklearn.linear_model
import scipy.stats
from run_classification import run_classification
from load_myconnectome_data import *
xvar_names=['panas.positive','panas.negative','panas.fatigue','afterscan.Anxietyduringscan','afterscan.diastolic',
'afterscan.pulse','afterscan.systolic','morning.Sleepquality','morning.Soreness','prevevening.Alcohol',
'prevevening.Guthealth','prevevening.Psoriasisseverity','prevevening.Stress', 'prevevening.Timespentoutdoors',
"email.LIWC_negemo","email.LIWC_posemo",'zeo.zq']
rnaseq_data,gene_names,rnaseq_dates,rnaseq_subcodes=load_rnaseq_data(use_wgcna=False)
behavdata,behav_vars,behav_dates,behav_subcodes=load_behav_data(xvars=xvar_names)
rnaseq_joint,behavdata_joint,subcodes_joint=get_matching_datasets(rnaseq_data,behavdata,rnaseq_subcodes,behav_subcodes)
type='regression'
#f=open('/Users/poldrack/Dropbox/data/selftracking/prediction_results/wgcna_behav_predict.txt','w')
predacc=run_classification(rnaseq_joint,behavdata_joint,n_train_runs=50,verbose=False,type=type,clf=sklearn.linear_model.Lasso())
asdf
predacc_null=run_classification(rnaseq_joint,behavdata_joint,n_shuffle_runs=1000,shuffle=True,verbose=False,type=type)
#f.write('%s\t%f\t%f\n'%(behav_vars[varnum],numpy.mean(predacc),pval))
#f.close()
max_all=numpy.max(predacc_null,1)
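# Taking the max across variables within each shuffled run builds a null
# distribution of the maximum accuracy, so pcorr below is a familywise
# error-corrected p-value, while puncorr compares each variable against its
# own null distribution.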
pcorr=numpy.zeros(predacc.shape[1])
puncorr=numpy.zeros(predacc.shape[1])
for var in range(predacc.shape[1]):
pcorr[var]=(100.0 - scipy.stats.percentileofscore(max_all, numpy.mean(predacc[:,var])))/100.0
puncorr[var]=(100.0 - scipy.stats.percentileofscore(predacc_null[:,var], numpy.mean(predacc[:,var])))/100.0
print xvar_names[var],numpy.mean(predacc[:,var]),puncorr[var],pcorr[var]
numpy.save('/Users/poldrack/Dropbox/data/selftracking/prediction_results/predacc_rnaseq_behav.npy',predacc)
numpy.save('/Users/poldrack/Dropbox/data/selftracking/prediction_results/predacc_null_rnaseq_behav.npy',predacc_null)
| mit |
kdebrab/pandas | pandas/tests/indexes/multi/test_copy.py | 2 | 4111 | # -*- coding: utf-8 -*-
from copy import copy, deepcopy
import pandas.util.testing as tm
from pandas import (CategoricalIndex, IntervalIndex, MultiIndex, PeriodIndex,
RangeIndex, Series, compat)
def assert_multiindex_copied(copy, original):
    # Levels should be (at least) shallow-copied
tm.assert_copy(copy.levels, original.levels)
tm.assert_almost_equal(copy.labels, original.labels)
    # It doesn't matter which way the labels are copied
tm.assert_almost_equal(copy.labels, original.labels)
assert copy.labels is not original.labels
    # It doesn't matter which way the names are copied
assert copy.names == original.names
assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(idx):
i_copy = idx.copy()
assert_multiindex_copied(i_copy, idx)
def test_shallow_copy(idx):
i_copy = idx._shallow_copy()
assert_multiindex_copied(i_copy, idx)
def test_view(idx):
i_view = idx.view()
assert_multiindex_copied(i_view, idx)
def test_copy_name(idx):
# gh-12309: Check that the "name" argument
# passed at initialization is honored.
    # TODO: Remove or refactor; MultiIndex is not tested here.
for name, index in compat.iteritems({'idx': idx}):
if isinstance(index, MultiIndex):
continue
first = index.__class__(index, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
# Not using tm.assert_index_equal() since names differ.
assert index.equals(first)
assert first.name == 'mario'
assert second.name == 'mario'
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if not isinstance(index, CategoricalIndex):
# See gh-13365
s3 = s1 * s2
assert s3.index.name == 'mario'
def test_ensure_copied_data(idx):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
    # TODO: REMOVE THIS TEST. MultiIndex is tested separately as noted below.
for name, index in compat.iteritems({'idx': idx}):
init_kwargs = {}
if isinstance(index, PeriodIndex):
# Needs "freq" specification:
init_kwargs['freq'] = index.freq
elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):
# RangeIndex cannot be initialized from data
# MultiIndex and CategoricalIndex are tested separately
continue
index_type = index.__class__
result = index_type(index.values, copy=True, **init_kwargs)
tm.assert_index_equal(index, result)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='copy')
if isinstance(index, PeriodIndex):
# .values an object array of Period, thus copied
result = index_type(ordinal=index.asi8, copy=False,
**init_kwargs)
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
elif isinstance(index, IntervalIndex):
# checked in test_interval.py
pass
else:
result = index_type(index.values, copy=False, **init_kwargs)
tm.assert_numpy_array_equal(index.values, result.values,
check_same='same')
tm.assert_numpy_array_equal(index._ndarray_values,
result._ndarray_values,
check_same='same')
def test_copy_and_deepcopy(indices):
if isinstance(indices, MultiIndex):
return
for func in (copy, deepcopy):
idx_copy = func(indices)
assert idx_copy is not indices
assert idx_copy.equals(indices)
new_copy = indices.copy(deep=True, name="banana")
assert new_copy.name == "banana"
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 75 | 34122 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error defaults to 'strict', so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing makes IDF fragile in the presence of features with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop words found automatically by the vectorizer's DF thresholding:
# words that are highly frequent across the complete corpus are likely
# to be uninformative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
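# Illustrative aside (not part of the original test suite): the signed
# hashing trick exercised above maps each token to a +1/-1 count before
# normalisation, so after l2 scaling every stored value lies strictly inside
# (-1, 1) whenever a document hashes to more than one distinct feature.
# A doctest-style sketch of that behaviour:
#
#   >>> hv = HashingVectorizer(norm='l2')
#   >>> X_demo = hv.transform(["the pizza pizza beer copyright"])
#   >>> bool(np.all(np.abs(X_demo.data) < 1.0))
#   True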
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset every parameter combination converges to a 100%
# accuracy model, so the first setting evaluated by the grid search
# (the unigram representation) is retained as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset every parameter combination converges to a 100%
# accuracy model, so the first setting evaluated by the grid search
# (the unigram representation) is retained as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
nouiz/pylearn2 | pylearn2/models/svm.py | 21 | 3386 | """Wrappers for SVM models."""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
import warnings
try:
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
except ImportError:
warnings.warn("Could not import sklearn.")
class OneVsRestClassifier(object):
"""
Dummy replacement for `sklearn.multiclass.OneVsRestClassifier`.
Parameters
----------
estimator : see `sklearn` doc.
See `sklearn` doc.
Notes
-----
This class is a dummy class included so that sphinx
can import DenseMulticlassSVM and document it even
when sklearn is not installed.
"""
def __init__(self, estimator):
raise RuntimeError("sklearn not available.")
class DenseMulticlassSVM(OneVsRestClassifier):
"""
sklearn does very different things behind the scenes depending
upon the exact identity of the class you use. The only way to
get an SVM implementation that works with dense data is to use
the `SVC` class, which implements one-against-one
classification. This wrapper uses it to implement one-against-
rest classification, which generally works better in my
experiments.
To avoid duplicating the training data, use only numpy ndarrays
whose C_CONTIGUOUS flag is true, and which are in float64
format.
Parameters
----------
C : float
SVM regularization parameter.
See SVC.__init__ for details.
kernel : str
Type of kernel to use.
See SVC.__init__ for details.
gamma : float
Optional parameter of kernel.
See SVC.__init__ for details.
coef0 : float
Optional parameter of kernel.
See SVC.__init__ for details.
degree : int
Degree of kernel, if kernel is polynomial.
See SVC.__init__ for details.
"""
def __init__(self, C, kernel='rbf', gamma=1.0, coef0=1.0, degree=3):
estimator = SVC(C=C, kernel=kernel, gamma=gamma, coef0=coef0,
degree=degree)
super(DenseMulticlassSVM, self).__init__(estimator)
def fit(self, X, y):
"""
Fit underlying estimators.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples] or [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
self
"""
super(DenseMulticlassSVM, self).fit(X, y)
return self
def decision_function(self, X):
"""
Returns the distance of each sample from the decision boundary for
each class.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A 2D ndarray with each row containing the input features for one
example.
Returns
-------
T : array-like, shape = [n_samples, n_classes]
"""
return np.column_stack([estimator.decision_function(X)
for estimator in self.estimators_])
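# Illustrative usage sketch (not part of the original pylearn2 API); it
# assumes scikit-learn is importable so that the real OneVsRestClassifier
# and SVC are used, and runs on small random data.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = np.ascontiguousarray(rng.randn(30, 4))   # float64, C-contiguous
    y_demo = rng.randint(0, 3, size=30)               # three classes
    clf = DenseMulticlassSVM(C=1.0, kernel='rbf', gamma=0.5)
    clf.fit(X_demo, y_demo)
    # decision_function stacks one column per one-vs-rest estimator
    print(clf.decision_function(X_demo).shape)        # (30, 3)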
| bsd-3-clause |
sdvillal/manysources | manysources/analyses/better_or_worse_coocc.py | 1 | 6541 | '''
For each experiment: are the resulting losses (or AUC) better or worse than average?
'''
from manysources.analyses.losses import read_losses
from collections import defaultdict
import pandas as pd
import numpy as np
def average_loss(dset, feats, model, lso, calibration):
"""
At each expid, we get 1 loss per molecule. We average this. Then we average across all expids
Returns: a dataframe with one row per expid,foldid and the average loss as a column
the average of all losses across all expids
the standard deviation of all losses across all expids
"""
df_losses, df_folds = read_losses(dset=dset, feats=feats, model=model, calibration=calibration, lso=lso,
also_folds=True)
#print df_folds
df_mean = df_losses.mean(axis=1) # for each expid, we get the average loss across all molecules
total_mean = df_mean.mean(axis=0)
total_std = df_mean.std(axis=0)
# Now what we really want is how each split performed, so we mask the losses at each expid by fold
# so as to get an (expid, foldid): mean loss entry for each fold of each expid
df_means_by_fold = [df_losses[df_folds==fold].mean(axis=1) for fold in range(10)]
big_df = pd.concat(df_means_by_fold, axis=1) # df with expids rows and 10 columns (max 10 folds per expids)
big_df['expid'] = big_df.index # copy index so it does not get lost during melting
tidy_df = pd.melt(big_df, value_name='mean loss', var_name='foldid', id_vars='expid').dropna()
return tidy_df, total_mean, total_std
def to_y(tidy_df, mean, std):
# We could choose to apply many rules. For now: if the loss is higher than mean+std --> class 1, if loss is lower
# than mean-std --> class 0. Drop the rest
tidy_df['class1'] = tidy_df['mean loss'] > (mean + std)
tidy_df['class0'] = tidy_df['mean loss'] < (mean - std)
# drop the rows that belong to neither class 0 nor class 1
tidy_df = tidy_df[(tidy_df.class0 != False) | (tidy_df.class1 != False)] # at least one of the 2 columns is True
bla = tidy_df.drop('class0', axis=1)
y_df = bla.drop('mean loss', axis=1)
y_df['class1'] = y_df['class1'].astype('int')
return y_df
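# Worked example of the labelling rule above (hypothetical numbers): with an
# overall mean loss of 0.50 and a std of 0.10, a fold whose mean loss is 0.65
# exceeds mean + std and gets class1 = 1, a fold at 0.38 falls below
# mean - std and gets class1 = 0, and a fold at 0.55 lies in between and is
# dropped from y_df.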
def get_xy(dset, feats, model, lso, y_df):
from manysources.analyses.cooccurrences import molecules_coocurrences_df
# get cooccurrences of compounds, along with the corresponding expids and fold ids as lists
cooc, molids, expids, folds = molecules_coocurrences_df(dset, feats=feats, model=model, lso=lso)
#print coocurrences
#print expids
#print folds
cooccurrences_dict = defaultdict(list)
for i in range(len(cooc)):
cooccurrences_dict[(expids[i], folds[i])] = cooc[i]
expids_in_y = y_df['expid']
folds_in_y = y_df['foldid']
y = np.array(y_df['class1'])
X = []
[X.append(cooccurrences_dict[(expid,foldid)]) for expid,foldid in zip(expids_in_y, folds_in_y)]
X = np.array(X, dtype=np.int)
return X, y
def cv_splits(num_points, Y, num_folds, rng=None, stratify=True, banned_train=None, banned_test=None):
"""
(Stratified) cross-validation (from oscail.common.evaluation).
Parameters:
- num_points: the number of elements to split
- Y: the group for each point (e.g. the class, the score, the source...)
- num_folds: the number of splits
- rng: an instance of a python/numpy random number generator
- stratify: if True, a best effort is carried to keep consistent the Y proportions in each split
- banned_train: a list of indices to not include in train (e.g. non-target class for OCC) or None
- banned_test: a list of indices to not include in test or None
Returns a function that maps a fold index (from 0 to num_folds-1) to the indices of its train/test instances.
"""
if rng is None:
rng = np.random.RandomState(0)
permutation = rng.permutation(num_points)
if stratify:
permutation = permutation[np.argsort(Y[permutation])]
folds = [permutation[base::num_folds] for base in range(num_folds)]
seed = rng.randint(1024*1024*1024)
banned_train = set() if not banned_train else set(banned_train)
banned_test = set() if not banned_test else set(banned_test)
def cver(fold):
if fold >= num_folds:
raise Exception('There are not so many folds (requested fold %d for a cross-val of %d folds)' %
(fold, num_folds))
rng = np.random.RandomState(seed)
# Now select and permute test indices, removing the banned items.
# N.B. Order of operations matter...
test_indices = folds[fold][rng.permutation(len(folds[fold]))]
ban_in_train = set(test_indices) | banned_train
train_indices = [train_i for train_i in rng.permutation(num_points)
if train_i not in ban_in_train]
test_indices = [test_i for test_i in test_indices if test_i not in banned_test]
return train_indices, test_indices
return cver
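# Usage sketch (illustrative only, variable names are hypothetical): build a
# stratified 5-fold splitter over 10 points with binary groups and pull out
# the first fold; each test fold then holds 2 indices.
#
#   >>> groups = np.array([0, 1] * 5)
#   >>> cver = cv_splits(num_points=10, Y=groups, num_folds=5)
#   >>> train_indices, test_indices = cver(0)
#   >>> len(test_indices), len(train_indices)
#   (2, 8)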
def build_and_validate_logreg(X, y, cv=10):
from sklearn.linear_model import LogisticRegression
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import roc_auc_score
parameters = {'penalty': ['l1', 'l2'], 'C': [1E-6, 1E-5, 1E-4, 1E-3, 1E-2, 1E-1, 0.5, 1, 1.5, 5, 10, 100]}
model = LogisticRegression()
model = GridSearchCV(model, parameters, n_jobs=4)
cver = cv_splits(len(y), y, num_folds=cv)
auc_results = []
for fold in range(cv):
print fold
print y
train_indices, test_indices = cver(fold)
Xtrain = X[train_indices, :]
ytrain = y[train_indices]
Xtest = X[test_indices, :]
ytest = y[test_indices]  # labels (not features) for the held-out fold
# Train
model.fit(Xtrain, ytrain)
# Check results
print model.best_score_
print model.best_params_
yhat = model.predict_proba(Xtest)[:, 1]  # probability of the positive class for ROC AUC
auc_results.append(roc_auc_score(ytest, yhat))
return np.mean(np.array(auc_results))
def build_logreg(X, y, penalty, C):
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(penalty=penalty, C=C)
m = model.fit(X,y)
coeffs = m.coef_
print len(coeffs[0])
print coeffs
if __name__ == '__main__':
tidy_df, total_mean, total_std = average_loss('bcrp', 'ecfps1', 'logreg3', True, '0-1')
y_df = to_y(tidy_df, total_mean, total_std)
X, y = get_xy('bcrp', 'ecfps1', 'logreg3', True, y_df)
build_logreg(X, y, penalty='l2', C=0.001) | bsd-3-clause |
NelisVerhoef/scikit-learn | sklearn/tree/export.py | 78 | 15814 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
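# Illustrative check (not part of the original module): _color_brew returns
# one [R, G, B] entry per requested hue, with every channel in 0..255.
#
#   >>> palette = _color_brew(3)
#   >>> len(palette)
#   3
#   >>> all(0 <= channel <= 255 for rgb in palette for channel in rgb)
#   True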
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
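# Worked example (illustrative numbers): for a two-class node with
# value = [0.9, 0.1], the majority class is index 0 and the fill opacity is
# alpha = int(255 * (0.9 - 0.1) / (1 - 0.1)) = 226, i.e. an almost opaque
# fill for a very pure node; an evenly split node ([0.5, 0.5]) gives
# alpha = 0, a fully transparent fill.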
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
datapythonista/pandas | pandas/core/internals/base.py | 1 | 4002 | """
Base class for the internal managers. Both BlockManager and ArrayManager
inherit from this class.
"""
from typing import (
List,
Optional,
TypeVar,
)
from pandas._typing import (
DtypeObj,
Shape,
final,
)
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.cast import find_common_type
from pandas.core.base import PandasObject
from pandas.core.indexes.api import Index
T = TypeVar("T", bound="DataManager")
class DataManager(PandasObject):
# TODO share more methods/attributes
axes: List[Index]
@property
def items(self) -> Index:
raise AbstractMethodError(self)
def __len__(self) -> int:
return len(self.items)
@property
def ndim(self) -> int:
return len(self.axes)
@property
def shape(self) -> Shape:
return tuple(len(ax) for ax in self.axes)
@final
def _validate_set_axis(self, axis: int, new_labels: Index) -> None:
# Caller is responsible for ensuring we have an Index object.
old_len = len(self.axes[axis])
new_len = len(new_labels)
if axis == 1 and len(self.items) == 0:
# If we are setting the index on a DataFrame with no columns,
# it is OK to change the length.
pass
elif new_len != old_len:
raise ValueError(
f"Length mismatch: Expected axis has {old_len} elements, new "
f"values have {new_len} elements"
)
def reindex_indexer(
self: T,
new_axis,
indexer,
axis: int,
fill_value=None,
allow_dups: bool = False,
copy: bool = True,
consolidate: bool = True,
only_slice: bool = False,
) -> T:
raise AbstractMethodError(self)
@final
def reindex_axis(
self: T,
new_index: Index,
axis: int,
fill_value=None,
consolidate: bool = True,
only_slice: bool = False,
) -> T:
"""
Conform data manager to new index.
"""
new_index, indexer = self.axes[axis].reindex(new_index)
return self.reindex_indexer(
new_index,
indexer,
axis=axis,
fill_value=fill_value,
copy=False,
consolidate=consolidate,
only_slice=only_slice,
)
def _equal_values(self: T, other: T) -> bool:
"""
To be implemented by the subclasses. Only check the column values
assuming shape and indexes have already been checked.
"""
raise AbstractMethodError(self)
def equals(self, other: object) -> bool:
"""
Implementation for DataFrame.equals
"""
if not isinstance(other, DataManager):
return False
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
return self._equal_values(other)
def apply(
self: T,
f,
align_keys: Optional[List[str]] = None,
ignore_failures: bool = False,
**kwargs,
) -> T:
raise AbstractMethodError(self)
def isna(self: T, func) -> T:
return self.apply("apply", func=func)
class SingleDataManager(DataManager):
ndim = 1
@property
def array(self):
"""
Quick access to the backing array of the Block or SingleArrayManager.
"""
return self.arrays[0] # type: ignore[attr-defined]
def interleaved_dtype(dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
"""
Find the common dtype for `blocks`.
Parameters
----------
blocks : List[DtypeObj]
Returns
-------
dtype : np.dtype, ExtensionDtype, or None
None is returned when `blocks` is empty.
"""
if not len(dtypes):
return None
return find_common_type(dtypes)
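# Doctest-style sketch (illustrative, not part of the public pandas API
# surface): mixing int64 and float64 falls back to the common float64 dtype,
# and an empty list yields None.
#
#   >>> import numpy as np
#   >>> interleaved_dtype([np.dtype("int64"), np.dtype("float64")])
#   dtype('float64')
#   >>> interleaved_dtype([]) is None
#   True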
| bsd-3-clause |
LohithBlaze/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
tbekolay/neurotools | neurotools/visualization/__init__.py | 1 | 6758 | import sys, os.path
import numpy
import tempfile, shutil
import logging
from neurotools import check_dependency
from neurotools.plotting import progress_bar
if check_dependency('matplotlib'):
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.image import imread
import matplotlib.cm
class MultipanelMovie(object):
def __init__(self, title='', frame_duration=40.0):
self.fig = Figure()
self.canvas = FigureCanvas(self.fig)
self.panels = []
self.title = title
self.frame_duration = frame_duration
def add_panel(self, obj, bottomleft_corner, width, height):
"""
obj must be an object that has a string attribute `plot_function` and
a method `next_frame()`.
"""
panel = self.fig.add_axes([bottomleft_corner[0], bottomleft_corner[1], width, height])
self.panels.append((panel, obj))
return panel
def get_panel(self, obj):
for panel,_obj in self.panels:
if _obj == obj:
return panel
return None
def add_text(self, bottomleft_corner, text, **kwargs):
x, y = bottomleft_corner
self.fig.text(x, y, text, **kwargs)
def write_frames(self, nframes):
if nframes >= 1e6:
raise Exception("Cannot handle movies with 1 million frames or more.")
self.frame_directory = tempfile.mkdtemp(prefix='tmp_neurotools_visualization_')
time_label = self.fig.text(0.01, 0.01, "t = 0 ms", horizontalalignment='left')
for i in range(int(nframes)):
for panel,obj in self.panels:
assert self.frame_duration == obj.frame_duration, "%s != %s" % (self.frame_duration, obj.frame_duration)
panel.lines = []; panel.images = []
plot = getattr(panel, obj.plot_function)
plot(*obj.next_frame(), **obj.kwargs)
if obj.plot_function == "imshow" and i==0:
pos = panel.get_position()
try:
l,b,w,h = pos # older versions of Matplotlib
except TypeError:
l,b,w,h = pos.bounds # newer versions return a Bbox object
cb_panel = self.fig.add_axes([l+w, b, 0.05*w, h])
self.fig.colorbar(panel.images[0], cb_panel)
time_label.set_text("t = %g ms" % (i*self.frame_duration,))
self.canvas.print_figure(os.path.join(self.frame_directory, "frame%06d.png" % i))
progress_bar(float(i)/nframes)
def __del__(self):
# when the object is deleted, delete temp directory
if hasattr(self, 'frame_directory'):
shutil.rmtree(self.frame_directory, ignore_errors=False)
def render(self, filename, fps=25):
command = "mencoder 'mf://%s/frame*.png' -mf type=png:fps=%d -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o %s" % (self.frame_directory,
fps, filename)
print command
os.system(command)
class ImageSequence(object):
plot_function = "imshow"
def __init__(self, frames, frame_duration=40.0, **kwargs):
self.frames = frames
self.frame_duration = frame_duration
self._i = -1
self.kwargs = {'interpolation': 'nearest'}
self.kwargs.update(kwargs)
def next_frame(self):
self._i += 1
return [self.frames[:,:,self._i]]
class SineWave(object):
plot_function = "plot"
def __init__(self, **kwargs):
self.t = numpy.arange(0, 2*numpy.pi, numpy.pi/20)
self.phase = numpy.pi/20
self.kwargs = kwargs
def next_frame(self):
self.phase -= numpy.pi/20
return self.t, numpy.sin(self.t + self.phase)
def xy2ij(coordinates, height):
"""
Generally, we use (x,y) coordinates, but since arrays use matrix coordinates,
(x,y) <--> (j,i), and so we need to do a flip when putting data into arrays.
"""
assert len(coordinates) == 2
x,y = coordinates
j = x
i = height - 1 - y
return (i,j)
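# Illustrative check (not part of the original module): with an array of
# height 3, the point (x, y) = (0, 0) sits in the bottom-left corner and maps
# to matrix indices (i, j) = (2, 0), while (x, y) = (1, 2) maps to (0, 1):
#
#     >>> xy2ij((0, 0), height=3)
#     (2, 0)
#     >>> xy2ij((1, 2), height=3)
#     (0, 1)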
class ActivityMap(object):
plot_function = "imshow"
def __init__(self, spikelist, frame_duration=40.0, **kwargs):
self.spikelist = spikelist
self.frame_duration = frame_duration
self.kwargs = kwargs
if self.spikelist.dimensions is None:
raise Exception("Dimensions of the population are not defined ! Set spikelist.dims")
self.time, self.ids = self.spikelist.convert("times, ids")
# We sort the spikes to allow faster processing later
sort_idx = self.time.ravel().argsort(kind="quicksort")
self.time = self.time[sort_idx]
self.ids = self.ids[sort_idx]
self.i = 0
self.max_i = len(self.time)-1
self.t_start = 0
def next_frame(self):
spk = self.spikelist
activity_map = numpy.zeros(spk.dimensions)
h,w = spk.dimensions
id_offset = min(spk.id_list())
xarr,yarr = spk.id2position(self.spikelist.id_list() - id_offset)
while (self.i < self.max_i) and (self.time[self.i] < self.t_start + self.frame_duration):
id = self.ids[self.i] - id_offset
x = xarr[id]
y = yarr[id]
#xy = spk.id2position(self.ids[self.i] - id_offset)
#assert xy == (x,y), "%s != %s" % (xy, str((x,y)))
activity_map[xy2ij((x,y), h)] += 1
self.i += 1
self.t_start += self.frame_duration
#logging.debug("next_frame: i=%d, t_start=%g, max_i=%d, time[i]=%g" % (self.i, self.t_start, self.max_i, self.time[self.i]))
activity_map *= 1000.0/self.frame_duration # convert to spikes/second
return [activity_map]
def test():
sine = SineWave()
movie = MultipanelMovie()
sine_panel = movie.add_panel(sine, (0.1,0.1), 0.4, 0.4)
sine_panel.set_title("Sine wave")
sine_panel.set_xticks(numpy.pi*numpy.arange(0, 2, 0.5))
lena = imread("lena.png")[:,:,0]
frames = numpy.zeros([lena.shape[0], lena.shape[1], 50])
for i in range(1,50):
j = 10*i
frame = numpy.zeros_like(lena)
frame[:, :j] = lena[:,-j:]
frames[:,:,i] = frame
lena_seq = ImageSequence(frames, cmap=matplotlib.cm.gray)
lena_panel = movie.add_panel(lena_seq, (0.55, 0.55), 0.4, 0.4)
movie.write_frames(50)
movie.render("test.mpg", fps=25)
print movie.frame_directory
| gpl-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/cluster/tests/test_k_means.py | 7 | 32602 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics.cluster import homogeneity_score
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_elkan_results():
rnd = np.random.RandomState(0)
X_normal = rnd.normal(size=(50, 10))
X_blobs, _ = make_blobs(random_state=0)
km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1)
km_elkan = KMeans(algorithm='elkan', n_clusters=5,
random_state=0, n_init=1)
for X in [X_normal, X_blobs]:
km_full.fit(X)
km_elkan.fit(X)
assert_array_almost_equal(km_elkan.cluster_centers_,
km_full.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_full.labels_)
def test_labels_assignment_and_inertia():
# pure numpy implementation as an easily auditable reference ("gold")
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that an error is raised if the precompute_distances flag has an
# unsupported value
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
# test for sensible errors when giving explicit init
# with wrong number of features or clusters
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 3))
for Class in [KMeans, MiniBatchKMeans]:
# mismatch of number of features
km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
msg = "does not match the number of features of the data"
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:, :2],
n_clusters=len(X))
assert_raises_regex(ValueError, msg, km.fit, X)
# mismatch of number of clusters
msg = "does not match the number of clusters"
km = Class(n_init=1, init=X[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1,
init=lambda X_, k, random_state: X_[:2, :],
n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
def test_k_means_fortran_aligned_data():
# Check the KMeans will work well, even if X is a fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42, n_clusters=2)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
msg = "does not match the number of clusters"
assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
random_state=42).fit,
X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check that k_means with a bad initialization does not yield a singleton.
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass, which
# would lead to collapsed centers and in turn make the clustering
# dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1,
algorithm='elkan')
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1,
algorithm='elkan')
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_int_input():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
for dtype in [np.int32, np.int64]:
X_int = np.array(X_list, dtype=dtype)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
for km in fitted_models:
assert_equal(km.cluster_centers_.dtype, np.float64)
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_predict_equal_labels():
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='full')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='elkan')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_full_vs_elkan():
km1 = KMeans(algorithm='full', random_state=13)
km2 = KMeans(algorithm='elkan', random_state=13)
km1.fit(X)
km2.fit(X)
assert homogeneity_score(km1.predict(X), km2.predict(X)) == 1.0
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
# too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
km = KMeans(max_iter=-1)
assert_raise_message(ValueError, 'Number of iterations should be',
km.fit, X)
def test_float_precision():
km = KMeans(n_init=1, random_state=30)
mb_km = MiniBatchKMeans(n_init=1, random_state=30)
inertia = {}
X_new = {}
centers = {}
for estimator in [km, mb_km]:
for is_sparse in [False, True]:
for dtype in [np.float64, np.float32]:
if is_sparse:
X_test = sp.csr_matrix(X_csr, dtype=dtype)
else:
X_test = X.astype(dtype)
estimator.fit(X_test)
# dtype of cluster centers has to be the dtype of the input
# data
assert_equal(estimator.cluster_centers_.dtype, dtype)
inertia[dtype] = estimator.inertia_
X_new[dtype] = estimator.transform(X_test)
centers[dtype] = estimator.cluster_centers_
# ensure the extracted row is a 2d array
assert_equal(estimator.predict(X_test[:1]),
estimator.labels_[0])
if hasattr(estimator, 'partial_fit'):
estimator.partial_fit(X_test[0:3])
# dtype of cluster centers has to stay the same after
# partial_fit
assert_equal(estimator.cluster_centers_.dtype, dtype)
# compare arrays with low precision since the difference between
# 32 and 64 bit sometimes makes a difference up to the 4th decimal
# place
assert_array_almost_equal(inertia[np.float32], inertia[np.float64],
decimal=4)
assert_array_almost_equal(X_new[np.float32], X_new[np.float64],
decimal=4)
assert_array_almost_equal(centers[np.float32], centers[np.float64],
decimal=4)
def test_k_means_init_centers():
# This test is used to check KMeans won't mutate the user provided input
# array silently even if input data and init centers have the same type
X_small = np.array([[1.1, 1.1], [-7.5, -7.5], [-1.1, -1.1], [7.5, 7.5]])
init_centers = np.array([[0.0, 0.0], [5.0, 5.0], [-5.0, -5.0]])
for dtype in [np.int32, np.int64, np.float32, np.float64]:
X_test = dtype(X_small)
init_centers_test = dtype(init_centers)
assert_array_equal(init_centers, init_centers_test)
km = KMeans(init=init_centers_test, n_clusters=3, n_init=1)
km.fit(X_test)
assert_equal(False, np.may_share_memory(km.cluster_centers_, init_centers))
def test_sparse_k_means_init_centers():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
# Get a local optimum
centers = KMeans(n_clusters=3).fit(X).cluster_centers_
# Fit starting from a local optimum shouldn't change the solution
np.testing.assert_allclose(
centers,
KMeans(n_clusters=3,
init=centers,
n_init=1).fit(X).cluster_centers_
)
# The same should be true when X is sparse
X_sparse = sp.csr_matrix(X)
np.testing.assert_allclose(
centers,
KMeans(n_clusters=3,
init=centers,
n_init=1).fit(X_sparse).cluster_centers_
)
def test_sparse_validate_centers():
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
# Get a local optimum
centers = KMeans(n_clusters=4).fit(X).cluster_centers_
# Test that a ValueError is raised for validate_center_shape
classifier = KMeans(n_clusters=3, init=centers, n_init=1)
msg = "The shape of the initial centers \(\(4L?, 4L?\)\) " \
"does not match the number of clusters 3"
assert_raises_regex(ValueError, msg, classifier.fit, X)
| mit |
benoitsteiner/tensorflow-opencl | tensorflow/python/estimator/inputs/pandas_io_test.py | 89 | 8340 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_NonBoolShuffle(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaisesRegexp(TypeError,
'shuffle must be explicitly set as boolean'):
# Default shuffle is None
pandas_io.pandas_input_fn(x, y_noindex)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |
uwescience/pulse2percept | examples/models/plot_horsager2009.py | 1 | 9470 | # -*- coding: utf-8 -*-
"""
===============================================================================
Horsager et al. (2009): Predicting temporal sensitivity
===============================================================================
This example shows how to use the
:py:class:`~pulse2percept.models.Horsager2009Model`.
The model introduced in [Horsager2009]_ assumes that electrical stimulation
leads to percepts that quickly increase in brightness (over the time course
of ~100ms) and then slowly fade away (over the time course of seconds).
The model was fit to perceptual sensitivity data for a number of different
pulse trains, which are available in the :py:mod:`~pulse2percept.datasets`
subpackage.
The dataset can be loaded as follows:
"""
# sphinx_gallery_thumbnail_number = 3
from pulse2percept.datasets import load_horsager2009
data = load_horsager2009()
data.shape
###############################################################################
# Single-pulse thresholds
# -----------------------
#
# Loading the data
# ^^^^^^^^^^^^^^^^
#
# The data includes a number of thresholds measured on single-pulse stimuli.
# We can load a subset of these data; for example, for subject S05 and
# Electrode C3:
single_pulse = load_horsager2009(subjects='S05', electrodes='C3',
stim_types='single_pulse')
single_pulse
###############################################################################
# Creating the stimulus
# ^^^^^^^^^^^^^^^^^^^^^
#
# To recreate Fig. 3 in the paper, where the model fit to single-pulse stimuli
# is shown, we first need to recreate the stimulus used in the figure.
#
# For example, we can create a stimulus from a single biphasic pulse
# (0.075 ms phase duration) with amplitude 180 uA, lasting 200 ms in total:
import numpy as np
from pulse2percept.stimuli import BiphasicPulse
phase_dur = 0.075
stim_dur = 200
pulse = BiphasicPulse(180, phase_dur, interphase_dur=phase_dur,
stim_dur=stim_dur, cathodic_first=True)
pulse.plot(time=np.linspace(0, 10, num=10000))
###############################################################################
# Simulating the model response
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The model's response to this stimulus can be visualized as follows:
from pulse2percept.models import Horsager2009Temporal
model = Horsager2009Temporal()
model.build()
percept = model.predict_percept(pulse, t_percept=np.arange(stim_dur))
max_bright = percept.data.max()
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(12, 5))
ax.plot(pulse.time, -20 + 10 * pulse.data[0, :] / pulse.data.max(),
linewidth=3, label='pulse')
ax.plot(percept.time, percept.data[0, 0, :], linewidth=3, label='percept')
ax.plot([0, stim_dur], [max_bright, max_bright], 'k--', label='max brightness')
ax.plot([0, stim_dur], [0, 0], 'k')
ax.set_xlabel('Time (s)')
ax.set_ylabel('Predicted brightness (a.u.)')
ax.set_xlim(0, stim_dur)
fig.legend(loc='center right')
fig.tight_layout()
###############################################################################
# Finding the threshold current
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Finally, we need to find the "threshold current" to ultimately reproduce
# Fig. 3.
# In the real world, the threshold current is defined as the stimulus amplitude
# needed to elicit a detectable phosphene (e.g., 50% of the time).
# This threshold current typically differs for every stimulus, stimulated
# electrode, and patient.
#
# In the model, there is no notion of "seeing something 50% of the time".
# Instead, the model was assumed to reach threshold if the model response
# exceeded some constant :math:`\\theta` over time.
#
# The process of finding the stimulus amplitude needed to achieve model output
# :math:`\\theta` can be automated with the help of the
# :py:meth:`~pulse2percept.models.Horsager2009Temporal.find_threshold` method.
#
# We will run this method on every data point from the ones selected above:
amp_th = []
for _, row in single_pulse.iterrows():
# Set up a biphasic pulse with amplitude 1uA - the amplitude will be
# up-and-down regulated by find_threshold until the output matches
# theta:
stim = BiphasicPulse(1, row['pulse_dur'],
interphase_dur=row['interphase_dur'],
stim_dur=row['stim_dur'],
cathodic_first=True)
# Find the current that gives model output theta. Search amplitudes in the
# range [0, 300] uA. Stop the search once the candidate amplitudes are
# within 1 uA, or the model output is within 0.1 of theta:
amp_th.append(model.find_threshold(stim, row['theta'],
amp_range=(0, 300), amp_tol=1,
bright_tol=0.1))
plt.semilogx(single_pulse.pulse_dur, single_pulse.stim_amp, 's', label='data')
plt.semilogx(single_pulse.pulse_dur, amp_th, 'k-', linewidth=3, label='model')
plt.xticks([0.1, 1, 4])
plt.xlabel('pulse duration (ms)')
plt.ylabel('threshold current (uA)')
plt.legend()
plt.title('Fig. 3B: S05 (C3)')
###############################################################################
# Fixed-duration pulse train thresholds
# -------------------------------------
#
# The same procedure can be repeated for
# :py:class:`~pulse2percept.stimuli.BiphasicPulseTrain` stimuli to reproduce
# Fig. 4.
from pulse2percept.stimuli import BiphasicPulseTrain
# Load the data:
fixed_dur = data[(data.stim_type == 'fixed_duration') &
(data.subject == 'S05') &
(data.electrode == 'C3') &
(data.pulse_dur == 0.075)]
# Find the threshold:
amp_th = []
for _, row in fixed_dur.iterrows():
stim = BiphasicPulseTrain(row['stim_freq'], 1, row['pulse_dur'],
interphase_dur=row['interphase_dur'],
stim_dur=row['stim_dur'], cathodic_first=True)
amp_th.append(model.find_threshold(stim, row['theta'],
amp_range=(0, 300), amp_tol=1,
bright_tol=0.1))
plt.semilogx(fixed_dur.stim_freq, fixed_dur.stim_amp, 's', label='data')
plt.semilogx(fixed_dur.stim_freq, amp_th, 'k-', linewidth=3, label='model')
plt.xticks([5, 15, 75, 225])
plt.xlabel('frequency (Hz)')
plt.ylabel('threshold current (uA)')
plt.legend()
plt.title('Fig. 4B: S05 (C3), 0.075 ms pulse width')
###############################################################################
# Other stimuli
# -------------
#
# Bursting pulse triplets
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# "Bursting pulse triplets" as shown in Fig. 7 are readily supported via the
# :py:class:`~pulse2percept.stimuli.BiphasicTripletTrain` class.
#
# Variable-duration pulse trains
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# A "variable-duration" pulse train is essentially
# :py:class:`~pulse2percept.stimuli.BiphasicPulseTrain` cut to the length of
# N pulses.
#
# For example, the following recreates a pulse train used in Fig. 5B:
from pulse2percept.stimuli import BiphasicPulseTrain
n_pulses = 2
freq = 3
amp = 180
phase_dur = 0.075
pt = BiphasicPulseTrain(freq, amp, phase_dur, interphase_dur=phase_dur,
n_pulses=n_pulses, cathodic_first=True,
stim_dur=np.maximum(np.ceil(n_pulses * 1000.0 / freq),
200))
pt.plot()
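###############################################################################
# The "bursting pulse triplets" mentioned above can be sketched the same way.
# The snippet below is illustrative only: it assumes that
# :py:class:`~pulse2percept.stimuli.BiphasicTripletTrain` takes the same
# (frequency, amplitude, phase duration) arguments as ``BiphasicPulseTrain``,
# and the 20 Hz frequency is an arbitrary choice:
from pulse2percept.stimuli import BiphasicTripletTrain
triplets = BiphasicTripletTrain(20, amp, phase_dur, interphase_dur=phase_dur,
stim_dur=200, cathodic_first=True)
triplets.plot()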
###############################################################################
# Latent addition
# ---------------
#
# "Latent addition" stimuli only show up in the supplementary materials
# (see Fig. S2.2).
#
# They are pseudo-monophasic pulse pairs, where the anodic phases were
# presented 20 ms after the end of the second cathodic pulse.
#
# The initial cathodic pulse always has a fixed amplitude of 50% of the single
# pulse threshold:
from pulse2percept.stimuli import MonophasicPulse
# Phase duration:
phase_dur = 0.075
# Single-pulse threshold determines this current:
amp_th = 20
# Cathodic phase of the standard pulse:
cath_standard = MonophasicPulse(-0.5 * amp_th, phase_dur)
###############################################################################
# The delay between the start of the conditioning pulse and the start of the
# test pulse was varied systematically (between 0.15 and 12 ms).
# The amplitude of the second pulse was varied to determine thresholds.
# Delay was varied between 0.15 and 12 ms:
delay_dur = 12
# Vary this current to determine threshold:
amp_test = 45
# Cathodic phase of the test pulse (delivered after a delay):
cath_test = MonophasicPulse(-amp_test, phase_dur, delay_dur=delay_dur)
###############################################################################
# The anodic phase were always presented 20 ms after the second cathodic phase:
anod_standard = MonophasicPulse(0.5 * amp_th, phase_dur, delay_dur=20)
anod_test = MonophasicPulse(amp_test, phase_dur, delay_dur=delay_dur)
###############################################################################
# The last step is to concatenate all the pulses into a single stimulus:
from pulse2percept.stimuli import Stimulus
data = []
time = []
time_tracker = 0
for pulse in (cath_standard, cath_test, anod_standard, anod_test):
data.append(pulse.data)
time.append(pulse.time + time_tracker)
time_tracker += pulse.time[-1]
latent_add = Stimulus(np.concatenate(data, axis=1), time=np.concatenate(time))
latent_add.plot()
| bsd-3-clause |
dssg/wikienergy | disaggregator/build/pandas/pandas/tests/test_msgpack/test_seq.py | 6 | 1439 | #!/usr/bin/env python
# coding: utf-8
from pandas import compat
from pandas.compat import u
import pandas.msgpack as msgpack
binarydata = [chr(i) for i in range(256)]
binarydata = "".join(binarydata)
if compat.PY3:
binarydata = binarydata.encode('utf-8')
def gen_binary_data(idx):
data = binarydata[:idx % 300]
return data
def test_exceeding_unpacker_read_size():
dumpf = compat.BytesIO()
packer = msgpack.Packer()
NUMBER_OF_STRINGS = 6
read_size = 16
# 5 ok for read_size=16, while 6 glibc detected *** python: double free or corruption (fasttop):
# 20 ok for read_size=256, while 25 segfaults / glibc detected *** python: double free or corruption (!prev)
# 40 ok for read_size=1024, while 50 introduces errors
# 7000 ok for read_size=1024*1024, while 8000 leads to glibc detected *** python: double free or corruption (!prev):
for idx in range(NUMBER_OF_STRINGS):
data = gen_binary_data(idx)
dumpf.write(packer.pack(data))
f = compat.BytesIO(dumpf.getvalue())
dumpf.close()
unpacker = msgpack.Unpacker(f, read_size=read_size, use_list=1)
read_count = 0
for idx, o in enumerate(unpacker):
assert type(o) == bytes
assert o == gen_binary_data(idx)
read_count += 1
assert read_count == NUMBER_OF_STRINGS
| mit |
toobaz/pandas | asv_bench/benchmarks/stat_ops.py | 1 | 4452 | import numpy as np
import pandas as pd
ops = ["mean", "sum", "median", "std", "skew", "kurt", "mad", "prod", "sem", "var"]
class FrameOps:
params = [ops, ["float", "int"], [0, 1], [True, False]]
param_names = ["op", "dtype", "axis", "use_bottleneck"]
def setup(self, op, dtype, axis, use_bottleneck):
df = pd.DataFrame(np.random.randn(100000, 4)).astype(dtype)
try:
pd.options.compute.use_bottleneck = use_bottleneck
except TypeError:
from pandas.core import nanops
nanops._USE_BOTTLENECK = use_bottleneck
self.df_func = getattr(df, op)
def time_op(self, op, dtype, axis, use_bottleneck):
self.df_func(axis=axis)
class FrameMultiIndexOps:
params = ([0, 1, [0, 1]], ops)
param_names = ["level", "op"]
def setup(self, level, op):
levels = [np.arange(10), np.arange(100), np.arange(100)]
codes = [
np.arange(10).repeat(10000),
np.tile(np.arange(100).repeat(100), 10),
np.tile(np.tile(np.arange(100), 100), 10),
]
index = pd.MultiIndex(levels=levels, codes=codes)
df = pd.DataFrame(np.random.randn(len(index), 4), index=index)
self.df_func = getattr(df, op)
def time_op(self, level, op):
self.df_func(level=level)
class SeriesOps:
params = [ops, ["float", "int"], [True, False]]
param_names = ["op", "dtype", "use_bottleneck"]
def setup(self, op, dtype, use_bottleneck):
s = pd.Series(np.random.randn(100000)).astype(dtype)
try:
pd.options.compute.use_bottleneck = use_bottleneck
except TypeError:
from pandas.core import nanops
nanops._USE_BOTTLENECK = use_bottleneck
self.s_func = getattr(s, op)
def time_op(self, op, dtype, use_bottleneck):
self.s_func()
class SeriesMultiIndexOps:
params = ([0, 1, [0, 1]], ops)
param_names = ["level", "op"]
def setup(self, level, op):
levels = [np.arange(10), np.arange(100), np.arange(100)]
codes = [
np.arange(10).repeat(10000),
np.tile(np.arange(100).repeat(100), 10),
np.tile(np.tile(np.arange(100), 100), 10),
]
index = pd.MultiIndex(levels=levels, codes=codes)
s = pd.Series(np.random.randn(len(index)), index=index)
self.s_func = getattr(s, op)
def time_op(self, level, op):
self.s_func(level=level)
class Rank:
params = [["DataFrame", "Series"], [True, False]]
param_names = ["constructor", "pct"]
def setup(self, constructor, pct):
values = np.random.randn(10 ** 5)
self.data = getattr(pd, constructor)(values)
def time_rank(self, constructor, pct):
self.data.rank(pct=pct)
def time_average_old(self, constructor, pct):
self.data.rank(pct=pct) / len(self.data)
class Correlation:
params = [["spearman", "kendall", "pearson"], [True, False]]
param_names = ["method", "use_bottleneck"]
def setup(self, method, use_bottleneck):
try:
pd.options.compute.use_bottleneck = use_bottleneck
except TypeError:
from pandas.core import nanops
nanops._USE_BOTTLENECK = use_bottleneck
self.df = pd.DataFrame(np.random.randn(1000, 30))
self.df2 = pd.DataFrame(np.random.randn(1000, 30))
self.s = pd.Series(np.random.randn(1000))
self.s2 = pd.Series(np.random.randn(1000))
def time_corr(self, method, use_bottleneck):
self.df.corr(method=method)
def time_corr_series(self, method, use_bottleneck):
self.s.corr(self.s2, method=method)
def time_corrwith_cols(self, method, use_bottleneck):
self.df.corrwith(self.df2, method=method)
def time_corrwith_rows(self, method, use_bottleneck):
self.df.corrwith(self.df2, axis=1, method=method)
class Covariance:
params = [[True, False]]
param_names = ["use_bottleneck"]
def setup(self, use_bottleneck):
try:
pd.options.compute.use_bottleneck = use_bottleneck
except TypeError:
from pandas.core import nanops
nanops._USE_BOTTLENECK = use_bottleneck
self.s = pd.Series(np.random.randn(100000))
self.s2 = pd.Series(np.random.randn(100000))
def time_cov_series(self, use_bottleneck):
self.s.cov(self.s2)
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
ericfourrier/decam | decam/feature_importance.py | 1 | 5164 | # -*- coding: utf-8 -*-
"""
@author: kevin olivier
"""
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
class FeatureImportance:
def __init__(self, df, resp):
self.dataframe = df
self.response = resp
self.predictors = pd.Series(self.dataframe.columns)
self._rf_imp = []
self._boosting_imp = []
self._rpe_imp = []
self._kbest_imp = []
self._rf_param = []
self._boosting_param = []
self._rpe_param = []
self._kbest_param = []
def rf(self, n_estimators=500, criterion='gini', max_features='auto'):
""" Returns the importances calculated by a random forest classifier.
To make the method more efficient, the result is stored in a private
attribute, so calling it again with the same parameters simply returns the
cached result.
Parameters:
* n_estimators: number of trees in the forest
* criterion: optimization criterion when building the trees.
'gini' (default) for Gini impurity
'entropy' for the information gain
* max_features: number of features to select at each split
"""
if self._rf_param == [n_estimators, criterion, max_features]:
return pd.DataFrame({'Predictors': self.predictors, 'RF': self._rf_imp})
else:
self._rf_param = [n_estimators, criterion, max_features]
if max_features == 'auto':
model = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion, max_features=int(np.sqrt(len(self.dataframe.columns))), bootstrap=True).fit(self.dataframe, self.response)
else:
model = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion, max_features=max_features, bootstrap=True).fit(self.dataframe, self.response)
self._rf_imp = model.feature_importances_/model.feature_importances_.max()
return pd.DataFrame({'Predictors': self.predictors, 'RF': self._rf_imp})
def boosting(self, n_estimators=2000, learning_rate=.1, max_depth=1):
""" Returns the importance calculated by a gradient boosting classifier.
To make the method more effective, the result is stored in a private
property, so if it is used with the same parameters again, it will only
have to print the result.
Parameters:
* n_estimators: number of boosting stages to perform
* learning_rate: coefficient by which shrink the contribution of each tree
* max_depth: maximum depth of the individual regression estimators
"""
if self._boosting_param == [n_estimators, learning_rate, max_depth]:
return pd.DataFrame({'Predictors': self.predictors, 'Boosting': self._boosting_imp})
else:
self._boosting_param = [n_estimators, learning_rate, max_depth]
model = GradientBoostingClassifier(n_estimators=n_estimators, learning_rate=learning_rate, max_depth=max_depth).fit(self.dataframe, self.response)
self._boosting_imp = model.feature_importances_/model.feature_importances_.max()
return pd.DataFrame({'Predictors': self.predictors, 'Boosting': self._boosting_imp})
def kbest(self, score_func=f_classif, k='all'):
""" Returns the scores of the k best predictors according to the ANOVA
One-way F-test.
To make the method more efficient, the result is stored in a private
attribute, so calling it again with the same parameters simply returns the
cached result.
Parameters:
* score_func: scoring function. use f_classif or chi2 for classification
* k: number of predictors to rank. If 'all' is given, the function
returns the scores for all predictors
"""
if self._kbest_param == [score_func, k]:
return pd.DataFrame({'Predictors': self.predictors, 'KBest': self._kbest_imp})
else:
self._kbest_param = [score_func, k]
kb = SelectKBest(score_func=score_func, k=k).fit(self.dataframe, self.response)
self._kbest_imp = pd.Series(['Ranked below k']*len(self.predictors))
self._kbest_imp[kb.get_support()] = kb.scores_[kb.get_support()]
return pd.DataFrame({'Predictors': self.predictors, 'KBest': self._kbest_imp})
def rpe(self, cutoff=.90, method='pearson'):
""" Returns a series of boolean stating whether the corresponding predictor
remains after performing a recursive pairwise elimination.
To make the method more effective, the result is stored in a private
property, so if it is used with the same parameters again, it will only
have to print the result.
Parameters:
* cutoff: correlation cutoff to stop the algorithm
* method: method to compute the correlation matrix
'pearson'
'kendall'
'spearman'
"""
if self._rpe_param == [cutoff, method]:
return pd.DataFrame({'Predictors': self.predictors, 'RPE': self._rpe_imp})
else:
self._rpe_param = [cutoff, method]
rpe_list = findcorr(self.dataframe, cutoff=cutoff, method=method)
self._rpe_imp = self.predictors.apply(lambda x: (x not in rpe_list))
return pd.DataFrame({'Predictors': self.predictors, 'RPE': self._rpe_imp})
def summary(self):
""" Returns a dataframe with the result of all the methods. """
return self.rf().merge(self.boosting(), on='Predictors').merge(self.kbest(), on='Predictors').merge(self.rpe(), on='Predictors')
pass
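# Note: `findcorr`, used in `rpe` above, is neither defined nor imported in
# this module. The function below is a minimal sketch under the assumption
# that it should return the list of predictors to drop via recursive pairwise
# elimination on the correlation matrix; the name and behaviour are inferred
# from the docstring, not taken from the original code.
def findcorr(df, cutoff=.90, method='pearson'):
    corr = df.corr(method=method).abs()
    to_drop = []
    while len(corr.columns) > 1:
        # mask the diagonal so self-correlations are ignored
        values = corr.values.copy()
        np.fill_diagonal(values, 0)
        if values.max() < cutoff:
            break
        # of the most correlated pair, drop the predictor with the highest
        # mean absolute correlation to the remaining predictors
        i, j = np.unravel_index(values.argmax(), values.shape)
        pair = [corr.columns[i], corr.columns[j]]
        worst = max(pair, key=lambda c: corr[c].mean())
        to_drop.append(worst)
        corr = corr.drop(worst).drop(worst, axis=1)
    return to_drop
# Hypothetical usage of FeatureImportance, assuming `X` is a DataFrame of
# predictors and `y` the class labels (the variable names are illustrative):
#
#     fi = FeatureImportance(X, y)
#     fi.rf(n_estimators=200)   # random-forest importances
#     fi.kbest(k=10)            # ANOVA F-test scores
#     fi.rpe(cutoff=.90)        # survivors of recursive pairwise elimination
#     fi.summary()              # merged table across all methods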
| mit |
terkkila/scikit-learn | sklearn/pipeline.py | 162 | 21103 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
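# Illustrative note (added commentary, not part of the original module): the
# '__' prefixes in fit_params are routed per step by _pre_transform above; for a
# hypothetical pipeline step named 'svc',
#     pipe.fit(X, y, svc__sample_weight=w)
# forwards {'sample_weight': w} to that step's fit (or fit_transform) call.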
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
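# Illustrative sketch (not from the original file): duplicate estimator types are
# disambiguated with numeric suffixes by _name_estimators.
# >>> from sklearn.decomposition import PCA
# >>> [name for name, _ in _name_estimators([PCA(), PCA()])]
# ['pca-1', 'pca-2']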
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
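# Hypothetical usage sketch (assumed names, not part of the original file): the
# auto-generated lowercase step names can be used for '__'-style parameters,
# e.g. in a grid-search parameter grid.
# >>> p = make_pipeline(StandardScaler(), GaussianNB())
# >>> sorted(p.named_steps.keys())
# ['gaussiannb', 'standardscaler']
# >>> p.set_params(standardscaler__with_mean=False)  # doctest: +ELLIPSIS
# Pipeline(steps=[...])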
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
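# Illustrative sketch (not in the original module; names and weights are made up):
# combining two feature extractors and down-weighting one of them.
# >>> from sklearn.decomposition import PCA, TruncatedSVD
# >>> union = FeatureUnion([('pca', PCA(n_components=2)),
# ...                       ('svd', TruncatedSVD(n_components=2))],
# ...                      transformer_weights={'svd': 0.5})
# >>> # union.fit_transform(X) hstacks the PCA output with 0.5 * the SVD output.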
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
BuddyVolly/OpenSARKit | lib/python/ost_regressor.py | 2 | 6757 | #! /usr/bin/python
# thanks to the great tutorial of Carlos de la Torre
# http://www.machinalis.com/blog/python-for-geospatial-data-processing/
import numpy as np
import os
from osgeo import gdal, ogr, osr
from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
# Tell GDAL to throw Python exceptions, and register all drivers
gdal.UseExceptions()
gdal.AllRegister()
def regressor(raster, vector, field, ofile):
print("------------------------------------------------------")
print("INFO: Opening the vector file")
# Open the vector dataset from the file
vds = ogr.Open(vector)
# Make sure the dataset exists -- it would be None if we couldn't open it
if not vds:
print('ERROR: could not open vector dataset. File does not exist.')
exit()
### Let's get the driver from this file
vdriver = vds.GetDriver()
### How many layers are contained in this Shapefile?
layer_count = vds.GetLayerCount()
### What is the name of the first layer?
layer = vds.GetLayerByIndex(0)
### What is the layer's geometry? is it a point? a polyline? a polygon?
# First read in the geometry - but this is the enumerated type's value
geometry = layer.GetGeomType()
# So we need to translate it to the name of the enum
geometry_name = ogr.GeometryTypeToName(geometry)
if geometry_name != 'Polygon':
print("ERROR: Shapefile is not a polygon layer.")
exit()
### What is the layer's projection?
# Get the spatial reference
spatial_ref = layer.GetSpatialRef()
# Export this spatial reference to something we can read... like the Proj4
proj4 = spatial_ref.ExportToProj4()
epsg = spatial_ref.GetAttrValue("GEOGCS|AUTHORITY", 1)
### How many features are in the layer?
feature_count = layer.GetFeatureCount()
print('INFO: Layer has {n} features'.format(n=feature_count))
### How many fields are in the shapefile, and what are their names?
# First we need to capture the layer definition
defn = layer.GetLayerDefn()
# How many fields
field_count = defn.GetFieldCount()
# What are their names?
print('INFO: Dataset driver is {n}'.format(n=vdriver.name))
print('INFO: The shapefile has {n} layer(s)'.format(n=layer_count))
print('INFO: The layer is named: {n}'.format(n=layer.GetName()))
print("INFO: The layer's geometry is: {geom}".format(geom=geometry_name))
print('INFO: Layer projection is EPSG: {epsg}'.format(epsg=epsg))
print('INFO: Layer has {n} fields'.format(n=field_count))
print('INFO: Field names are: ')
j = 0
for i in range(field_count):
field_defn = defn.GetFieldDefn(i)
print('\t{name} - {datatype}'.format(name=field_defn.GetName(), datatype=field_defn.GetTypeName()))
if field_defn.GetName() == field:
j = 1
# throw an error if field name is not available
if j != 1:
print('ERROR: No field named {f}'.format(f=field))
exit()
print("------------------------------------------------------\n")
print("------------------------------------------------------")
print "INFO: Opening raster file."
# Open the vector dataset from the file
rds = gdal.Open(raster)
# Make sure the dataset exists -- it would be None if we couldn't open it
if not rds:
print('ERROR: could not open raster dataset. File does not exist.')
exit()
cols = rds.RasterXSize
rows = rds.RasterYSize
bands = rds.RasterCount
geo_transform = rds.GetGeoTransform()
rprojection = rds.GetProjectionRef()
rsrs=osr.SpatialReference(wkt=rprojection)
# print out some infos
print "INFO: Input raster file has " + str(rows) + " rows and " + str(cols) + " columns."
print "INFO: Input raster file has " + str(bands) + " bands."
print "INFO: Input raster projection is EPSG: " + rsrs.GetAttrValue('GEOGCS|AUTHORITY', 1)
print("------------------------------------------------------\n")
print("------------------------------------------------------")
print("INFO: Rasterizing the training data.")
###Rasterize training data
# Create the raster dataset
ras_driver = gdal.GetDriverByName('GTiff')
out_raster_ds = ras_driver.Create(ofile, cols, rows, 1, gdal.GDT_Float32)
# Set the ROI image's projection and extent to our input raster's projection and extent
out_raster_ds.SetProjection(rprojection)
out_raster_ds.SetGeoTransform(geo_transform)
# Fill our output band with the 0 blank, no class label, value
b = out_raster_ds.GetRasterBand(1)
b.Fill(0)
# Rasterize the shapefile layer to our new dataset
status = gdal.RasterizeLayer(out_raster_ds, # output to our new dataset
[1], # output to our new dataset's first band
layer, # rasterize this layer
options = ['ALL_TOUCHED=FALSE', # rasterize all pixels touched by polygons
"ATTRIBUTE=%s" % field] # put raster values according to the 'id' field values
)
# Close dataset
out_raster_ds = None
if status != 0:
print("Rasterize of training data failed")
exit()
else:
print("Successfully rasterized the training data.")
def main():
from optparse import OptionParser
from time import time
usage = "usage: %prog [options] -r inputstack -v input vector -f vector field -o output file "
parser = OptionParser()
parser.add_option("-r", "--inputraster", dest="iraster",
help="select an input raster stack", metavar="<input raster stack>")
parser.add_option("-v", "--inputvector", dest="ivector",
help="select a training data shape file ", metavar="<input training vector>")
parser.add_option("-f", "--vectorfield", dest="vfield",
help="select the column of the shapefile with the biomass values", metavar="<vector field>")
parser.add_option("-o", "--outputfile", dest="ofile",
help="Outputfile prefix ", metavar="<outputfile prefix>")
(options, args) = parser.parse_args()
if not options.iraster:
parser.error("Input stack is empty")
print usage
if not options.ivector:
parser.error("Input vector is empty")
print usage
if not options.vfield:
parser.error("No column name selected")
print usage
if not options.ofile:
parser.error("Output file is empty")
print usage
currtime = time()
print options.ofile
regressor(options.iraster,options.ivector,options.vfield,options.ofile)
print 'time elapsed:', time() - currtime
if __name__ == "__main__":
main()
| mit |
MatthieuBizien/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 55 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n / 2]
Y_train = Y[:n / 2]
X_test = X[n / 2:]
Y_test = Y[n / 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
darioizzo/optimal_landing | indirect_method/falcon_landing.py | 1 | 16719 | """
Implements an indirect method to solve the optimal control
problem of a varying mass spacecraft controlled by one
thruster capable of vectoring.
Dario Izzo 2016
"""
from PyGMO.problem._base import base
from numpy.linalg import norm
from math import sqrt, sin, cos, atan2, pi
from scipy.integrate import odeint
from numpy import linspace, vstack, hstack
from copy import deepcopy
import sys
import numpy as np
class tv_landing(base):
def __init__(
self,
state0 = [0., 1000., 20., -5., 0., 0., 10000.],
statet = [0., 0., 0., 0., 0., 0., 9758.695805],
c1 = 5886000.*0.3,
c2 = 311. * 9.81,
c3 = 300.,
g = 9.81,
homotopy = 0.,
pinpoint = False,
normas = 80000.
):
"""
USAGE: tv_landing(state0, statet, c1, c2, c3, g, homotopy, pinpoint):
* state0: initial state [x, y, vx, vy, theta, omega, m] in m, m, m/s, m/s, rad, rad/s, kg
* statet: target state [x, y, vx, vy, theta, omega, m] in m, m, m/s, m/s, rad, rad/s, kg
* c1: maximum thrust of the main thruster [N]
* c2: veff, Isp*g0 (m / s)
* c3: characteristic length (I / m / d) [m]
* g: planet gravity [m/s**2]
* homotopy: homotopy parameter, 0->QC, 1->MOC
* pinpoint: if True toggles the final constraint on the landing x
"""
super(tv_landing, self).__init__(8, 0, 1, 8, 0, 1e-3)
# We store the raw inputs for convenience
self.state0_input = state0
self.statet_input = statet
# We define the non-dimensional units (will use these from here on)
self.R = 1000.
self.V = 100.
self.M = normas
self.A = (self.V * self.V) / self.R
self.T = self.R / self.V
self.F = self.M * self.A
self.RAD = 1
# We store the parameters
self.c1 = c1 / self.F
self.c2 = c2 / self.V
self.c3 = c3 / self.R
self.g = g / self.A
print(self.c1)
# We compute the initial and final state in the new units
self.state0 = self._non_dim(self.state0_input)
self.statet = self._non_dim(self.statet_input)
# We set the bounds (these will only be used to initialize the population)
self.set_bounds([-1] * 7 + [1. / self.T], [1] * 7 + [100. / self.T])
# Activates a pinpoint landing
self.pinpoint = pinpoint
# Selects the homotopy parameter, 0->QC, 1->MOC
self.homotopy = homotopy
def _objfun_impl(self, x):
return(1.,) # constraint satisfaction, no objfun
def _compute_constraints_impl(self, x):
# Perform one forward shooting
xf, info = self._shoot(x)
# Assembling the equality constraint vector
ceq = list([0]*8)
# Final conditions
if self.pinpoint:
#Pinpoint landing x is fixed lx is free
ceq[0] = (xf[-1][0] - self.statet[0] ) * 1
else:
#Transversality condition: x is free lx is 0
ceq[0] = xf[-1][7] * 1
ceq[1] = (xf[-1][1] - self.statet[1] ) * 1
ceq[2] = (xf[-1][2] - self.statet[2] ) * 1
ceq[3] = (xf[-1][3] - self.statet[3] ) * 1
ceq[4] = (xf[-1][4] - self.statet[4] ) * 1
# Transversality condition on omega and mass (free)
ceq[5] = xf[-1][12] * 1
ceq[6] = xf[-1][13] * 1
# Free time problem, Hamiltonian must be 0
ceq[7] = self._hamiltonian(xf[-1]) * 1
return ceq
def _hamiltonian(self, full_state):
state = full_state[:7]
costate = full_state[7:]
# Applying Pontryagin minimum principle
controls = self._pontryagin_minimum_principle(full_state)
# Computing the R.H.S. of the state eom
f_vett = self._eom_state(state, controls)
# Assembling the Hamiltonian
H = 0.
for l, f in zip(costate, f_vett):
H += l * f
# Adding the integral cost function (WHY -)
H += self._cost(state, controls)
return H
def _cost(self,state, controls):
c1 = self.c1
c2 = self.c2
c3 = self.c3
u, ut = controls
retval = self.homotopy * c1/c2 * u + (1 - self.homotopy) * c1**2 / c2 * u**2
### !! retval = self.homotopy * c1*c1 / c2 * u + (1 - self.homotopy) * c1**2 / c2 *
### u**2
return retval
def _eom_state(self, state, controls):
# Renaming variables
x,y,vx,vy,theta,omega,m = state
g = self.g
c1 = self.c1
c2 = self.c2
c3 = self.c3
u, ut = controls
tdotit = ut[0] * cos(theta) - ut[1] * sin(theta)
# Equations for the state
dx = vx
dy = vy
dvx = c1 * u / m * ut[0]
dvy = c1 * u / m * ut[1] - g
dtheta = omega
domega = - c1 / c3 * u / m * tdotit
dm = - c1 / c2 * u
return [dx, dy, dvx, dvy, dtheta, domega, dm]
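# Summary of the dynamics implemented above (added commentary, same equations):
#   dx = vx                         dy = vy
#   dvx = (c1*u/m) * ut_x           dvy = (c1*u/m) * ut_y - g
#   dtheta = omega                  domega = -(c1*u)/(c3*m) * (ut_x*cos(theta) - ut_y*sin(theta))
#   dm = -(c1/c2) * u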
def _eom_costate(self, full_state, controls):
# Renaming variables
x,y,vx,vy,theta,omega,m,lx,ly,lvx,lvy,ltheta,lomega,lm = full_state
c1 = self.c1
c2 = self.c2
c3 = self.c3
u, ut = controls
# Equations for the costate
tdotit = ut[0] * cos(theta) - ut[1] * sin(theta)
tdotitheta = ut[0] * sin(theta) + ut[1] * cos(theta)
lvdott = lvx * ut[0] + lvy * ut[1]
dlx = 0.
dly = 0.
dlvx = - lx
dlvy = - ly
dltheta = - lomega / c3 * c1 * u / m * tdotitheta
dlomega = - ltheta
dlm = c1 / m**2 * u * (lvdott - lomega / c3 * tdotit)
return [dlx, dly, dlvx, dlvy, dltheta, dlomega, dlm]
def _pontryagin_minimum_principle(self, full_state):
# Renaming variables
x,y,vx,vy,theta,omega,m,lx,ly,lvx,lvy,ltheta,lomega,lm = full_state
c1 = self.c1
c2 = self.c2
c3 = self.c3
lauxx = lvx - lomega / c3 * cos(theta)
lauxy = lvy + lomega / c3 * sin(theta)
laux = sqrt(lauxx**2 + lauxy**2)
# ut
ut = [0]*2
ut[0] = - lauxx / laux
ut[1] = - lauxy / laux
# u
u = 0
if self.homotopy==1:
S = 1. - lm - laux * c2 / m
if S >= 0:
u=0.
if S < 0:
u=1.
else:
u = 1. / 2. / c1 / (1.-self.homotopy) * (lm + laux * c2 / m - self.homotopy)
# u = 1. / 2. / c1 / (1.-self.homotopy) * (lm + laux * c2 / m - self.homotopy)
# u = 1. / 2. / c1 / (1.-self.homotopy) * (lm + laux * c2 / m - self.homotopy)
u = min(u,1.) # NOTE: this can be increased to help convergence?
u = max(u,0.)
return u, ut
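# Worked summary (added commentary, restating the logic above): the thrust
# direction is ut = -laux / |laux|, and the magnitude follows
#   homotopy == 1 (mass optimal): bang-bang on S = 1 - lm - |laux|*c2/m,
#                                 u = 0 if S >= 0, u = 1 otherwise;
#   homotopy  < 1 (quadratic blend): u = (lm + |laux|*c2/m - homotopy) / (2*c1*(1 - homotopy)),
#                                    clipped to [0, 1].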
def _eom(self, full_state, t):
# Applying Pontryagin minimum principle
state = full_state[:7]
controls = self._pontryagin_minimum_principle(full_state)
# Equations for the state
dstate = self._eom_state(state, controls)
# Equations for the co-states
dcostate = self._eom_costate(full_state, controls)
return dstate + dcostate
def _shoot(self, x):
# Numerical Integration
xf, info = odeint(lambda a,b: self._eom(a,b), self.state0 + list(x[:-1]), linspace(0, x[-1],1000), rtol=1e-5, atol=1e-5,
full_output=1,
mxstep=5000, hmax=0.01, hmin=1e-12, printmessg=False)
return xf, info
def _simulate(self, x, tspan):
# Numerical Integration
print('simulate')
xf= odeint(lambda a,b: self._eom(a,b), self.state0 + list(x[:-1]), tspan, rtol=1e-6, atol=1e-6, full_output=0,
mxstep=5000,hmax=0.01, hmin=1e-8, printmessg=False)
info = []
return xf, info
def _non_dim(self, state):
xnd = deepcopy(state)
xnd[0] /= self.R
xnd[1] /= self.R
xnd[2] /= self.V
xnd[3] /= self.V
xnd[4] /= self.RAD
xnd[5] *= (self.RAD*self.T)
xnd[6] /= self.M
return xnd
def _dim_back(self, state):
xd = deepcopy(state)
xd[0] *= self.R
xd[1] *= self.R
xd[2] *= self.V
xd[3] *= self.V
xd[4] *= 1.
xd[5] /= self.T
xd[6] *= self.M
return xd
def plot(self, x):
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
mpl.rcParams['legend.fontsize'] = 10
# Producing the data
tspan = linspace(0, x[-1], 300)
full_state, info = self._simulate(x, tspan)
# Putting dimensions back
res = list()
controls = list()
ux = list(); uy=list()
for line in full_state:
res.append(self._dim_back(line[:7]))
controls.append(self._pontryagin_minimum_principle(line))
ux.append(controls[-1][0] * controls[-1][1][0])
uy.append(controls[-1][0] * controls[-1][1][1])
tspan = [it * self.T for it in tspan]
x = list(); y=list()
vx = list(); vy = list()
theta = list()
omega = list()
m = list()
for state in res:
x.append(state[0])
y.append(state[1])
vx.append(state[2])
vy.append(state[3])
theta.append(state[4])
omega.append(state[5])
m.append(state[6])
fig = plt.figure()
ax = fig.gca()
ax.plot(x, y, color='r', label='Trajectory')
ax.quiver(x, y, ux, uy, label='Thrust', pivot='tail', width=0.001)
ax.set_ylim(0,self.state0_input[1]+500)
f, axarr = plt.subplots(3, 2)
axarr[0,0].plot(x, y)
axarr[0,0].set_xlabel('x'); axarr[0,0].set_ylabel('y');
axarr[1,0].plot(vx, vy)
axarr[1,0].set_xlabel('vx'); axarr[1,0].set_ylabel('vy');
axarr[2,0].plot(tspan, theta)
axarr[2,0].set_xlabel('t'); axarr[2,0].set_ylabel('theta');
axarr[0,1].plot(tspan, [controls[ix][0] for ix in range(len(controls))],'r')
axarr[0,1].set_ylabel('u')
axarr[0,1].set_xlabel('t')
axarr[1,1].plot(tspan, [controls[ix][1][0] for ix in range(len(controls))],'k')
axarr[1,1].set_ylabel('sin(ut)')
axarr[1,1].set_xlabel('t')
axarr[2,1].plot(tspan, m)
axarr[2,1].set_xlabel('t'); axarr[2,1].set_ylabel('m');
plt.ion()
plt.show()
return axarr
def human_readable_extra(self):
s = "\n\tDimensional inputs:\n"
s = s + "\tStarting state: " + str(self.state0_input) + "\n"
s = s + "\tTarget state: " + str(self.statet_input) + "\n"
s = s + "\tThrusters maximum magnitude [N]: " + str(self.c1 * self.F) + "\n"
s = s + "\tIsp*g0: " + str(self.c2 * self.V) + ", gravity: " + str(self.g * self.A) + "\n"
s = s + "\n\tNon-dimensional inputs:\n"
s = s + "\tStarting state: " + str(self.state0) + "\n"
s = s + "\tTarget state: " + str(self.statet) + "\n"
s = s + "\tThrusters maximum magnitude [N]: " + str(self.c1) + "\n"
s = s + "\tIsp*g0: " + str(self.c2) + ", gravity: " + str(self.g) + "\n\n"
s = s + "\tHomotopy parameter: " + str(self.homotopy)
s = s + "\n\tPinpoint?: " + str(self.pinpoint)
return s
def produce_data(self, x, npoints):
# Producing the data
tspan = linspace(0, x[-1], npoints)
full_state, info = self._simulate(x, tspan)
# Putting dimensions back
res = list()
controls = list()
u1 = list(); u2 = list()
for line in full_state:
res.append(self._dim_back(line[:7]))
controls.append(self._pontryagin_minimum_principle(line))
u1.append(controls[-1][0])
u2.append(controls[-1][1])
u1 = vstack(u1)
u2 = vstack(u2)
tspan = [it * self.T for it in tspan]
x = list(); y=list()
vx = list(); vy = list()
theta = list(); omega = list()
m = list()
for state in res:
x.append(state[0])
y.append(state[1])
vx.append(state[2])
vy.append(state[3])
theta.append(state[4])
omega.append(state[5])
m.append(state[6])
tspan = vstack(tspan)
x = vstack(x)
y = vstack(y)
vx = vstack(vx)
vy = vstack(vy)
theta =vstack(theta)
omega = vstack(omega)
m = vstack(m)
return (hstack((tspan, x, y, vx, vy, theta, omega, m)), hstack((u1, u2)))
if __name__ == "__main__":
from PyGMO import *
from random import random
algo = algorithm.snopt(200, opt_tol=1e-5, feas_tol=1e-5)
#algo = algorithm.scipy_slsqp(max_iter = 1000,acc = 1E-8,epsilon = 1.49e-08, screen_output = True)
algo.screen_output = False
# Define the starting area (x0 will be irrelevant if pinpoint is not True)
x0b = [-1, 1]
y0b = [500, 2000]
vx0b = [-1, 1]
vy0b = [5, -40]
m0b = [8000, 12000]
# Problem definition
# Attempting to solve the QC problem
n_attempts = 50
for i in range(1, n_attempts + 1):
x0 = random() * (x0b[1] - x0b[0]) + x0b[0]
y0 = random() * (y0b[1] - y0b[0]) + y0b[0]
vx0 = random() * (vx0b[1] - vx0b[0]) + vx0b[0]
vy0 = random() * (vy0b[1] - vy0b[0]) + vy0b[0]
m0 = random() * (m0b[1] - m0b[0]) + m0b[0]
theta0 = 0.
omega0 = 0.
state0 = [x0, y0, vx0, vy0, theta0, omega0, m0]
state0 = [-1.83609715e+00,
1.44559645e+03,
6.02978623e-01, -2.16906878e+02, 4.25482258e-02,
-2.16631406e-02, 7.91244923e+04]
prob = tv_landing(state0 = state0, pinpoint=True,
homotopy=0.)
print("IC: {}".format(state0))
# Start with attempts
print("Attempt # {}".format(i), end="")
pop = population(prob)
x = (2.7725139411905354e-10,
0.005724760124809018,
2.950847498026487e-10,
-0.12013195562429928,
2.996361244192239e-10,
1.33450685205765e-10,
0.3936895621144142,
1.8687501772723236)
x=(-0.03190509234642118,
-0.04496197939657747,
-0.03769311123455317,
-0.1650460632906548,
-0.05846757985444901,
-0.019228175701329647,
0.5218293678234064,
1.490012228926392)
pop.push_back(x)
#pop.push_back(x0)
pop = algo.evolve(pop)
# Log constraints and chormosome
print("\nc: ",end="")
print(["{0:.2g}".format(it) for it in pop[0].cur_c])
print("x: ",end="")
print(["{0:.2g}".format(it) for it in pop[0].cur_x])
# If succesfull proceed
if (prob.feasibility_x(pop[0].cur_x)):
break
if not prob.feasibility_x(pop[0].cur_x):
print("No QC solution! Ending here :(")
sys.exit(0)
else:
print("Found QC solution!! Starting Homotopy")
x = pop[0].cur_x
print("state0 = {}".format(state0))
print("x = {}".format(x))
#sys.exit(0)
# We proceed to solve by homotopy the mass optimal control
# Minimum and maximum step for the continuation
h_min = 1e-8
h_max = 0.2
# Starting step
h = 0.2
trial_alpha = h
alpha = 0
x = pop[0].cur_x
algo = algorithm.scipy_slsqp(max_iter = 40,acc = 1E-8,epsilon = 1.49e-08, screen_output = True)
algo.screen_output = False
while True:
if trial_alpha > 1:
trial_alpha = 1.
print("{0:.5g}, \t {1:.5g} \t".format(alpha, trial_alpha), end="")
print("({0:.5g})\t".format(h), end="")
prob = tv_landing(state0 = state0, pinpoint=True, homotopy=trial_alpha)
pop = population(prob)
pop.push_back(x)
pop = algo.evolve(pop)
if (prob.feasibility_x(pop[0].cur_x)):
x = pop[0].cur_x
if trial_alpha == 1:
print(" Success")
break
print(" Success")
h = h * 2.
h = min(h, h_max)
alpha = trial_alpha
trial_alpha = trial_alpha + h
else:
print(" - Failed, ", end="")
print("norm c: {0:.4g}".format(norm(pop[0].cur_c)))
h = h * 0.5
if h < h_min:
print("\nContinuation step too small aborting :(")
sys.exit(0)
trial_alpha = alpha + h
| lgpl-3.0 |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 153 | 6723 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
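# Illustrative sketch (hypothetical names, not part of the original file): nested
# parameters follow the '<component>__<parameter>' convention, so for an estimator
# `est` whose public `base_estimator` attribute is itself an estimator exposing `C`,
#     est.set_params(base_estimator__C=10.)
# is routed to est.base_estimator.set_params(C=10.).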
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
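# Illustrative usage sketch (made-up data, not part of the original file): the
# naive splitter mirrors sklearn's train_test_split for positional arrays.
# >>> X = np.arange(20).reshape(10, 2); y = np.arange(10)
# >>> X_train, X_test, y_train, y_test = _train_test_split(
# ...     X, y, test_size=0.2, random_state=0)
# >>> X_train.shape, X_test.shape
# ((8, 2), (2, 2))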
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| gpl-3.0 |
shikhardb/scikit-learn | sklearn/decomposition/pca.py | 24 | 22932 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Michael Eickenberg <michael.eickenberg@inria.fr>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected onto the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original : array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
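Notes
-----
Illustrative sketch (not part of the original docstring): for a centred
row ``x`` and ``P = get_precision()``, the value computed below is
``ll(x) = -0.5 * (x.dot(P).dot(x) + n_features * log(2 * pi) - logdet(P))``.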
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If n_components is not set then all components are stored and the sum
of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected onto the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
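# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module; the data below is
# made up and the exact transformed values depend on the random state):
#
#     >>> import numpy as np
#     >>> X = np.random.RandomState(0).randn(100, 5)
#     >>> rpca = RandomizedPCA(n_components=2, random_state=0)
#     >>> X_2d = rpca.fit_transform(X)            # shape (100, 2)
#     >>> X_back = rpca.inverse_transform(X_2d)   # approximate reconstruction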
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/ensemble/plot_voting_decision_regions.py | 1 | 3238 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# nodebox section
if __name__ == '__builtin__':
# we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y,
s=20, edgecolor='k')
axarr[idx[0], idx[1]].set_title(tt)
# plt.show()
pltshow(plt)
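# Illustrative sketch (not part of the original example): with weights [2, 1, 2]
# the soft vote is a weighted average of the per-classifier probabilities, which
# could be reproduced manually as:
#
#     probas = np.asarray([c.predict_proba(X[:1]) for c in (clf1, clf2, clf3)])
#     avg = np.average(probas, axis=0, weights=[2, 1, 2])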
| mit |
DGrady/pandas | pandas/tests/io/msgpack/test_format.py | 25 | 2882 | # coding: utf-8
from pandas.io.msgpack import unpackb
def check(src, should, use_list=0):
assert unpackb(src, use_list=use_list) == should
def testSimpleValue():
check(b"\x93\xc0\xc2\xc3", (None, False, True, ))
def testFixnum():
check(b"\x92\x93\x00\x40\x7f\x93\xe0\xf0\xff", ((0,
64,
127, ),
(-32,
-16,
-1, ), ))
def testFixArray():
check(b"\x92\x90\x91\x91\xc0", ((), ((None, ), ), ), )
def testFixRaw():
check(b"\x94\xa0\xa1a\xa2bc\xa3def", (b"", b"a", b"bc", b"def", ), )
def testFixMap():
check(b"\x82\xc2\x81\xc0\xc0\xc3\x81\xc0\x80",
{False: {None: None},
True: {None: {}}}, )
def testUnsignedInt():
check(b"\x99\xcc\x00\xcc\x80\xcc\xff\xcd\x00\x00\xcd\x80\x00"
b"\xcd\xff\xff\xce\x00\x00\x00\x00\xce\x80\x00\x00\x00"
b"\xce\xff\xff\xff\xff",
(0,
128,
255,
0,
32768,
65535,
0,
2147483648,
4294967295, ), )
def testSignedInt():
check(b"\x99\xd0\x00\xd0\x80\xd0\xff\xd1\x00\x00\xd1\x80\x00"
b"\xd1\xff\xff\xd2\x00\x00\x00\x00\xd2\x80\x00\x00\x00"
b"\xd2\xff\xff\xff\xff", (0,
-128,
-1,
0,
-32768,
-1,
0,
-2147483648,
-1, ))
def testRaw():
check(b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00"
b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab",
(b"", b"a", b"ab", b"", b"a", b"ab"))
def testArray():
check(b"\x96\xdc\x00\x00\xdc\x00\x01\xc0\xdc\x00\x02\xc2\xc3\xdd\x00"
b"\x00\x00\x00\xdd\x00\x00\x00\x01\xc0\xdd\x00\x00\x00\x02"
b"\xc2\xc3", ((), (None, ), (False, True), (), (None, ),
(False, True)))
def testMap():
check(b"\x96"
b"\xde\x00\x00"
b"\xde\x00\x01\xc0\xc2"
b"\xde\x00\x02\xc0\xc2\xc3\xc2"
b"\xdf\x00\x00\x00\x00"
b"\xdf\x00\x00\x00\x01\xc0\xc2"
b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2", ({}, {None: False},
{True: False,
None: False}, {},
{None: False},
{True: False,
None: False}))
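# Illustrative sketch (not part of the original test file): assuming
# pandas.io.msgpack also exposes packb, the check() helper above can be driven
# with freshly packed bytes instead of hand-written ones, e.g.
#
#     from pandas.io.msgpack import packb
#     check(packb((1, 2, 3)), (1, 2, 3))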
| bsd-3-clause |
RayMick/scikit-learn | sklearn/externals/joblib/parallel.py | 79 | 35628 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Python 3.4+ use the 'forkserver' start method by default: this makes it
# possible to avoid crashing 3rd party libraries that manage an internal thread
# pool that does not tolerate forking
if hasattr(mp, 'get_start_method'):
method = os.environ.get('JOBLIB_START_METHOD')
if (method is None and mp.get_start_method() == 'fork'
and 'forkserver' in mp.get_all_start_methods()):
method = 'forkserver'
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
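# Illustrative sketch (not part of the original source): a BatchedCalls object
# simply replays a slice of (func, args, kwargs) tuples when called, e.g.
#
#     >>> batch = BatchedCalls([(abs, (-1,), {}), (abs, (-2,), {})])
#     >>> batch()
#     [1, 2]
#     >>> len(batch)
#     2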
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction `Parallel(backend='threading')`.
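Illustrative example (a sketch, not part of the original docstring)::
    >>> from math import sqrt
    >>> func, args, kwargs = delayed(sqrt)(4)
    >>> func(*args, **kwargs)
    2.0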
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' makes it possible to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = DEFAULT_MP_CONTEXT
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
# This lock is used to coordinate the main thread of this process with
# the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
# Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
# while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
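    # Illustrative note (not part of the original source): with the module-level
    # constants above, a batch of 4 tasks finishing in 0.05s (< 0.2s) gives
    # ideal_batch_size = int(4 * 0.2 / 0.05) = 16 and the next batch_size is
    # max(2 * 16, 1) = 32; a batch taking longer than 2s with batch_size >= 2
    # is simply halved.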
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print messages only a limited number of times,
# controlled by 'verbose'
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
# we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the while loop above.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
akionakamura/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
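# Illustrative variation (not part of the original example): the docstring above
# suggests decreasing the number of neighbors in the connectivity graph; only
# the graph construction needs to change, e.g.
#
#     knn_graph = kneighbors_graph(X, 5, include_self=False)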
| bsd-3-clause |
eg-zhang/h2o-2 | py/testdir_single_jvm/test_GLM2_score_same.py | 9 | 4631 |
## Dataset created from this:
#
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
import unittest, time, sys, copy
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_util, h2o_import as h2i
def glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs, pollTimeoutSecs, **kwargs):
print "\nStarting GLM of", csvFilename
hex_key = csvFilename + ".hex"
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hex_key,
timeoutSecs=60, pollTimeoutSecs=pollTimeoutSecs)
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "GLM in", (time.time() - start), "secs (python)"
h2o_glm.simpleCheckGLM(self, glm, "C8", **kwargs)
# compare this glm to the first one. since the files are replications, the results
# should be similar?
glm_model = glm['glm_model']
validation = glm_model['submodels'][0]['validation']
modelKey = glm_model['_key']
return modelKey, validation, parseResult
def glm_score(self, csvFilename, bucket, csvPathname, modelKey, modelPathname, timeoutSecs=30, pollTimeoutSecs=30):
print "\nStarting GLM score of", csvFilename
hex_key = csvFilename + ".hex"
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=hex_key,
timeoutSecs=timeoutSecs, pollTimeoutSecs=pollTimeoutSecs)
y = "10"
# save and restore the model
h2o.nodes[0].save_model(model=modelKey, path=modelPathname, force=1)
# FIX! should we remove the existing key to make sure it loads? really should try both cases (existing or not)
h2o.nodes[0].load_model(path=modelPathname)
start = time.time()
glmScore = h2o_cmd.runScore(dataKey=parseResult['destination_key'], modelKey=modelKey,
vactual=y, vpredict=1, expectedAuc=0.5, doAUC=False)
print "GLMScore in", (time.time() - start), "secs (python)"
h2o.verboseprint(h2o.dump_json(glmScore))
# compare this glm to the first one. since the files are replications,
# the results
# should be similar?
# UPDATE: format for returning results is slightly different than normal GLM
if self.glmScore1:
h2o_glm.compareToFirstGlm(self, 'mse', glmScore, self.glmScore1)
else:
self.glmScore1 = copy.deepcopy(glmScore)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
glmScore1 = {}
def test_GLM2_score_same(self):
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
bucket = 'home-0xdiag-datasets'
csvFilename = "1mx10_hastie_10_2.data.gz"
csvPathname = 'standard' + '/' + csvFilename
y = "10"
kwargs = {'response': y, 'alpha': 0, 'family': 'gaussian'}
(modelKey, validation1, parseResult) = glm_doit(self, csvFilename, bucket, csvPathname,
timeoutSecs=60, pollTimeoutSecs=60, **kwargs)
print "Use", modelKey, "model on 2x and 4x replications and compare results to 1x"
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
fullPathname = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
h2o_util.file_gunzip(fullPathname, pathname1x)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
modelPathname = SYNDATASETS_DIR + '/model_' + filename2x
bucket = None
h2o_util.file_cat(pathname1x,pathname1x,pathname2x)
glm_score(self,filename2x, bucket, pathname2x, modelKey, modelPathname, timeoutSecs=60, pollTimeoutSecs=60)
filename4x = "hastie_4x.data"
pathname4x = SYNDATASETS_DIR + '/' + filename4x
modelPathname = SYNDATASETS_DIR + '/model_' + filename4x
h2o_util.file_cat(pathname2x, pathname2x, pathname4x)
print "Iterating 3 times on this last one"
for i in range(3):
print "\nTrial #", i, "of", filename4x
glm_score(self,filename4x, bucket, pathname4x, modelKey, modelPathname, timeoutSecs=60, pollTimeoutSecs=60)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
PedroTrujilloV/nest-simulator | pynest/nest/voltage_trace.py | 12 | 6711 | # -*- coding: utf-8 -*-
#
# voltage_trace.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import numpy
import pylab
def from_file(fname, title=None, grayscale=False):
if nest.is_iterable(fname):
data = None
for f in fname:
if data is None:
data = numpy.loadtxt(f)
else:
data = numpy.concatenate((data, numpy.loadtxt(f)))
else:
data = numpy.loadtxt(fname)
if grayscale:
line_style = "k"
else:
line_style = ""
if len(data.shape) == 1:
print("INFO: only found 1 column in the file. Assuming that only one neuron was recorded.")
plotid = pylab.plot(data, line_style)
pylab.xlabel("Time (steps of length interval)")
elif data.shape[1] == 2:
print("INFO: found 2 columns in the file. Assuming them to be gid, pot.")
plotid = []
data_dict = {}
for d in data:
if not d[0] in data_dict:
data_dict[d[0]] = [d[1]]
else:
data_dict[d[0]].append(d[1])
for d in data_dict:
plotid.append(pylab.plot(data_dict[d], line_style, label="Neuron %i" % d))
pylab.xlabel("Time (steps of length interval)")
pylab.legend()
elif data.shape[1] == 3:
plotid = []
data_dict = {}
g = data[0][0]
t = []
for d in data:
if not d[0] in data_dict:
data_dict[d[0]] = [d[2]]
else:
data_dict[d[0]].append(d[2])
if d[0] == g:
t.append(d[1])
for d in data_dict:
plotid.append(pylab.plot(t, data_dict[d], line_style, label="Neuron %i" % d))
pylab.xlabel("Time (ms)")
pylab.legend()
else:
raise ValueError("Inappropriate data shape %i!" % data.shape)
if not title:
title = "Membrane potential from file '%s'" % fname
pylab.title(title)
pylab.ylabel("Membrane potential (mV)")
pylab.draw()
return plotid
def from_device(detec, neurons=None, title=None, grayscale=False, timeunit="ms"):
"""
Plot the membrane potential of a set of neurons recorded by the given voltmeter.
"""
if len(detec) > 1:
raise nest.NESTError("Please provide a single voltmeter.")
if not nest.GetStatus(detec)[0]['model'] in ('voltmeter', 'multimeter'):
raise nest.NESTError("Please provide a voltmeter or a multimeter measuring V_m.")
elif nest.GetStatus(detec)[0]['model'] == 'multimeter':
if not "V_m" in nest.GetStatus(detec, "record_from")[0]:
raise nest.NESTError("Please provide a multimeter measuring V_m.")
elif (not nest.GetStatus(detec, "to_memory")[0] and
len(nest.GetStatus(detec, "record_from")[0]) > 1):
raise nest.NESTError("Please provide a multimeter measuring only V_m or record to memory!")
if nest.GetStatus(detec, "to_memory")[0]:
timefactor = 1.0
if not nest.GetStatus(detec)[0]['time_in_steps']:
if timeunit == "s":
timefactor = 1000.0
else:
timeunit = "ms"
times, voltages = _from_memory(detec)
if not len(times):
raise nest.NESTError("No events recorded! Make sure that withtime and withgid are set to True.")
if neurons is None:
neurons = voltages.keys()
plotids = []
for neuron in neurons:
time_values = numpy.array(times[neuron]) / timefactor
if grayscale:
line_style = "k"
else:
line_style = ""
try:
plotids.append(pylab.plot(time_values, voltages[neuron], line_style, label="Neuron %i" % neuron))
except KeyError:
print("INFO: Wrong ID: {0}".format(neuron))
if not title:
title = "Membrane potential"
pylab.title(title)
pylab.ylabel("Membrane potential (mV)")
if nest.GetStatus(detec)[0]['time_in_steps']:
pylab.xlabel("Steps")
else:
pylab.xlabel("Time (%s)" % timeunit)
pylab.legend(loc="best")
pylab.draw()
return plotids
elif nest.GetStatus(detec, "to_file")[0]:
fname = nest.GetStatus(detec, "filenames")[0]
return from_file(fname, title, grayscale)
else:
raise nest.NESTError("Provided devices neither records to file, nor to memory.")
def _from_memory(detec):
import array
ev = nest.GetStatus(detec, 'events')[0]
potentials = ev['V_m']
senders = ev['senders']
v = {}
t = {}
if 'times' in ev:
times = ev['times']
for s, currentsender in enumerate(senders):
if currentsender not in v:
v[currentsender] = array.array('f')
t[currentsender] = array.array('f')
v[currentsender].append(float(potentials[s]))
t[currentsender].append(float(times[s]))
else:
# reconstruct the time vector, if not stored explicitly
detec_status = nest.GetStatus(detec)[0]
origin = detec_status['origin']
start = detec_status['start']
interval = detec_status['interval']
senders_uniq = numpy.unique(senders)
num_intvls = len(senders) / len(senders_uniq)
times_s = origin + start + interval + interval * numpy.array(range(num_intvls))
for s, currentsender in enumerate(senders):
if currentsender not in v:
v[currentsender] = array.array('f')
t[currentsender] = times_s
v[currentsender].append(float(potentials[s]))
return t, v
def show():
"""
Call pylab.show() to show all figures and enter the GUI main loop.
Python will block until all figure windows are closed again.
You should call this function only once at the end of a script.
See also: http://matplotlib.sourceforge.net/faq/howto_faq.html#use-show
"""
pylab.show()
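# Illustrative usage sketch (not part of the original module; the neuron model
# name is a standard NEST model and is assumed here):
#
#     >>> import nest
#     >>> import nest.voltage_trace as voltage_trace
#     >>> neuron = nest.Create('iaf_psc_alpha')
#     >>> vm = nest.Create('voltmeter')
#     >>> nest.Connect(vm, neuron)
#     >>> nest.Simulate(100.0)
#     >>> voltage_trace.from_device(vm)
#     >>> voltage_trace.show()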
| gpl-2.0 |
irisyuichan/news_topic_mining | news_topic_clustering.py | 1 | 8205 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information-theoretic evaluation scores: as they are only based
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
from pymongo import MongoClient
from operator import itemgetter
import jieba
client = MongoClient()
data = []
i = 0
for x in client.News.News.find():
try:
#fop = open('news_article/news_article_%d' % i, 'w')
#fop.write(x['content'].encode('utf8'))
data.append(x)
i += 1
    except (KeyError, TypeError):
continue
jieba.set_dictionary('dict.txt.big')
article_num = len(data)
words_table = []
for i in xrange(0, article_num):
x = data[i]
try:
words = jieba.cut(x['content'], cut_all=False)
words = [word for word in words]
words_table.append(reduce(lambda x, y: x + ' ' + y, words))
except:
continue
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=1000000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Uncomment the following to do the analysis on all the categories
categories = None
labels = article_num
true_k = 10
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(words_table)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
lsa = make_pipeline(svd, Normalizer(copy=False))
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
#print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
#print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
#print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
#print("Adjusted Rand-Index: %.3f"
# % metrics.adjusted_rand_score(labels, km.labels_))
#print("Silhouette Coefficient: %0.3f"
# % metrics.silhouette_score(X, labels, sample_size=1000))
#print()
if not (opts.n_components or opts.use_hashing):
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| mit |
iarroyof/distributionalSemanticStabilityThesis | mkl_regressor.py | 2 | 9784 | from modshogun import *
from numpy import *
from sklearn.metrics import r2_score
from scipy.stats import randint
from scipy import stats
from scipy.stats import randint as sp_randint
from scipy.stats import expon
import sys, os
import Gnuplot, Gnuplot.funcutils
class mkl_regressor():
def __init__(self, widths = None, kernel_weights = None, svm_c = 0.01, mkl_c = 1.0, svm_norm = 1, mkl_norm = 1, degree = 2,
median_width = None, width_scale = None, min_size=2, max_size = 10, kernel_size = None):
self.svm_c = svm_c
self.mkl_c = mkl_c
self.svm_norm = svm_norm
self.mkl_norm = mkl_norm
self.degree = degree
self.widths = widths
self.kernel_weights = kernel_weights
self.median_width = median_width
self.width_scale = width_scale
self.min_size = min_size
self.max_size = max_size
self.kernel_size = kernel_size
def combine_kernel(self):
self.__kernels = CombinedKernel()
for width in self.widths:
kernel = GaussianKernel()
kernel.set_width(width)
kernel.init(self.__feats_train, self.__feats_train)
self.__kernels.append_kernel(kernel)
del kernel
if self.degree > 0:
kernel = PolyKernel(10, self.degree)
kernel.init(self.__feats_train, self.__feats_train)
self.__kernels.append_kernel(kernel)
del kernel
self.__kernels.init(self.__feats_train, self.__feats_train)
def fit(self, X, y, **params):
for parameter, value in params.items():
setattr(self, parameter, value)
labels_train = RegressionLabels(y.reshape((len(y), )))
self.__feats_train = RealFeatures(X.T)
self.combine_kernel()
binary_svm_solver = SVRLight() # seems to be optional, with LibSVR it does not work.
self.__mkl = MKLRegression(binary_svm_solver)
self.__mkl.set_C(self.svm_c, self.svm_c)
self.__mkl.set_C_mkl(self.mkl_c)
self.__mkl.set_mkl_norm(self.mkl_norm)
self.__mkl.set_mkl_block_norm(self.svm_norm)
self.__mkl.set_kernel(self.__kernels)
self.__mkl.set_labels(labels_train)
try:
self.__mkl.train()
except SystemError as inst:
if "Assertion" in str(inst):
sys.stderr.write("""WARNING: Bad parameter combination: [svm_c %f mkl_c %f mkl_norm %f svm_norm %f, degree %d] \n widths %s \n
MKL error [%s]""" % (self.svm_c, self.mkl_c, self.mkl_norm, self.svm_norm, self.degree, self.widths, str(inst)))
pass
self.kernel_weights = self.__kernels.get_subkernel_weights()
self.kernel_size = len(self.kernel_weights)
self.__loaded = False
def predict(self, X):
self.__feats_test = RealFeatures(X.T)
ft = None
if not self.__loaded:
self.__kernels.init(self.__feats_train, self.__feats_test) # test for test
self.__mkl.set_kernel(self.__kernels)
else:
ft = CombinedFeatures()
for i in xrange(self.__mkl.get_kernel().get_num_subkernels()):
ft.append_feature_obj(self.__feats_test)
return self.__mkl.apply_regression(ft).get_labels()
def set_params(self, **params):
for parameter, value in params.items():
setattr(self, parameter, value)
if self.median_width: # If widths are specified, the specified median has priority, so widths will be automatically overwritten.
self.set_param_weights()
return self
def get_params(self, deep=False):
return {param: getattr(self, param) for param in dir(self) if not param.startswith('__') and not '__' in param and not callable(getattr(self,param))}
def score(self, X_t, y_t):
predicted = self.predict(X_t)
return r2_score(predicted, y_t)
def serialize_model (self, file_name, sl="save"):
from os.path import basename, dirname
from bz2 import BZ2File
import pickle
if sl == "save": mode = "wb"
elif sl == "load": mode = "rb"
else: sys.stderr.write("Bad option. Only 'save' and 'load' are available.")
f = BZ2File(file_name + ".bin", mode)
if not f:
sys.stderr.write("Error serializing kernel matrix.")
exit()
if sl == "save":
#self.feats_train.save_serializable(fstream)
#os.unlink(file_name)
pickle.dump(self.__mkl, f, protocol=2)
elif sl == "load":
#self.feats_train = RealFeatures()
#self.feats_train.load_serializable(fstream)
mkl = self.__mkl = pickle.load(f)
self.__loaded = True
else: sys.stderr.write("Bad option. Only 'save' and 'load' are available.")
def save(self, file_name = None):
""" Python reimplementated function for saving a pretrained MKL machine.
This method saves a trained MKL machine to the file 'file_name'. If not 'file_name' is given, a
dictionary 'mkl_machine' containing parameters of the given trained MKL object is returned.
Here we assumed all subkernels of the passed CombinedKernel are of the same family, so uniquely the
first kernel is used for verifying if the passed 'kernel' is a Gaussian mixture. If it is so, we insert
the 'widths' to the model dictionary 'mkl_machine'. An error is returned otherwise.
"""
self._support = []
self._num_support_vectors = self.__mkl.get_num_support_vectors()
self._bias = self.__mkl.get_bias()
for i in xrange(self._num_support_vectors):
self._support.append((self.__mkl.get_alpha(i), self.__mkl.get_support_vector(i)))
self._kernel_family = self.__kernels.get_first_kernel().get_name()
if file_name:
with open(file_name,'w') as f:
f.write(str(self.get_params())+'\n')
self.serialize_model(file_name, "save")
else:
return self.get_params()
def load(self, file_name):
""" This method receives a 'file.model' file name (if it is not in pwd, full path must be given). The loaded file
must contain at least a dictionary at its top. This dictionary must contain keys from which model
parameters will be read (including weights, C, etc.). For example:
{'bias': value, 'param_1': value,...,'support_vectors': [(idx, value),(idx, value)], param_n: value}
The MKL model is tuned to the parameters stored in the given file. Another file with a double extension must
accompany the model file: '*file.model.bin', where the kernel matrix is encoded together with the kernel
machine.
"""
# Load machine parameters
with open(file_name, 'r') as pointer:
mkl_machine = eval(pointer.read())
# Set loaded parameters
for parameter, value in mkl_machine.items():
setattr(self, parameter, value)
# Load the machine itself
self.serialize_model(file_name, "load") # Instantiates the loaded MKL.
return self
def set_param_weights(self):
"""Gives a vector of weights which distribution is linear. The 'median' value is used both as location parameter and
for scaling parameter. If not size of the output vector is given, a random size between 'min_size' and 'max_size' is
returned."""
assert self.median_width and self.width_scale and self.kernel_size # Width generation needed parameters
self.minimum_width_scale = 0.01
self.widths = linspace(start = self.median_width*self.minimum_width_scale,
stop = self.median_width*self.width_scale,
num = self.kernel_size)
class expon_vector(stats.rv_continuous):
def __init__(self, loc = 1.0, scale = None, min_size=2, max_size = 10, size = None):
self.loc = loc
self.scale = scale
self.min_size = min_size
self.max_size = max_size
self.size = size
def rvs(self):
if not self.size:
self.size = randint.rvs(low = self.min_size, high = self.max_size, size = 1)
if self.scale:
return expon.rvs(loc = self.loc * 0.09, scale = self.scale, size = self.size)
else:
return expon.rvs(loc = self.loc * 0.09, scale = self.loc * 8.0, size = self.size)
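# Hedged usage sketch (an assumption, not part of the original code): expon_vector
# looks intended as a parameter distribution for randomized hyperparameter search,
# e.g. sklearn's RandomizedSearchCV, which only needs an object exposing rvs():
#     param_dist = {'svm_c': expon(scale=100, loc=0.001),
#                   'mkl_c': expon(scale=100, loc=0.001),
#                   'widths': expon_vector(loc=100.0, min_size=2, max_size=10)}
#     rs = RandomizedSearchCV(mkl_regressor(), param_dist, n_iter=16, scoring='r2')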
def test_predict(data, machine = None, file=None, labels = None, out_file=None):
g = Gnuplot.Gnuplot()
if type(machine) is str:
if "mkl_regerssion" == machine:
machine_ = mkl_regressor()
machine_.load(model_file)
# elif other machine types ...
else:
print "Error machine type"
exit()
# elif other machine types ...
else:
machine_ = machine
preds = machine_.predict(data)
if labels is not None:
r2 = r2_score(preds, labels)
print "R^2: ", r2
pred, real = zip(*sorted(zip(preds, labels), key=lambda tup: tup[1]))
else:
pred = preds; real = range(len(pred))
if out_file:
output = {}
output['learned_model'] = out_file
output['estimated_output'] = preds
output['best_params'] = machine_.get_params()
output['performance'] = r2
with open(out_file, "a") as f:
f.write(str(output)+'\n')
print "Machine Parameters: ", machine_.get_params()
g.plot(Gnuplot.Data(pred, with_="lines"), Gnuplot.Data(real, with_="linesp") )
| gpl-2.0 |
H3rsh3/odm-py-templating | example/templp.py | 2 | 3594 | import pandas as pd
import numpy as np
import shutil
import fileinput
import sys
import time
import subprocess
class Import_conf():
def __init__(self,i_data,i_base):
self.i_data = i_data
self.i_base = i_base
def generate_ic(self):
#--import config_data file
config_data = pd.read_csv("{0}".format(self.i_data), index_col='host')
#--get the list of all hosts#
# create output dir
mkdir_o = subprocess.Popen("mkdir {0}_output".format(self.i_base), shell=True)
# wait until dir is created
mkdir_ow = mkdir_o.communicate()[0]
xhostconfig_hosts = config_data.index
for host in xhostconfig_hosts:
print "generating config for {0}".format(host)
#--generate a copy of the base file
shutil.copy2("{0}".format(self.i_base), "{0}_output/{1}".format(self.i_base,host))
#--get the row for the host, i.e. the host to generate config for
xhostconfig = config_data.loc["{0}".format(host)]
#--get the columns of the host, i.e. the items to replace
xhostconfig_item = config_data.columns
#--for each config_item(column) replace the config file
for config_item in xhostconfig_item:
#--get the "find" item to replace
replace_item = config_item
#--get the replacement value
replace_item_with = xhostconfig["{0}".format(config_item)]
#--find the config item to replace
#--replace the config item
hostconfig_file = fileinput.input(files=("{0}_output/{1}".format(self.i_base,host)),inplace=1)
for line in hostconfig_file:
replace = line.replace("<{0}>".format(replace_item), "{0}".format(replace_item_with))
print replace,
#time.sleep(.50)
hostconfig_file.close()
print "{0} done".format(host)
print "======================"
def file_list(self):
filelist = subprocess.check_output(['ls -l {0}_output | awk -F\' \' \'{{ print $9 }}\' | sort -n '.format(self.i_base)], shell=True)
filelist = filelist.split()
return filelist
def concat_ic(self,o_file):
# clear out the file
open("{0}".format(o_file), 'w').close()
# open file to append
with open("{0}".format(o_file), "a") as base_file:
for file in Import_conf.file_list(self):
print ("starting {0}".format(file))
with open("{0}_output/{1}".format(self.i_base,file), "r") as source_file:
for line in source_file:
base_file.write(line)
#print line,
source_file.close()
base_file.close()
def concat_h_ic(self,ci_data,i_index):
baseseries = pd.read_csv("{0}".format(ci_data), index_col='host')
baseseries_idx = baseseries.index
basedf = pd.DataFrame(index=baseseries_idx)
#print basedf
for df in i_index:
adt_filelist = subprocess.check_output(['ls -l {0}_output | awk -F\' \' \'{{ print $9 }}\' | sort -n '.format(df)], shell=True)
adt_filelist_s = adt_filelist.split()
adt_series = pd.Index(adt_filelist_s)
adt_df = pd.DataFrame(index=adt_series, columns=["{0}".format(df)])
adt_df[[0]]= "p"
basedf = pd.concat([basedf, adt_df], axis=1)
#print basedf
mkdir_ov = subprocess.Popen("mkdir {0}_voutput".format(ci_data), shell=True)
mkdir_ovw = mkdir_ov.communicate()[0]
for host in basedf.index:
open("{0}_voutput/{1}".format(ci_data,host), 'w').close()
with open("{0}_voutput/{1}".format(ci_data,host), "a") as dest_file:
presentfiles = basedf.loc["{0}".format(host)].dropna()
presentfiles_index = presentfiles.index
print "==================combining files {0}".format(host)
for p_files in presentfiles_index:
with open("{0}_output/{1}".format(p_files,host), "r") as src_file:
for line in src_file:
dest_file.write(line) | gpl-3.0 |
badbytes/pymeg | meg/get.py | 1 | 1332 | """Return sensors and headshape positions"""
from msiread import getposted
from numpy import *
pdf=getposted.read()
hs=pdf.head_shape.hs_points
m=pdf.GetSignalMEGDevices()
x=[]; y=[]; z=[];
class headshape():
"""hs=pos.headshape.hsm"""
hsa=array(hs)
hsm=zeros((size(hsa),3))
for i in range(len(hsa)):
hsm[i,:]=( hsa[i].x, hsa[i].y, hsa[i].z);
class sensors():
"""chu=pos.headshape.chu"""
"""chl=pos.headshape.chl"""
cha=array(m)
chl=zeros((size(cha),3))
chu=zeros((size(cha),3))
for i in range(len(cha)):
chl[i,:]=( cha[i].loops[0].Position.x, cha[i].loops[0].Position.y, cha[i].loops[0].Position.z);
chu[i,:]=( cha[i].loops[1].Position.x, cha[i].loops[1].Position.y, cha[i].loops[1].Position.z);
class plot(headshape):
def __init__(self,data):
headshape.__init__(self)
import pylab as p
import matplotlib.axes3d as p3
fig=p.figure()
ax = p3.Axes3D(fig)
ax.scatter(data[:,0],data[:,1],data[:,2])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
mn=data.min()
mx=data.max()
ax.set_xlim(data[:,0].min(),data[:,0].max())
ax.set_ylim(data[:,1].min(),data[:,1].max())
ax.set_zlim(data[:,2].min(),data[:,2].max())
p.show()
| gpl-3.0 |
dpshelio/sunpy | sunpy/map/sources/trace.py | 2 | 3151 | """TRACE Map subclass definitions"""
#pylint: disable=W0221,W0222,E1101,E1121
__author__ = "Jack Ireland"
__email__ = "jack.ireland@nasa.gov"
import matplotlib.pyplot as plt
from astropy.visualization import LogStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from sunpy.map import GenericMap
from sunpy.map.sources.source_type import source_stretch
__all__ = ['TRACEMap']
class TRACEMap(GenericMap):
"""TRACE Image Map
The Transition Region and Coronal Explorer was a
NASA Small Explorer (SMEX) mission to image the
solar corona and transition region at high angular and temporal resolution.
TRACE observed the Sun in the following passbands, 5000 A, 1700 A, 1600 A,
1550 A (C IV), 1216 A (H1 Lyman-alpha), 173 A (Fe IX), 195 A (Fe XII),
and 284 A (Fe XV). TRACE provides solar images with an 8.5 x 8.5 arcminute
field of view and 0.5 arcsecond pixels. It was placed in a sun-synchronous
orbit, enabling it to make continuous solar observations.
The TRACE mission was launched on 2 April 1998 and obtained its
last science image on 6 June 2010 23:56 UT.
References
----------
* `Mission/Instrument Page <https://sdowww.lmsal.com/TRACE>`_
* `Fits headers <https://sdowww.lmsal.com/TRACE/Project/Instrument/cal/>`_
* `Analysis Guide <https://sdowww.lmsal.com/TRACE/tag/>`_
* `Passband reference <https://sdowww.lmsal.com/TRACE/Project/Instrument/inspass.htm>`_
.. note::
Note that this map definition is currently only being tested on JPEG2000
files. TRACE FITS data is stored in a more complex format. Typically
TRACE data is stored in hourly "tri" files that store all the data taken
by TRACE in the hour indicated by the filename. Those files must first be
understood and parsed to obtain the science data. The ability to do this
is not yet in SunPy, but is available in SSWIDL. Please refer to the links
above concerning how to read "tri" files in SSWIDL.
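Examples
--------
A minimal, illustrative usage sketch (the file name is a placeholder, not a
real sample file)::
>>> import sunpy.map # doctest: +SKIP
>>> trace_map = sunpy.map.Map('trace_171.jp2') # doctest: +SKIP
>>> trace_map.peek() # doctest: +SKIP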
"""
def __init__(self, data, header, **kwargs):
# Assume pixel units are arcsec if not given
header['cunit1'] = header.get('cunit1', 'arcsec')
header['cunit2'] = header.get('cunit2', 'arcsec')
GenericMap.__init__(self, data, header, **kwargs)
# It needs to be verified that these must actually be set and are not
# already in the header.
self.meta['detector'] = "TRACE"
self.meta['obsrvtry'] = "TRACE"
self._nickname = self.detector
# Colour maps
self.plot_settings['cmap'] = plt.get_cmap('trace' + str(self.meta['WAVE_LEN']))
self.plot_settings['norm'] = ImageNormalize(stretch=source_stretch(self.meta, LogStretch()))
@classmethod
def is_datasource_for(cls, data, header, **kwargs):
"""Determines if header corresponds to an TRACE image"""
return header.get('instrume') == 'TRACE'
@property
def measurement(self):
"""
Returns the measurement type.
"""
s = self.meta['WAVE_LEN']
if s == 'WL':
s = 'white-light'
return s
| bsd-2-clause |
awalls-cx18/gnuradio | gr-filter/examples/reconstruction.py | 7 | 5011 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, digital
from gnuradio import filter
from gnuradio import blocks
import sys
import numpy
try:
from gnuradio import channels
except ImportError:
print("Error: Program requires gr-channels.")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
print("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).")
sys.exit(1)
fftlen = 8192
def main():
N = 10000
fs = 2000.0
Ts = 1.0 / fs
t = numpy.arange(0, N*Ts, Ts)
# When playing with the number of channels, be careful about the filter
# specs and the channel map of the synthesizer set below.
nchans = 10
# Build the filter(s)
bw = 1000
tb = 400
proto_taps = filter.firdes.low_pass_2(1, nchans*fs,
bw, tb, 80,
filter.firdes.WIN_BLACKMAN_hARRIS)
print("Filter length: ", len(proto_taps))
# Create a modulated signal
npwr = 0.01
data = numpy.random.randint(0, 256, N)
rrc_taps = filter.firdes.root_raised_cosine(1, 2, 1, 0.35, 41)
src = blocks.vector_source_b(data.astype(numpy.uint8).tolist(), False)
mod = digital.bpsk_mod(samples_per_symbol=2)
chan = channels.channel_model(npwr)
rrc = filter.fft_filter_ccc(1, rrc_taps)
# Split it up into pieces
channelizer = filter.pfb.channelizer_ccf(nchans, proto_taps, 2)
# Put the pieces back together again
syn_taps = [nchans*t for t in proto_taps]
synthesizer = filter.pfb_synthesizer_ccf(nchans, syn_taps, True)
src_snk = blocks.vector_sink_c()
snk = blocks.vector_sink_c()
# Remap the location of the channels
# Can be done in synth or channelizer (watch out for rotations in
# the channelizer)
synthesizer.set_channel_map([ 0, 1, 2, 3, 4,
15, 16, 17, 18, 19])
tb = gr.top_block()
tb.connect(src, mod, chan, rrc, channelizer)
tb.connect(rrc, src_snk)
vsnk = []
for i in range(nchans):
tb.connect((channelizer,i), (synthesizer, i))
vsnk.append(blocks.vector_sink_c())
tb.connect((channelizer,i), vsnk[i])
tb.connect(synthesizer, snk)
tb.run()
sin = numpy.array(src_snk.data()[1000:])
sout = numpy.array(snk.data()[1000:])
# Plot original signal
fs_in = nchans*fs
f1 = pyplot.figure(1, figsize=(16,12), facecolor='w')
s11 = f1.add_subplot(2,2,1)
s11.psd(sin, NFFT=fftlen, Fs=fs_in)
s11.set_title("PSD of Original Signal")
s11.set_ylim([-200, -20])
s12 = f1.add_subplot(2,2,2)
s12.plot(sin.real[1000:1500], "o-b")
s12.plot(sin.imag[1000:1500], "o-r")
s12.set_title("Original Signal in Time")
start = 1
skip = 2
s13 = f1.add_subplot(2,2,3)
s13.plot(sin.real[start::skip], sin.imag[start::skip], "o")
s13.set_title("Constellation")
s13.set_xlim([-2, 2])
s13.set_ylim([-2, 2])
# Plot channels
nrows = int(numpy.sqrt(nchans))
ncols = int(numpy.ceil(float(nchans) / float(nrows)))
f2 = pyplot.figure(2, figsize=(16,12), facecolor='w')
for n in range(nchans):
s = f2.add_subplot(nrows, ncols, n+1)
s.psd(vsnk[n].data(), NFFT=fftlen, Fs=fs_in)
s.set_title("Channel {0}".format(n))
s.set_ylim([-200, -20])
# Plot reconstructed signal
fs_out = 2*nchans*fs
f3 = pyplot.figure(3, figsize=(16,12), facecolor='w')
s31 = f3.add_subplot(2,2,1)
s31.psd(sout, NFFT=fftlen, Fs=fs_out)
s31.set_title("PSD of Reconstructed Signal")
s31.set_ylim([-200, -20])
s32 = f3.add_subplot(2,2,2)
s32.plot(sout.real[1000:1500], "o-b")
s32.plot(sout.imag[1000:1500], "o-r")
s32.set_title("Reconstructed Signal in Time")
start = 0
skip = 4
s33 = f3.add_subplot(2,2,3)
s33.plot(sout.real[start::skip], sout.imag[start::skip], "o")
s33.set_title("Constellation")
s33.set_xlim([-2, 2])
s33.set_ylim([-2, 2])
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
RBDA-F17/crime | code_drop_2/filter_clean_crime.py | 2 | 1179 | import os
import sys
import pandas as pd
from pyspark.sql.types import *
from pyspark.sql import Row, Column
from pyspark.sql.functions import *
from datetime import datetime
from pyspark import SparkConf, SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import udf
user = os.environ['USER']
if user not in ['cpa253','vaa238','vm1370']:
user = 'cpa253'
if not sc:
sc = SparkContext()
if not sqlContext:
sqlContext = SQLContext(sc)
weather = sqlContext.read.parquet('/user/%s/rbda/crime/data/weather_clean' %(user) ).filter("year(time) >= 2009")
def get_station(lon,lat):
s_coords = pd.DataFrame({
'station': [
"Central Park",
"La Guardia",
"JFK",
]
,
'lat': [
40.782483,
40.776212,
40.640773,
],
'lon':[
-73.965816,
-73.874009,
-73.779180,
]
})
if not lon:
return None
if not lat:
return None
s_coords['dist'] = (s_coords.lon - lon)**2 + (s_coords.lat - lat)**2
ind = s_coords['dist'].idxmin(axis=0)
out = s_coords.station[ind]
return out
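# Illustrative check (hypothetical coordinates, roughly the Upper West Side):
# get_station(-73.97, 40.78) returns "Central Park", the closest of the three
# stations by squared lon/lat distance.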
get_station_udf = udf( get_station )
file_name = '/user/%s/rbda/crime/data/crime_clean' %(user)
df = sqlContext.read.parquet(file_name)
| gpl-3.0 |
robin-lai/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 77 | 1820 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
metaml/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/offsetbox.py | 69 | 17728 | """
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a relative position to its parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of their
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
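A minimal usage sketch (illustrative only; the numbers are arbitrary):
from matplotlib.offsetbox import TextArea, DrawingArea, VPacker
from matplotlib.patches import Circle
ta = TextArea("label", textprops=dict(color="k"))
da = DrawingArea(20, 20, 0, 0)
da.add_artist(Circle((10, 10), 8, fc="r"))
box = VPacker(children=[ta, da], align="center", pad=2, sep=4)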
"""
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG=False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
Given a list of (width, xdescent) of each box, calculate the
total width and the x-offset positions of each item according to
*mode*. xdescent is analogous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
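Example (mode='fixed'): for wd_list=[(3, 0), (1, 0), (2, 0)], sep=1 and
total=None this returns total=8 and offsets=[0, 4, 6].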
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list))/(len(w_list)-1.)
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh+sep)*len(w_list)
else:
sep = float(total)/(len(w_list)) - maxh
offsets = np.array([(maxh+sep)*i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
Given a list of (height, descent) of each box, align the boxes
with *align* and calculate the y-offsets of each box.
The total height and the offset positions of each item are
calculated according to *align*.
*hd_list* : list of (height, descent) of boxes to be aligned.
*height* : Intended total height. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h-d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left","top"]:
descent=0.
offsets = [d for h, d in hd_list]
elif align in ["right","bottom"]:
descent=0.
offsets = [height-h+d for h, d in hd_list]
elif align == "center":
descent=0.
offsets = [(height-h)*.5+d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
class OffsetBox(martist.Artist):
"""
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a relative position to its parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
self._children = []
self._offset = (0, 0)
def set_figure(self, fig):
"""
Set the figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def set_offset(self, xy):
"""
Set the offset
accepts x, y, tuple, or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent):
"""
Get the offset
accepts extent of the box
"""
if callable(self._offset):
return self._offset(width, height, xdescent, ydescent)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
Return width, height, xdescent, ydescent of box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd)
return mtransforms.Bbox.from_bounds(px-xd, py-yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent)
for c, (ox, oy) in zip(self.get_children(), offsets):
c.set_offset((px+ox, py+oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
The VPacker has its children packed vertically. It automatically
adjusts the relative positions of children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
update offsets of children and return the extents of the box
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
whd_list = [(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w,h,xd,yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
self.sep, self.mode)
yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent+self.pad, ydescent+self.pad, \
zip(xoffsets, yoffsets)
class HPacker(PackerBase):
"""
The HPacker has its children packed horizontally. It automatically
adjusts the relative positions of children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
update offsets of children and return the extents of the box
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
if self.height is None:
height_descent = max([h-yd for w,h,xd,yd in whd_list])
ydescent = max([yd for w,h,xd,yd in whd_list])
height = height_descent + ydescent
else:
height = self.height - 2*self.pad # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w,h,xd,yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
self.sep, self.mode)
xoffsets = xoffsets_ + [xd for w,h,xd,yd in whd_list]
xdescent=whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent + self.pad, ydescent + self.pad, \
zip(xoffsets, yoffsets)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
"""
Return width, height, xdescent, ydescent of box
"""
return self.width, self.height, self.xdescent, self.ydescent
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class TextArea(OffsetBox):
"""
The TextArea contains a single Text instance. The text is
placed at (0,0) with baseline+left alignment. The width and height
of the TextArea instance are the width and height of its child
text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
*s* : a string to be displayed.
*textprops* : property dictionary for the text
*multilinebaseline* : If True, baseline for multiline text is
adjusted so that it is (approximately)
center-aligned with singleline text.
*minimumdescent* : If True, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if not textprops.has_key("va"):
textprops["va"]="baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform+self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_multilinebaseline(self, t):
"""
Set multilinebaseline .
If True, baseline for multiline text is
adjusted so that it is (approximately) center-aligned with
singleline text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
Set minimumdescent .
If True, extent of the single line text is adjusted so that
it has minimum descent of "p"
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[0][0] # first line
_, hh, dd = renderer.get_text_width_height_descent(
clean_line, self._text._fontproperties, ismath=ismath)
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline: # multi line
d = h-(hh-dd) # the baseline of the first line
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h-dd)
if self.get_minimumdescent():
## to have a minimum descent, #i.e., "l" and "p" have same
## descents.
d = max(dd, d_)
else:
d = dd
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
| agpl-3.0 |
alexsavio/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 41 | 2672 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
hlin117/statsmodels | statsmodels/stats/power.py | 31 | 47523 | # -*- coding: utf-8 -*-
#pylint: disable-msg=W0142
"""Statistical power, solving for nobs, ... - trial version
Created on Sat Jan 12 21:48:06 2013
Author: Josef Perktold
Example
roundtrip - root with respect to all variables
calculated, desired
nobs 33.367204205 33.367204205
effect 0.5 0.5
alpha 0.05 0.05
power 0.8 0.8
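A hedged sketch of how the nobs row above can be reproduced with this module
(exactly one keyword is left as None, and that is the one solved for):
from statsmodels.stats.power import TTestPower
nobs = TTestPower().solve_power(effect_size=0.5, nobs=None, alpha=0.05, power=0.8)
# nobs is approximately 33.367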
TODO:
refactoring
- rename beta -> power, beta (type 2 error is beta = 1-power) DONE
- I think the current implementation can handle any kinds of extra keywords
(except for maybe raising meaningful exceptions
- streamline code, I think internally classes can be merged
how to extend to k-sample tests?
user interface for different tests that map to the same (internal) test class
- sequence of arguments might be inconsistent,
arg and/or kwds so python checks what's required and what can be None.
- templating for docstrings ?
"""
from __future__ import print_function
from statsmodels.compat.python import iteritems
import numpy as np
from scipy import stats, optimize
from statsmodels.tools.rootfinding import brentq_expanding
def ttest_power(effect_size, nobs, alpha, df=None, alternative='two-sided'):
'''Calculate power of a ttest
'''
d = effect_size
if df is None:
df = nobs - 1
if alternative in ['two-sided', '2s']:
alpha_ = alpha / 2. #no inplace changes, doesn't work
elif alternative in ['smaller', 'larger']:
alpha_ = alpha
else:
raise ValueError("alternative has to be 'two-sided', 'larger' " +
"or 'smaller'")
pow_ = 0
if alternative in ['two-sided', '2s', 'larger']:
crit_upp = stats.t.isf(alpha_, df)
#print crit_upp, df, d*np.sqrt(nobs)
# use private methods, generic methods return nan with negative d
if np.any(np.isnan(crit_upp)):
# avoid endless loop, https://github.com/scipy/scipy/issues/2667
pow_ = np.nan
else:
pow_ = stats.nct._sf(crit_upp, df, d*np.sqrt(nobs))
if alternative in ['two-sided', '2s', 'smaller']:
crit_low = stats.t.ppf(alpha_, df)
#print crit_low, df, d*np.sqrt(nobs)
if np.any(np.isnan(crit_low)):
pow_ = np.nan
else:
pow_ += stats.nct._cdf(crit_low, df, d*np.sqrt(nobs))
return pow_
def normal_power(effect_size, nobs, alpha, alternative='two-sided', sigma=1.):
'''Calculate power of a normal distributed test statistic
'''
d = effect_size
if alternative in ['two-sided', '2s']:
alpha_ = alpha / 2. #no inplace changes, doesn't work
elif alternative in ['smaller', 'larger']:
alpha_ = alpha
else:
raise ValueError("alternative has to be 'two-sided', 'larger' " +
"or 'smaller'")
pow_ = 0
if alternative in ['two-sided', '2s', 'larger']:
crit = stats.norm.isf(alpha_)
pow_ = stats.norm.sf(crit - d*np.sqrt(nobs)/sigma)
if alternative in ['two-sided', '2s', 'smaller']:
crit = stats.norm.ppf(alpha_)
pow_ += stats.norm.cdf(crit - d*np.sqrt(nobs)/sigma)
return pow_
def ftest_anova_power(effect_size, nobs, alpha, k_groups=2, df=None):
'''power for ftest for one way anova with k equal sized groups
nobs total sample size, sum over all groups
should be general nobs observations, k_groups restrictions ???
'''
df_num = nobs - k_groups
df_denom = k_groups - 1
crit = stats.f.isf(alpha, df_denom, df_num)
pow_ = stats.ncf.sf(crit, df_denom, df_num, effect_size**2 * nobs)
return pow_#, crit
def ftest_power(effect_size, df_num, df_denom, alpha, ncc=1):
'''Calculate the power of a F-test.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
df_num : int or float
numerator degrees of freedom.
df_denom : int or float
denominator degrees of freedom.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ncc : int
degrees of freedom correction for non-centrality parameter.
see Notes
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
Notes
-----
sample size is given implicitly by df_num
set ncc=0 to match t-test, or f-test in LikelihoodModelResults.
ncc=1 matches the non-centrality parameter in R::pwr::pwr.f2.test
ftest_power with ncc=0 should also be correct for f_test in regression
models, with df_num and d_denom as defined there. (not verified yet)
'''
nc = effect_size**2 * (df_denom + df_num + ncc)
crit = stats.f.isf(alpha, df_denom, df_num)
pow_ = stats.ncf.sf(crit, df_denom, df_num, nc)
return pow_ #, crit, nc
#class based implementation
#--------------------------
class Power(object):
'''Statistical Power calculations, Base Class
so far this could all be class methods
'''
def __init__(self, **kwds):
self.__dict__.update(kwds)
# used only for instance level start values
self.start_ttp = dict(effect_size=0.01, nobs=10., alpha=0.15,
power=0.6, nobs1=10., ratio=1,
df_num=10, df_denom=3 # for FTestPower
)
# TODO: nobs1 and ratio are for ttest_ind,
# need start_ttp for each test/class separately,
# possible rootfinding problem for effect_size, starting small seems to
# work
from collections import defaultdict
self.start_bqexp = defaultdict(dict)
for key in ['nobs', 'nobs1', 'df_num', 'df_denom']:
self.start_bqexp[key] = dict(low=2., start_upp=50.)
for key in ['df_denom']:
self.start_bqexp[key] = dict(low=1., start_upp=50.)
for key in ['ratio']:
self.start_bqexp[key] = dict(low=1e-8, start_upp=2)
for key in ['alpha']:
self.start_bqexp[key] = dict(low=1e-12, upp=1 - 1e-12)
def power(self, *args, **kwds):
raise NotImplementedError
def _power_identity(self, *args, **kwds):
power_ = kwds.pop('power')
return self.power(*args, **kwds) - power_
def solve_power(self, **kwds):
'''solve for any one of the parameters of a t-test
for t-test the keywords are:
effect_size, nobs, alpha, power
exactly one needs to be ``None``, all others need numeric values
*attaches*
cache_fit_res : list
Cache of the result of the root finding procedure for the latest
call to ``solve_power``, mainly for debugging purposes.
The first element is the success indicator, one if successful.
The remaining elements contain the return information of the up to
three solvers that have been tried.
'''
#TODO: maybe use explicit kwds,
# nicer but requires inspect? and not generic across tests
# I'm duplicating this in the subclass to get informative docstring
key = [k for k,v in iteritems(kwds) if v is None]
#print kwds, key;
if len(key) != 1:
raise ValueError('need exactly one keyword that is None')
key = key[0]
if key == 'power':
del kwds['power']
return self.power(**kwds)
self._counter = 0
def func(x):
kwds[key] = x
fval = self._power_identity(**kwds)
self._counter += 1
#print self._counter,
if self._counter > 500:
raise RuntimeError('possible endless loop (500 NaNs)')
if np.isnan(fval):
return np.inf
else:
return fval
#TODO: I'm using the following so I get a warning when start_ttp is not defined
try:
start_value = self.start_ttp[key]
except KeyError:
start_value = 0.9
print('Warning: using default start_value for {0}'.format(key))
fit_kwds = self.start_bqexp[key]
fit_res = []
#print vars()
try:
val, res = brentq_expanding(func, full_output=True, **fit_kwds)
failed = False
fit_res.append(res)
except ValueError:
failed = True
fit_res.append(None)
success = None
if (not failed) and res.converged:
success = 1
else:
# try backup
#TODO: check more cases to make this robust
val, infodict, ier, msg = optimize.fsolve(func, start_value,
full_output=True) #scalar
#val = optimize.newton(func, start_value) #scalar
fval = infodict['fvec']
fit_res.append(infodict)
if ier == 1 and np.abs(fval) < 1e-4 :
success = 1
else:
#print infodict
if key in ['alpha', 'power', 'effect_size']:
val, r = optimize.brentq(func, 1e-8, 1-1e-8,
full_output=True) #scalar
success = 1 if r.converged else 0
fit_res.append(r)
else:
success = 0
if not success == 1:
import warnings
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
convergence_doc)
warnings.warn(convergence_doc, ConvergenceWarning)
#attach fit_res, for reading only, should be needed only for debugging
fit_res.insert(0, success)
self.cache_fit_res = fit_res
return val
def plot_power(self, dep_var='nobs', nobs=None, effect_size=None,
alpha=0.05, ax=None, title=None, plt_kwds=None, **kwds):
'''plot power with number of observations or effect size on x-axis
Parameters
----------
dep_var : string in ['nobs', 'effect_size', 'alpha']
This specifies which variable is used for the horizontal axis.
If dep_var='nobs' (default), then one curve is created for each
value of ``effect_size``. If dep_var='effect_size' or alpha, then
one curve is created for each value of ``nobs``.
nobs : scalar or array_like
specifies the values of the number of observations in the plot
effect_size : scalar or array_like
specifies the values of the effect_size in the plot
alpha : float or array_like
The significance level (type I error) used in the power
calculation. Can only be more than a scalar, if ``dep_var='alpha'``
ax : None or axis instance
If ax is None, than a matplotlib figure is created. If ax is a
matplotlib axis instance, then it is reused, and the plot elements
are created with it.
title : string
title for the axis. Use an empty string, ``''``, to avoid a title.
plt_kwds : None or dict
not used yet
kwds : optional keywords for power function
These remaining keyword arguments are used as arguments to the
power function. Many power function support ``alternative`` as a
keyword argument, two-sample test support ``ratio``.
Returns
-------
fig : matplotlib figure instance
Notes
-----
This works only for classes where the ``power`` method has
``effect_size``, ``nobs`` and ``alpha`` as the first three arguments.
If the second argument is ``nobs1``, then the number of observations
in the plot are those for the first sample.
TODO: fix this for FTestPower and GofChisquarePower
TODO: maybe add line variable, if we want more than nobs and effectsize
'''
#if pwr_kwds is None:
# pwr_kwds = {}
from statsmodels.graphics import utils
from statsmodels.graphics.plottools import rainbow
fig, ax = utils.create_mpl_ax(ax)
import matplotlib.pyplot as plt
colormap = plt.cm.Dark2 #pylint: disable-msg=E1101
plt_alpha = 1 #0.75
lw = 2
if dep_var == 'nobs':
colors = rainbow(len(effect_size))
colors = [colormap(i) for i in np.linspace(0, 0.9, len(effect_size))]
for ii, es in enumerate(effect_size):
power = self.power(es, nobs, alpha, **kwds)
ax.plot(nobs, power, lw=lw, alpha=plt_alpha,
color=colors[ii], label='es=%4.2F' % es)
xlabel = 'Number of Observations'
elif dep_var in ['effect size', 'effect_size', 'es']:
colors = rainbow(len(nobs))
colors = [colormap(i) for i in np.linspace(0, 0.9, len(nobs))]
for ii, n in enumerate(nobs):
power = self.power(effect_size, n, alpha, **kwds)
ax.plot(effect_size, power, lw=lw, alpha=plt_alpha,
color=colors[ii], label='N=%4.2F' % n)
xlabel = 'Effect Size'
elif dep_var in ['alpha']:
# experimental nobs as defining separate lines
colors = rainbow(len(nobs))
for ii, n in enumerate(nobs):
power = self.power(effect_size, n, alpha, **kwds)
ax.plot(alpha, power, lw=lw, alpha=plt_alpha,
color=colors[ii], label='N=%4.2F' % n)
xlabel = 'alpha'
else:
raise ValueError('depvar not implemented')
if title is None:
title = 'Power of Test'
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.legend(loc='lower right')
return fig
class TTestPower(Power):
'''Statistical Power calculations for one sample or paired sample t-test
'''
def power(self, effect_size, nobs, alpha, df=None, alternative='two-sided'):
'''Calculate the power of a t-test for one sample or paired samples.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
df : int or float
degrees of freedom. By default this is None, and the df from the
one sample or paired ttest is used, ``df = nobs1 - 1``
alternative : string, 'two-sided' (default), 'larger', 'smaller'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
# for debugging
#print 'calling ttest power with', (effect_size, nobs, alpha, df, alternative)
return ttest_power(effect_size, nobs, alpha, df=df,
alternative=alternative)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs=None, alpha=None, power=None,
alternative='two-sided'):
'''solve for any one parameter of the power of a one sample t-test
for the one sample t-test the keywords are:
effect_size, nobs, alpha, power
Exactly one needs to be ``None``, all others need numeric values.
This test can also be used for a paired t-test, where effect size is
defined in terms of the mean difference, and nobs is the number of
pairs.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
alternative : string, 'two-sided' (default) or 'one-sided'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test.
'one-sided' assumes we are in the relevant tail.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
value solves the power equation given the remaining parameters.
*attaches*
cache_fit_res : list
Cache of the result of the root finding procedure for the latest
call to ``solve_power``, mainly for debugging purposes.
The first element is the success indicator, one if successful.
The remaining elements contain the return information of the up to
three solvers that have been tried.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
# for debugging
#print 'calling ttest solve with', (effect_size, nobs, alpha, power, alternative)
return super(TTestPower, self).solve_power(effect_size=effect_size,
nobs=nobs,
alpha=alpha,
power=power,
alternative=alternative)
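# Editorial usage sketch (not part of the original module): the docstring above
# describes solving the power equation for whichever keyword is left as None.
# Leaving ``nobs`` as None and fixing the other keywords asks for the sample
# size of a one sample or paired t-test; for a medium effect size this comes
# out to roughly 33-34 observations (value approximate).
#
#   >>> from statsmodels.stats.power import TTestPower
#   >>> TTestPower().solve_power(effect_size=0.5, nobs=None, alpha=0.05,
#   ...                          power=0.8, alternative='two-sided')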
class TTestIndPower(Power):
    '''Statistical Power calculations for t-test for two independent samples
currently only uses pooled variance
'''
def power(self, effect_size, nobs1, alpha, ratio=1, df=None,
alternative='two-sided'):
        '''Calculate the power of a t-test for two independent samples
Parameters
----------
effect_size : float
standardized effect size, difference between the two means divided
by the standard deviation. `effect_size` has to be positive.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
The default for ratio is 1; to solve for ratio given the other
arguments, it has to be explicitly set to None.
df : int or float
degrees of freedom. By default this is None, and the df from the
ttest with pooled variance is used, ``df = (nobs1 - 1 + nobs2 - 1)``
alternative : string, 'two-sided' (default), 'larger', 'smaller'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
nobs2 = nobs1*ratio
#pooled variance
if df is None:
df = (nobs1 - 1 + nobs2 - 1)
nobs = 1./ (1. / nobs1 + 1. / nobs2)
#print 'calling ttest power with', (effect_size, nobs, alpha, df, alternative)
return ttest_power(effect_size, nobs, alpha, df=df, alternative=alternative)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs1=None, alpha=None, power=None,
ratio=1., alternative='two-sided'):
'''solve for any one parameter of the power of a two sample t-test
for t-test the keywords are:
effect_size, nobs1, alpha, power, ratio
exactly one needs to be ``None``, all others need numeric values
Parameters
----------
effect_size : float
standardized effect size, difference between the two means divided
by the standard deviation. `effect_size` has to be positive.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
The default for ratio is 1; to solve for ratio given the other
arguments it has to be explicitly set to None.
alternative : string, 'two-sided' (default), 'larger', 'smaller'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
return super(TTestIndPower, self).solve_power(effect_size=effect_size,
nobs1=nobs1,
alpha=alpha,
power=power,
ratio=ratio,
alternative=alternative)
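# Editorial usage sketch (not part of the original module): solving for nobs1
# in the two independent sample t-test with equal group sizes (ratio=1); for a
# medium effect size and 80% power this is roughly 64 observations per group
# (value approximate).
#
#   >>> from statsmodels.stats.power import TTestIndPower
#   >>> TTestIndPower().solve_power(effect_size=0.5, nobs1=None, alpha=0.05,
#   ...                             power=0.8, ratio=1.)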
class NormalIndPower(Power):
'''Statistical Power calculations for z-test for two independent samples.
currently only uses pooled variance
'''
def __init__(self, ddof=0, **kwds):
self.ddof = ddof
super(NormalIndPower, self).__init__(**kwds)
def power(self, effect_size, nobs1, alpha, ratio=1,
alternative='two-sided'):
        '''Calculate the power of a z-test for two independent samples
Parameters
----------
effect_size : float
standardized effect size, difference between the two means divided
by the standard deviation. effect size has to be positive.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
``ratio`` can be set to zero in order to get the power for a
one sample test.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
The default for ratio is 1; to solve for ratio given the other
arguments it has to be explicitly set to None.
alternative : string, 'two-sided' (default), 'larger', 'smaller'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
ddof = self.ddof # for correlation, ddof=3
# get effective nobs, factor for std of test statistic
if ratio > 0:
nobs2 = nobs1*ratio
#equivalent to nobs = n1*n2/(n1+n2)=n1*ratio/(1+ratio)
nobs = 1./ (1. / (nobs1 - ddof) + 1. / (nobs2 - ddof))
else:
nobs = nobs1 - ddof
return normal_power(effect_size, nobs, alpha, alternative=alternative)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs1=None, alpha=None, power=None,
ratio=1., alternative='two-sided'):
'''solve for any one parameter of the power of a two sample z-test
for z-test the keywords are:
effect_size, nobs1, alpha, power, ratio
exactly one needs to be ``None``, all others need numeric values
Parameters
----------
effect_size : float
standardized effect size, difference between the two means divided
by the standard deviation.
If ratio=0, then this is the standardized mean in the one sample
test.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
``ratio`` can be set to zero in order to get the power for a
one sample test.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
            The default for ratio is 1; to solve for ratio given the other
arguments it has to be explicitly set to None.
alternative : string, 'two-sided' (default), 'larger', 'smaller'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test. The one-sided test can be
either 'larger', 'smaller'.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
return super(NormalIndPower, self).solve_power(effect_size=effect_size,
nobs1=nobs1,
alpha=alpha,
power=power,
ratio=ratio,
alternative=alternative)
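# Editorial usage sketch (not part of the original module): the z-test version
# is typically used for proportions or other asymptotically normal statistics;
# setting ratio=0 gives the one sample case, as described in the docstrings
# above.  For effect_size=0.3 with equal groups and 80% power the required
# nobs1 is roughly 175 per group (value approximate).
#
#   >>> from statsmodels.stats.power import NormalIndPower
#   >>> NormalIndPower().solve_power(effect_size=0.3, nobs1=None, alpha=0.05,
#   ...                              power=0.8, ratio=1.)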
class FTestPower(Power):
'''Statistical Power calculations for generic F-test
'''
def power(self, effect_size, df_num, df_denom, alpha, ncc=1):
'''Calculate the power of a F-test.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
df_num : int or float
numerator degrees of freedom.
df_denom : int or float
denominator degrees of freedom.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ncc : int
degrees of freedom correction for non-centrality parameter.
see Notes
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
Notes
-----
sample size is given implicitly by df_num
set ncc=0 to match t-test, or f-test in LikelihoodModelResults.
ncc=1 matches the non-centrality parameter in R::pwr::pwr.f2.test
ftest_power with ncc=0 should also be correct for f_test in regression
        models, with df_num and df_denom as defined there. (not verified yet)
'''
pow_ = ftest_power(effect_size, df_num, df_denom, alpha, ncc=ncc)
#print effect_size, df_num, df_denom, alpha, pow_
return pow_
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, df_num=None, df_denom=None,
nobs=None, alpha=None, power=None, ncc=1):
'''solve for any one parameter of the power of a F-test
for the one sample F-test the keywords are:
effect_size, df_num, df_denom, alpha, power
Exactly one needs to be ``None``, all others need numeric values.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
        df_num : int or float
            numerator degrees of freedom.
        df_denom : int or float
            denominator degrees of freedom.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
        ncc : int
            degrees of freedom correction for non-centrality parameter.
            see Notes
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
            value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
return super(FTestPower, self).solve_power(effect_size=effect_size,
df_num=df_num,
df_denom=df_denom,
alpha=alpha,
power=power,
ncc=ncc)
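# Editorial usage sketch (not part of the original module): for the generic
# F-test the sample size enters only through the degrees of freedom arguments
# (see the Notes above for how ncc and the df arguments are interpreted), so
# the power is usually computed directly rather than solved for:
#
#   >>> from statsmodels.stats.power import FTestPower
#   >>> FTestPower().power(effect_size=0.3, df_num=5, df_denom=100, alpha=0.05)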
class FTestAnovaPower(Power):
'''Statistical Power calculations F-test for one factor balanced ANOVA
'''
def power(self, effect_size, nobs, alpha, k_groups=2):
'''Calculate the power of a F-test for one factor ANOVA.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
k_groups : int or float
number of groups in the ANOVA or k-sample comparison. Default is 2.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
return ftest_anova_power(effect_size, nobs, alpha, k_groups=k_groups)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs=None, alpha=None, power=None,
k_groups=2):
'''solve for any one parameter of the power of a F-test
        for the one factor ANOVA F-test the keywords are:
effect_size, nobs, alpha, power
Exactly one needs to be ``None``, all others need numeric values.
Parameters
----------
effect_size : float
standardized effect size, mean divided by the standard deviation.
effect size has to be positive.
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
            value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
# update start values for root finding
        if k_groups is not None:
self.start_ttp['nobs'] = k_groups * 10
self.start_bqexp['nobs'] = dict(low=k_groups * 2,
start_upp=k_groups * 10)
# first attempt at special casing
if effect_size is None:
return self._solve_effect_size(effect_size=effect_size,
nobs=nobs,
alpha=alpha,
k_groups=k_groups,
power=power)
return super(FTestAnovaPower, self).solve_power(effect_size=effect_size,
nobs=nobs,
alpha=alpha,
k_groups=k_groups,
power=power)
def _solve_effect_size(self, effect_size=None, nobs=None, alpha=None,
power=None, k_groups=2):
'''experimental, test failure in solve_power for effect_size
'''
def func(x):
effect_size = x
return self._power_identity(effect_size=effect_size,
nobs=nobs,
alpha=alpha,
k_groups=k_groups,
power=power)
val, r = optimize.brentq(func, 1e-8, 1-1e-8, full_output=True)
if not r.converged:
print(r)
return val
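# Editorial usage sketch (not part of the original module): solving for the
# total number of observations in a balanced one-way ANOVA; note that ``nobs``
# here is the total sample size over all groups.  For Cohen's f = 0.25 with
# three groups and 80% power this is roughly 155-160 observations in total
# (value approximate).
#
#   >>> from statsmodels.stats.power import FTestAnovaPower
#   >>> FTestAnovaPower().solve_power(effect_size=0.25, nobs=None, alpha=0.05,
#   ...                               power=0.8, k_groups=3)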
class GofChisquarePower(Power):
'''Statistical Power calculations for one sample chisquare test
'''
def power(self, effect_size, nobs, alpha, n_bins, ddof=0):
#alternative='two-sided'):
'''Calculate the power of a chisquare test for one sample
Only two-sided alternative is implemented
Parameters
----------
effect_size : float
standardized effect size, according to Cohen's definition.
see :func:`statsmodels.stats.gof.chisquare_effectsize`
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
n_bins : int
number of bins or cells in the distribution.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
from statsmodels.stats.gof import chisquare_power
return chisquare_power(effect_size, nobs, n_bins, alpha, ddof=0)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs=None, alpha=None,
power=None, n_bins=2):
'''solve for any one parameter of the power of a one sample chisquare-test
for the one sample chisquare-test the keywords are:
effect_size, nobs, alpha, power
Exactly one needs to be ``None``, all others need numeric values.
n_bins needs to be defined, a default=2 is used.
Parameters
----------
effect_size : float
standardized effect size, according to Cohen's definition.
see :func:`statsmodels.stats.gof.chisquare_effectsize`
nobs : int or float
sample size, number of observations.
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
n_bins : int
number of bins or cells in the distribution
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
return super(GofChisquarePower, self).solve_power(effect_size=effect_size,
nobs=nobs,
n_bins=n_bins,
alpha=alpha,
power=power)
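# Editorial usage sketch (not part of the original module): solving for the
# sample size of a goodness-of-fit chisquare test with 5 cells; for Cohen's
# w = 0.3 and 80% power this is roughly 130-135 observations (value
# approximate).
#
#   >>> from statsmodels.stats.power import GofChisquarePower
#   >>> GofChisquarePower().solve_power(effect_size=0.3, nobs=None, alpha=0.05,
#   ...                                 power=0.8, n_bins=5)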
class _GofChisquareIndPower(Power):
'''Statistical Power calculations for chisquare goodness-of-fit test
TODO: this is not working yet
for 2sample case need two nobs in function
no one-sided chisquare test, is there one? use normal distribution?
-> drop one-sided options?
'''
def power(self, effect_size, nobs1, alpha, ratio=1,
alternative='two-sided'):
        '''Calculate the power of a chisquare test for two independent samples
Parameters
----------
effect_size : float
            standardized effect size, difference between the two means divided
by the standard deviation. effect size has to be positive.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
            The default for ratio is 1; to solve for ratio given the other
            arguments it has to be explicitly set to None.
alternative : string, 'two-sided' (default) or 'one-sided'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test.
'one-sided' assumes we are in the relevant tail.
Returns
-------
power : float
Power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
'''
from statsmodels.stats.gof import chisquare_power
nobs2 = nobs1*ratio
#equivalent to nobs = n1*n2/(n1+n2)=n1*ratio/(1+ratio)
nobs = 1./ (1. / nobs1 + 1. / nobs2)
return chisquare_power(effect_size, nobs, alpha)
#method is only added to have explicit keywords and docstring
def solve_power(self, effect_size=None, nobs1=None, alpha=None, power=None,
ratio=1., alternative='two-sided'):
        '''solve for any one parameter of the power of a two sample chisquare test
        for the chisquare test the keywords are:
effect_size, nobs1, alpha, power, ratio
exactly one needs to be ``None``, all others need numeric values
Parameters
----------
effect_size : float
            standardized effect size, difference between the two means divided
by the standard deviation.
nobs1 : int or float
number of observations of sample 1. The number of observations of
sample two is ratio times the size of sample 1,
i.e. ``nobs2 = nobs1 * ratio``
alpha : float in interval (0,1)
significance level, e.g. 0.05, is the probability of a type I
error, that is wrong rejections if the Null Hypothesis is true.
power : float in interval (0,1)
power of the test, e.g. 0.8, is one minus the probability of a
type II error. Power is the probability that the test correctly
rejects the Null Hypothesis if the Alternative Hypothesis is true.
ratio : float
ratio of the number of observations in sample 2 relative to
sample 1. see description of nobs1
            The default for ratio is 1; to solve for ratio given the other
            arguments it has to be explicitly set to None.
alternative : string, 'two-sided' (default) or 'one-sided'
extra argument to choose whether the power is calculated for a
two-sided (default) or one sided test.
'one-sided' assumes we are in the relevant tail.
Returns
-------
value : float
The value of the parameter that was set to None in the call. The
            value solves the power equation given the remaining parameters.
Notes
-----
The function uses scipy.optimize for finding the value that satisfies
the power equation. It first uses ``brentq`` with a prior search for
bounds. If this fails to find a root, ``fsolve`` is used. If ``fsolve``
also fails, then, for ``alpha``, ``power`` and ``effect_size``,
``brentq`` with fixed bounds is used. However, there can still be cases
where this fails.
'''
return super(_GofChisquareIndPower, self).solve_power(effect_size=effect_size,
nobs1=nobs1,
alpha=alpha,
power=power,
ratio=ratio,
alternative=alternative)
#shortcut functions
tt_solve_power = TTestPower().solve_power
tt_ind_solve_power = TTestIndPower().solve_power
zt_ind_solve_power = NormalIndPower().solve_power
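# Editorial note (not part of the original module): the shortcut functions are
# bound methods of module-level instances, so they accept exactly the same
# keyword arguments as the corresponding solve_power methods above, e.g.
#
#   >>> from statsmodels.stats.power import tt_ind_solve_power
#   >>> tt_ind_solve_power(effect_size=0.5, nobs1=None, alpha=0.05, power=0.8,
#   ...                    ratio=1., alternative='two-sided')  # about 64 per group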
| bsd-3-clause |
gibiansky/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 3 | 38410 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = tf.contrib.learn.datasets.load_boston()
features = tf.train.limit_epochs(
tf.reshape(tf.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = tf.reshape(tf.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = tf.FIFOQueue(30, tf.int32)
queue_runner = tf.train.QueueRunner(fake_queue, [tf.constant(0)])
tf.train.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = tf.contrib.learn.datasets.load_iris()
features = tf.reshape(tf.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = tf.reshape(tf.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = tf.contrib.learn.datasets.load_iris()
features = tf.reshape(tf.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': tf.reshape(tf.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = tf.contrib.learn.datasets.load_boston()
n_examples = len(boston.target)
features = tf.reshape(
tf.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = tf.reshape(tf.constant(boston.target), [n_examples, 1])
return tf.concat_v2([features, features], 0), tf.concat_v2([labels, labels],
0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (
tf.contrib.learn.ModeKeys.TRAIN,
tf.contrib.learn.ModeKeys.EVAL,
tf.contrib.learn.ModeKeys.INFER)
prediction, loss = (
tf.contrib.learn.models.linear_regression_zero_init(features, labels)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (
tf.contrib.learn.ModeKeys.TRAIN,
tf.contrib.learn.ModeKeys.EVAL,
tf.contrib.learn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (
tf.contrib.learn.models.linear_regression_zero_init(features, labels)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (
tf.contrib.learn.ModeKeys.TRAIN,
tf.contrib.learn.ModeKeys.EVAL,
tf.contrib.learn.ModeKeys.INFER)
prediction, loss = (
tf.contrib.learn.models.linear_regression_zero_init(features, labels)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return model_fn.ModelFnOps(mode=mode,
predictions=prediction,
loss=loss,
train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = tf.one_hot(labels, 3, 1, 0)
prediction, loss = (
tf.contrib.learn.models.logistic_regression_zero_init(features, labels)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
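# Editorial note (not part of the original test file): the model functions above
# follow the tf.contrib.learn contract of returning (predictions, loss, train_op)
# (or a ModelFnOps); the tests below plug them into an Estimator roughly like:
#
#   est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
#   est.fit(input_fn=boston_input_fn, steps=1)
#   predictions = list(est.predict(
#       input_fn=functools.partial(boston_input_fn, num_epochs=1)))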
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150], dtype=tf.int32)
feature_columns = [tf.contrib.layers.real_valued_column('feature',
dimension=4)]
est = tf.contrib.learn.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(
feature_columns)
export_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def export_input_fn_with_asset():
features, labels, inputs = export_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = tf.gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = tf.contrib.lookup.HashTable(
tf.contrib.lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
tf.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, export_input_fn_with_asset
class CheckCallsMonitor(tf.contrib.learn.monitors.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(tf.test.TestCase):
def testModelFnArgs(self):
expected_param = {'some_param': 'some_value'}
expected_config = tf.contrib.learn.RunConfig()
expected_config.i_am_test = True
def _argument_checker(features, labels, mode, params, config):
_, _ = features, labels
self.assertEqual(tf.contrib.learn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertTrue(config.i_am_test)
return tf.constant(0.), tf.constant(0.), tf.constant(0.)
est = tf.contrib.learn.Estimator(model_fn=_argument_checker,
params=expected_param,
config=expected_config)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = tf.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = tf.contrib.learn.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = tf.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == tf.contrib.learn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = tf.contrib.learn.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = tf.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = tf.contrib.learn.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(boston_input_fn, num_epochs=1),
as_iterable=True)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = tf.get_default_graph().seed
return tf.constant([[1.]]), tf.constant([1.])
config = tf.contrib.learn.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testCheckInputs(self):
est = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7., 8.], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7., 10.], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(ValueError,
'Either x or input_fn must be provided.',
est.fit, x=None, input_fn=None)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and x or y',
est.fit, x='X', input_fn=iris_input_fn)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and x or y',
est.fit, y='Y', input_fn=iris_input_fn)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and batch_size',
est.fit, input_fn=iris_input_fn, batch_size=100)
self.assertRaisesRegexp(
ValueError, 'Inputs cannot be tensors. Please provide input_fn.',
est.fit, x=tf.constant(1.))
def testUntrained(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_fn))
with self.assertRaises(tf.contrib.learn.NotFittedError):
_ = est.score(
x=boston.data,
y=boston.target.astype(np.float64))
with self.assertRaises(tf.contrib.learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTrainingDictionaryInput(self):
boston = tf.contrib.learn.datasets.load_boston()
output_dir = tempfile.mkdtemp()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'],
scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testContinueTraining(self):
boston = tf.contrib.learn.datasets.load_boston()
output_dir = tempfile.mkdtemp()
est = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_params_fn,
params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testBostonAll(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.SKCompat(
tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): tf.contrib.metrics.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(
predictions['class'],
predictions_class)
self.assertAllClose(
predictions['class'],
np.argmax(predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): tf.contrib.metrics.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(
classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIterator(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainInputFn(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testTrainStepsIsIncremental(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFn(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0]*2)
def testPredictConstInputFn(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = tf.reshape(tf.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = tf.reshape(tf.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {'other': tf.constant([0, 0, 0])}, tf.constant([0, 0, 0])
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitors(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testSummaryWriting(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = tf.contrib.testing.simple_values_from_events(
tf.contrib.testing.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(tf.train.SessionRunHook):
def begin(self):
self.loss_collection = tf.get_collection(tf.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with tf.test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, export_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(compat.as_bytes(tmpdir),
compat.as_bytes('my_extra_file'))
extra_file = tf.gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(compat.as_bytes(tmpdir),
compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, export_input_fn,
assets_extra=assets_extra)
self.assertTrue(tf.gfile.Exists(export_dir_base))
self.assertTrue(tf.gfile.Exists(export_dir))
self.assertTrue(tf.gfile.Exists(
os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('saved_model.pb'))))
self.assertTrue(tf.gfile.Exists(
os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('variables'))))
self.assertTrue(tf.gfile.Exists(
os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(tf.gfile.Exists(os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(tf.gfile.Exists(
os.path.join(compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(tf.gfile.Exists(
os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(tf.gfile.GFile(
os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(tf.gfile.Exists(
os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))))
self.assertTrue(tf.gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(tf.gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(compat.as_bytes(tmpdir),
compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with tf.Graph().as_default() as graph:
with tf.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# cleanup
tf.gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(tf.test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
tf.contrib.learn.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
tf.contrib.learn.infer_real_valued_columns_from_input(tf.constant(1.0))
def _assert_single_feature_column(
self, expected_shape, expected_dtype, feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual({
'': tf.FixedLenFeature(shape=expected_shape, dtype=expected_dtype)
}, feature_column.config)
def testInt32Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], tf.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.int32), None))
self._assert_single_feature_column([8], tf.int32, feature_columns)
def testInt64Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], tf.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.int64), None))
self._assert_single_feature_column([8], tf.int64, feature_columns)
def testFloat32Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], tf.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.float32), None))
self._assert_single_feature_column([8], tf.float32, feature_columns)
def testFloat64Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], tf.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.float64), None))
self._assert_single_feature_column([8], tf.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
tf.contrib.learn.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.constant(False, shape=[7, 8], dtype=tf.bool), None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (
tf.constant([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column(
[_BOSTON_INPUT_DIM], tf.float64, feature_columns)
def testIrisInputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column(
[_IRIS_INPUT_DIM], tf.float64, feature_columns)
class ReplicaDeviceSetterTest(tf.test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {tf.contrib.learn.TaskType.PS: ['fake_ps_0']}}
with tf.test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig()
with tf.device(estimator._get_replica_device_setter(config)):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig())):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {tf.contrib.learn.TaskType.PS: ['fake_ps_0']}}
with tf.test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig()
with tf.device(estimator._get_replica_device_setter(config)):
default_val = tf.constant([-1, -1], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
input_string = tf.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig())):
default_val = tf.constant([-1, -1], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
input_string = tf.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
tf.contrib.learn.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': tf.contrib.learn.TaskType.WORKER,
'index': 3
}
}
with tf.test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig()
with tf.device(estimator._get_replica_device_setter(config)):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
maxplanck-ie/HiCExplorer | hicexplorer/hicCorrectMatrix.py | 1 | 31877 | import warnings
import sys
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.simplefilter(action="ignore", category=PendingDeprecationWarning)
import argparse
from past.builtins import zip
from scipy.sparse import lil_matrix
from hicexplorer.iterativeCorrection import iterativeCorrection
from hicmatrix import HiCMatrix as hm
from hicexplorer._version import __version__
from hicexplorer.utilities import toString
from hicexplorer.utilities import convertNansToZeros, convertInfsToZeros
from hicexplorer.utilities import check_cooler
# Knight-Ruiz algorithm:
from krbalancing import *
# packages needed for plotting:
from matplotlib import use
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import numpy as np
debug = 0
import logging
log = logging.getLogger(__name__)
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
conflict_handler='resolve',
description="""
This function provides 2 balancing methods which can be applied on a raw
matrix.
I. KR: It balances a matrix using a fast balancing algorithm introduced by
Knight and Ruiz (2012).
II. ICE: Iterative correction of a Hi-C matrix (see Imakaev et al. 2012
Nature Methods for details). For this method to work correctly, bins with
zero reads assigned to them should be removed as they cannot be corrected.
Also, bins with a low number of reads should be removed, otherwise,
during the correction step, the counts associated with those bins will
be amplified (usually, zero and low coverage bins tend to contain
repetitive regions). Bins with an extremely high number of reads can
also be removed from the correction as they may represent copy number
variations.
To aid in the identification of bins with low and high read coverage, the
histogram of the number of reads can be plotted together with the
Median Absolute Deviation (MAD).
It is recommended to run hicCorrectMatrix as follows:
$ hicCorrectMatrix diagnostic_plot --matrix hic_matrix.h5 -o plot_file.png
Then, after revising the plot and deciding on the threshold values:
$ hicCorrectMatrix correct --correctionMethod ICE --matrix hic_matrix.h5 \r
--filterThreshold <lower threshold> <upper threshold> -o corrected_matrix
For a more in-depth review of how to determine the threshold values,
please visit:
http://hicexplorer.readthedocs.io/en/latest/content/example_usage.html\
#correction-of-hi-c-matrix
We recommend computing the normalization first (with hicNormalize) and correcting the data (with hicCorrectMatrix) in a second step.
"""
)
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
subparsers = parser.add_subparsers(
title="Options",
dest='command',
metavar='',
help="""To get detailed help on each of the options: \r
$ hicCorrectMatrix diagnostic_plot -h \r
$ hicCorrectMatrix correct -h
""")
plot_mode = subparsers.add_parser(
'diagnostic_plot',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help="""Plots a histogram of the coverage per bin together with the
modified z-score based on the median absolute deviation method
(see Boris Iglewicz and David Hoaglin 1993, Volume 16: How to Detect
and Handle Outliers The ASQC Basic References in Quality Control:
Statistical Techniques, Edward F. Mykytka, Ph.D., Editor).
""",
usage='%(prog)s '
'--matrix hic_matrix.h5 '
'-o file.png')
plot_modeRequired = plot_mode.add_argument_group('Required arguments')
plot_modeRequired.add_argument('--matrix', '-m',
help='Name of the Hi-C matrix to correct in .h5 format.',
required=True)
plot_modeRequired.add_argument('--plotName', '-o',
help='File name to save the diagnostic plot.',
required=True)
plot_modeOpt = plot_mode.add_argument_group('Optional arguments')
plot_modeOpt.add_argument('--chromosomes',
help='List of chromosomes to be included in the iterative '
'correction. The order of the given chromosomes will be then '
'kept for the resulting corrected matrix.',
default=None,
nargs='+')
plot_modeOpt.add_argument('--xMax',
help='Max value for the x-axis in counts per bin.',
default=None,
type=float)
plot_modeOpt.add_argument(
'--perchr',
help='Compute histogram per chromosome. For samples from cells with uneven number '
'of chromosomes and/or translocations it is advisable to check the histograms '
'per chromosome to find the most conservative `filterThreshold`.',
action='store_true')
plot_modeOpt.add_argument('--verbose',
help='Print processing status.',
action='store_true')
subparsers.add_parser(
'correct',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=[correct_subparser()],
help="""Run Knight-Ruiz matrix balancing algorithm (KR) or the iterative matrix correction (ICE) .""",
usage='%(prog)s '
'--matrix hic_matrix.h5 '
'--filterThreshold -1.2 5 (Only if ICE)'
'-o corrected_matrix.h5 \n')
return parser
def correct_subparser():
# define the arguments
parser = argparse.ArgumentParser(add_help=False)
parserRequired = parser.add_argument_group('Required arguments')
parserRequired.add_argument('--matrix', '-m',
help='Name of the Hi-C matrix to correct in .h5 format.',
required=True)
parserRequired.add_argument('--outFileName', '-o',
help='File name to save the resulting matrix. '
'The output is a .h5 file.',
required=True)
parserOpt = parser.add_argument_group('Optional arguments')
parserOpt.add_argument('--correctionMethod',
help='Method to be used for matrix correction. It'
' can be set to KR or ICE.',
type=str,
metavar='STR',
default='KR')
parserOpt.add_argument('--filterThreshold', '-t',
help='Removes bins of low or large coverage. '
'Usually these bins do not contain valid '
'Hi-C data or represent regions that '
'accumulate reads and thus must be '
'discarded. Use hicCorrectMatrix '
'diagnostic_plot to identify the modified '
'z-value thresholds. A lower and upper '
'threshold are required separated by '
'space, e.g. --filterThreshold -1.5 5. Applied'
' only for ICE!',
type=float,
nargs=2,
default=None)
parserOpt.add_argument('--iterNum', '-n',
help='Number of iterations to compute. '
'Only for ICE!',
type=int,
metavar='INT',
default=500)
parserOpt.add_argument('--inflationCutoff',
help='Value corresponding to the maximum number of '
'times a bin can be scaled up during the iterative '
'correction. For example, an inflation cutoff of 3 '
'will filter out all bins that were expanded 3 '
'times or more during the iterative correction. '
'Only for ICE!',
type=float)
parserOpt.add_argument('--transCutoff', '-transcut',
help='Clip high counts in the top -transcut trans '
'regions (i.e. between chromosomes). A usual value '
'is 0.05. Only for ICE! ',
type=float)
parserOpt.add_argument('--sequencedCountCutoff',
help='Each bin receives a value indicating the '
'fraction that is covered by reads. A cutoff of '
'0.5 will discard all those bins that have less '
'than half of the bin covered. Only for ICE!',
default=None,
type=float)
parserOpt.add_argument('--chromosomes',
help='List of chromosomes to be included in the '
'iterative correction. The order of the given '
'chromosomes will be then kept for the resulting '
'corrected matrix',
default=None,
nargs='+')
parserOpt.add_argument('--skipDiagonal', '-s',
help='If set, diagonal counts are not included.'
' Only for ICE!',
action='store_true')
parserOpt.add_argument('--perchr',
help='Normalize each chromosome separately. This is'
' useful for samples from cells with uneven number '
'of chromosomes and/or translocations.',
action='store_true')
parserOpt.add_argument('--verbose',
help='Print processing status.',
action='store_true')
parserOpt.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
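# A hedged sketch (illustration only, file names are hypothetical) of how the
# two parsers above compose: the 'correct' subcommand inherits every option
# from correct_subparser() via parents=[...], while 'diagnostic_plot' only
# exposes the plotting options.
"""
args = parse_arguments().parse_args(
    ['correct', '--correctionMethod', 'ICE',
     '--matrix', 'hic_matrix.h5',
     '--filterThreshold', '-1.5', '5',
     '--outFileName', 'corrected_matrix.h5'])
"""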
def iterative_correction(matrix, args):
corrected_matrix, correction_factors = iterativeCorrection(matrix,
M=args.iterNum,
verbose=args.verbose)
return corrected_matrix, correction_factors
def fill_gaps(hic_ma, failed_bins, fill_contiguous=False):
""" try to fill-in the failed_bins the matrix by adding the
average values of the neighboring rows and cols. The idea
for the iterative correction is that is best to put
something in contrast to not put anything
hic_ma: Hi-C matrix object
failed_bins: list of bin ids
fill_contiguous: If True, stretches of masked rows/cols are filled.
Otherwise, these cases are skipped
"""
log.debug("starting fill gaps")
mat_size = hic_ma.matrix.shape[0]
fill_ma = hic_ma.matrix.copy().tolil()
if fill_contiguous is True:
discontinuous_failed = failed_bins
consecutive_failed_idx = np.array([])
else:
# find stretches of consecutive failed regions
consecutive_failed_idx = np.flatnonzero(np.diff(failed_bins) == 1)
# the banned list of indices is equal to the actual list
# and the list plus one, to identify consecutive failed regions.
# for [1,2,5,10] np.diff gives [1,3,5]. The consecutive index list
# is [0], i.e. the '1' in the original list, but the '2' is missing;
# that's where the consecutive_failed_idx + 1 comes in.
consecutive_failed_idx = np.unique(np.sort(
np.concatenate([consecutive_failed_idx,
consecutive_failed_idx + 1])))
# find the failed regions that are not consecutive
discontinuous_failed = [x for idx, x in enumerate(failed_bins)
if idx not in consecutive_failed_idx]
log.debug("Filling {} failed bins\n".format(
len(discontinuous_failed)))
"""
for missing_bin in discontinuous_failed:
if 0 < missing_bin < mat_size - 1:
for idx in range(1, mat_size - 2):
if idx % 100 == 0:
log.info(".")
# the new row value is the mean between the upper
# and lower bins corresponding to the same diagonal
fill_ma[missing_bin, idx :] = \
(hic_ma.matrix[missing_bin-1, idx-1] +
hic_ma.matrix[missing_bin+1, idx+1]) / 2
# same for cols
fill_ma[idx, missing_bin] = \
(hic_ma.matrix[idx-1, missing_bin-1] +
hic_ma.matrix[idx+1, missing_bin+1]) / 2
"""
for missing_bin in discontinuous_failed:
if 0 < missing_bin < mat_size - 1:
# the new row value is the mean between the upper
# and lower rows
fill_ma[missing_bin, 1:mat_size - 1] = \
(hic_ma.matrix[missing_bin - 1, :mat_size - 2] +
hic_ma.matrix[missing_bin + 1, 2:]) / 2
# same for cols
fill_ma[1:mat_size - 1, missing_bin] = \
(hic_ma.matrix[:mat_size - 2, missing_bin - 1] +
hic_ma.matrix[2:, missing_bin + 1]) / 2
# identify the intersection points of the failed regions because their
# neighbors get wrong values
for bin_a in discontinuous_failed:
for bin_b in discontinuous_failed:
if 0 < bin_a < mat_size and \
0 < bin_b < mat_size:
# the fill value is the average over the
# neighbors that do have a value
fill_value = np.mean([
hic_ma.matrix[bin_a - 1, bin_b - 1],
hic_ma.matrix[bin_a - 1, bin_b + 1],
hic_ma.matrix[bin_a + 1, bin_b - 1],
hic_ma.matrix[bin_a + 1, bin_b + 1],
])
fill_ma[bin_a - 1, bin_b] = fill_value
fill_ma[bin_a + 1, bin_b] = fill_value
fill_ma[bin_a, bin_b - 1] = fill_value
fill_ma[bin_a, bin_b + 1] = fill_value
# return the matrix and the bins that continue to be failed regions
return fill_ma.tocsr(), np.sort(failed_bins[consecutive_failed_idx])
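# Worked sketch of the fill rule above (illustration only, toy numbers):
"""
# For a single failed bin i the row/column are replaced by the mean of the
# two flanking rows shifted along the diagonal, e.g. with i = 2, mat_size = 5
#   hic_ma.matrix[1, :] = [4, 6, 0, 2, 8]
#   hic_ma.matrix[3, :] = [2, 4, 0, 6, 4]
# gives
#   fill_ma[2, 1:4] = (matrix[1, 0:3] + matrix[3, 2:5]) / 2 = [2, 6, 2]
# Consecutive failed bins are skipped (unless fill_contiguous=True) because
# both flanking rows would themselves be empty.
"""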
class MAD(object):
def __init__(self, points):
"""
Computes the modified z-scores (based on the median absolute
deviation) for the given points. Use is_outlier() to obtain a
boolean array with True for outliers and False otherwise.
:param points: An array
:references: Boris Iglewicz and David Hoaglin (1993), "Volume 16:
How to Detect and Handle Outliers", The ASQC Basic References in
Quality Control:
Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
"""
self.mad_b_value = 0.6745
if len(points.shape) == 1:
points = points[:, None]
self.median = np.median(points[points > 0], axis=0)
diff = np.sum((points - self.median), axis=-1)
self.med_abs_deviation = np.median(np.abs(diff))
self.modified_z_score = self.mad_b_value * diff / self.med_abs_deviation
def get_modified_zscores(self):
return self.modified_z_score
def is_outlier(self, lower_threshold, upper_threshold):
"""
Returns a boolean list of outliers
:param lower_threshold: Lower median absolute deviation
:param upper_threshold: upper median absolute deviation
:return: boolean array
"""
return (self.modified_z_score < lower_threshold) | \
(self.modified_z_score > upper_threshold)
def value_to_mad(self, value):
"""
return the mad value for a given value
based on the data
"""
log.debug("self.median: {}".format(self.median))
diff = value - self.median
log.debug("diff: {}".format(diff))
log.debug("self.med_abs_deviation: {}".format(self.med_abs_deviation))
log.debug("self.mad_b_value: {}".format(self.mad_b_value))
log.debug("all together: {}".format(
self.mad_b_value * diff / self.med_abs_deviation))
# workaround for 'Axis limits cannot be NaN or Inf' bug in version 2.1.1
# prevent dividing by 0!!!
if self.med_abs_deviation == 0.0:
return self.mad_b_value * diff
return self.mad_b_value * diff / self.med_abs_deviation
def mad_to_value(self, mad):
"""
return the numeric value for a given mad score
based on the data
z = b_v * (x - median) / mad
z * mad / b_v = x - median
(z * mad / b_v) + median = x
"""
return (mad * self.med_abs_deviation / self.mad_b_value) + self.median
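# Hedged usage sketch for the MAD helper (illustration only; the counts
# below are made up):
"""
row_sum = np.array([10., 12., 11., 300.])
mad = MAD(row_sum)
mad.get_modified_zscores()   # large positive score for the 300-count bin
mad.is_outlier(-2, 5)        # -> [False, False, False, True]
mad.mad_to_value(5)          # counts-per-bin value corresponding to z = 5
"""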
def plot_total_contact_dist(hic_ma, args):
"""
Plots the distribution of the number of contacts per bin (excluding
self contacts). Outliers with a very high number of contacts are
removed from the plot.
:param hic_ma: sparse matrix
:return:
"""
use('Agg')
majorlocator = MultipleLocator(1)
majorformatter = FormatStrFormatter('%d')
minorlocator = MultipleLocator(0.2)
def plot_histogram(row_sum_values, mad_values, ax1, title=None):
if args.xMax:
ax1.set_xlim(ax1.get_xlim()[0], args.xMax)
row_sum_values = row_sum_values[row_sum_values < args.xMax]
ax1.set_xlabel("total counts per bin")
ax1.set_ylabel("frequency")
# ax1.xaxis.grid(True)
ax1.patch.set_visible(False)
dist, bin_s, __ = ax1.hist(row_sum_values, 100, color='green')
# add second axis on top
ax2 = ax1.twiny()
ax2.set_xlabel("modified z-score")
ax2.xaxis.set_major_locator(majorlocator)
ax2.xaxis.set_major_formatter(majorformatter)
ax2.xaxis.grid(True, which='minor')
# for the minor ticks, use no labels; default NullFormatter
ax2.xaxis.set_minor_locator(minorlocator)
# update second axis values by mapping the min max
# of the main axis to the translated values
# into modified z score.
# workaround for 'Axis limits cannot be NaN or Inf' bug in version 2.1.1
log.debug("ax1.get_xlim(): {}".format(ax1.get_xlim()))
log.debug("np.array(ax1.get_xlim()): {}".format(np.array(
ax1.get_xlim())))
log.debug("mad_values.value_to_mad(np.array(ax1.get_xlim())): {}".format(
mad_values.value_to_mad(np.array(ax1.get_xlim()))))
ax2.set_xlim(mad_values.value_to_mad(np.array(ax1.get_xlim())))
# get the first local minimum value
local_min = [x for x, y in enumerate(dist) if 1 <= x < len(
dist) - 1 and dist[x - 1] > y < dist[x + 1]]
if len(local_min) > 0:
threshold = bin_s[local_min[0]]
else:
threshold = None
if threshold:
mad_threshold = mad_values.value_to_mad(threshold)
ymin, ymax = ax2.get_ylim()
ax2.vlines(mad_threshold, ymin, ymax)
if title:
log.info("{}: mad threshold {}".format(title, mad_threshold))
else:
log.info("mad threshold {}".format(mad_threshold))
# replace nan by 0
# hic_ma.matrix.data[np.isnan(hic_ma.matrix.data)] = 0
hic_ma.matrix = convertNansToZeros(hic_ma.matrix)
hic_ma.matrix = convertInfsToZeros(hic_ma.matrix)
if args.perchr:
chroms = hic_ma.getChrNames()
if len(chroms) > 30:
log.warning("The matrix contains {} chromosomes. It is not "
"practical to plot each. Try using --chromosomes to "
"select some chromosomes or plot a single histogram.")
num_rows = int(np.ceil(float(len(chroms)) / 5))
num_cols = min(len(chroms), 5)
grids = gridspec.GridSpec(num_rows, num_cols)
fig = plt.figure(figsize=(6 * num_cols, 5 * num_rows))
ax = {}
for plot_num, chrname in enumerate(chroms):
log.info("Plotting chromosome {}".format(chrname))
chr_range = hic_ma.getChrBinRange(chrname)
chr_submatrix = hic_ma.matrix[chr_range[0]:chr_range[1], chr_range[0]:chr_range[1]]
row_sum = np.asarray(chr_submatrix.sum(axis=1)).flatten()
row_sum = row_sum - chr_submatrix.diagonal()
mad = MAD(row_sum)
modified_z_score = mad.get_modified_zscores()
# remove the high outliers
row_sum = row_sum[modified_z_score < 5]
col = plot_num % num_cols
row = plot_num // num_cols
ax[chrname] = fig.add_subplot(grids[row, col])
plot_histogram(row_sum, mad, ax[chrname], title=chrname)
ax[chrname].set_title(chrname)
else:
fig = plt.figure()
row_sum = np.asarray(hic_ma.matrix.sum(axis=1)).flatten()
row_sum = row_sum - hic_ma.matrix.diagonal()
mad = MAD(row_sum)
modified_z_score = mad.get_modified_zscores()
# remove the high outliers
row_sum = row_sum[modified_z_score < 5]
ax = fig.add_subplot(111)
plot_histogram(row_sum, mad, ax)
plt.tight_layout()
plt.savefig(args.plotName)
plt.close()
def filter_by_zscore(hic_ma, lower_threshold, upper_threshold, perchr=False):
"""
When perchr is set, the thresholds are applied per chromosome to avoid
introducing bias in samples with an uneven number of chromosomes.
"""
to_remove = []
if perchr:
for chrname in list(hic_ma.interval_trees):
chr_range = hic_ma.getChrBinRange(chrname)
chr_submatrix = hic_ma.matrix[chr_range[0]:chr_range[1],
chr_range[0]:chr_range[1]]
# replace nan values by zero
chr_submatrix.data[np.isnan(chr_submatrix.data)] = 0
row_sum = np.asarray(chr_submatrix.sum(axis=1)).flatten()
# subtract the diagonal from the row sum to count only
# interactions with other bins and not the self interactions,
# which dominate the counts
row_sum = row_sum - chr_submatrix.diagonal()
mad = MAD(row_sum)
problematic = np.flatnonzero(
mad.is_outlier(lower_threshold, upper_threshold))
# because the problematic indices are specific for the given chromosome
# they need to be updated to match the large matrix indices
problematic += chr_range[0]
if len(problematic) == 0:
log.warn("Warning. No bins removed for chromosome {} using thresholds {} {}"
"\n".format(chrname, lower_threshold, upper_threshold))
to_remove.extend(problematic)
else:
row_sum = np.asarray(hic_ma.matrix.sum(axis=1)).flatten()
# subtract the diagonal from the row sum to count only
# interactions with other bins and not the self interactions,
# which dominate the counts
row_sum = row_sum - hic_ma.matrix.diagonal()
mad = MAD(row_sum)
to_remove = np.flatnonzero(mad.is_outlier(
lower_threshold, upper_threshold))
return sorted(to_remove)
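# Hedged sketch (illustration only): after inspecting the diagnostic plot and
# choosing thresholds, the ICE branch of main() essentially does
"""
outliers = filter_by_zscore(ma, -1.5, 5.0, perchr=True)
ma.maskBins(outliers)
corrected, factors = iterativeCorrection(ma.matrix, M=500)
"""
# where -1.5 and 5.0 stand for the user-chosen lower/upper modified z-score
# cutoffs.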
def main(args=None):
args = parse_arguments().parse_args(args)
if args.verbose:
log.setLevel(logging.INFO)
# args.chromosomes
if check_cooler(args.matrix) and args.chromosomes is not None and len(args.chromosomes) == 1:
ma = hm.hiCMatrix(args.matrix, pChrnameList=toString(args.chromosomes))
else:
ma = hm.hiCMatrix(args.matrix)
if args.chromosomes:
ma.reorderChromosomes(toString(args.chromosomes))
# mask all zero value bins
if 'correctionMethod' in args:
if args.correctionMethod == 'ICE':
row_sum = np.asarray(ma.matrix.sum(axis=1)).flatten()
log.info("Removing {} zero value bins".format(sum(row_sum == 0)))
ma.maskBins(np.flatnonzero(row_sum == 0))
matrix_shape = ma.matrix.shape
if 'plotName' in args:
row_sum = np.asarray(ma.matrix.sum(axis=1)).flatten()
log.info("Removing {} zero value bins".format(sum(row_sum == 0)))
ma.maskBins(np.flatnonzero(row_sum == 0))
matrix_shape = ma.matrix.shape
ma.matrix = convertNansToZeros(ma.matrix)
ma.matrix = convertInfsToZeros(ma.matrix)
ma.matrix = ma.matrix.astype(np.float64, copy=True)
log.debug('ma.matrix.indices {}'.format(ma.matrix.indices.dtype))
log.debug('ma.matrix.data {}'.format(ma.matrix.data.dtype))
log.debug('ma.matrix.indptr {}'.format(ma.matrix.indptr.dtype))
# log.debug('ma.matrix.indices {}'.format(np.max(ma.matrix.indices)))
# log.debug('ma.matrix.data {}'.format(np.max(ma.matrix.data)))
# log.debug('ma.matrix.indptr {}'.format(np.max(ma.matrix.indptr)))
# ma.matrix.indptr = ma.matrix.indptr.astype(np.int32, copy=False)
# ma.matrix.indices = ma.matrix.indices.astype(np.int32, copy=False)
if 'plotName' in args:
plot_total_contact_dist(ma, args)
log.info("Saving diagnostic plot {}\n".format(args.plotName))
return
log.info("matrix contains {} data points. Sparsity {:.3f}.".format(
len(ma.matrix.data),
float(len(ma.matrix.data)) / (ma.matrix.shape[0] ** 2)))
if args.skipDiagonal:
ma.diagflat(value=0)
total_filtered_out = set()
if args.correctionMethod == 'ICE':
if not args.filterThreshold:
log.error('min and max filtering thresholds should be set')
sys.exit(1)
outlier_regions = filter_by_zscore(
ma, args.filterThreshold[0], args.filterThreshold[1], perchr=args.perchr)
# compute and print some statistics
pct_outlier = 100 * float(len(outlier_regions)) / ma.matrix.shape[0]
ma.printchrtoremove(outlier_regions, label="Bins that are MAD outliers ({:.2f}%) "
"out of".format(pct_outlier, ma.matrix.shape[0]),
restore_masked_bins=False)
assert matrix_shape == ma.matrix.shape
# mask filtered regions
ma.maskBins(outlier_regions)
total_filtered_out = set(outlier_regions)
if args.sequencedCountCutoff and 0 < args.sequencedCountCutoff < 1:
chrom, _, _, coverage = zip(*ma.cut_intervals)
assert type(coverage[0]) == np.float64
failed_bins = np.flatnonzero(
np.array(coverage) < args.sequencedCountCutoff)
ma.printchrtoremove(failed_bins, label="Bins with low coverage",
restore_masked_bins=False)
ma.maskBins(failed_bins)
total_filtered_out = set(failed_bins)
"""
ma.matrix, to_remove = fill_gaps(ma, failed_bins)
log.warning("From {} failed bins, {} could "
"not be filled\n".format(len(failed_bins),
len(to_remove)))
ma.maskBins(to_remove)
"""
if args.transCutoff and 0 < args.transCutoff < 100:
cutoff = float(args.transCutoff) / 100
# a usual cutoff is 0.05
ma.truncTrans(high=cutoff)
pre_row_sum = np.asarray(ma.matrix.sum(axis=1)).flatten()
correction_factors = []
corrected_matrix = lil_matrix(ma.matrix.shape)
if args.perchr:
# normalize each chromosome independently
for chrname in list(ma.interval_trees):
chr_range = ma.getChrBinRange(chrname)
chr_submatrix = ma.matrix[chr_range[0]:chr_range[1], chr_range[0]:chr_range[1]]
if args.correctionMethod == 'ICE':
_matrix, _corr_factors = iterative_correction(
chr_submatrix, args)
corrected_matrix[chr_range[0]:chr_range[1],
chr_range[0]:chr_range[1]] = _matrix
correction_factors.append(_corr_factors)
else:
# Set the kr matrix along with its correction factors vector
assert(args.correctionMethod == 'KR')
log.debug("Loading a float sparse matrix for KR balancing")
kr = kr_balancing(chr_submatrix.shape[0],
chr_submatrix.shape[1],
chr_submatrix.count_nonzero(),
chr_submatrix.indptr.astype(
np.int64, copy=False),
chr_submatrix.indices.astype(
np.int64, copy=False),
chr_submatrix.data.astype(np.float64, copy=False))
kr.computeKR()
if args.outFileName.endswith('.h5'):
corrected_matrix[chr_range[0]:chr_range[1], chr_range[0]:chr_range[1]] = kr.get_normalised_matrix(True)
# correction_factors.append(np.true_divide(1,
# kr.get_normalisation_vector(False).todense()))
correction_factors.append(
kr.get_normalisation_vector(False).todense())
correction_factors = np.concatenate(correction_factors)
else:
if args.correctionMethod == 'ICE':
corrected_matrix, correction_factors = iterative_correction(
ma.matrix, args)
ma.setMatrixValues(corrected_matrix)
else:
assert(args.correctionMethod == 'KR')
log.debug("Loading a float sparse matrix for KR balancing")
kr = kr_balancing(ma.matrix.shape[0], ma.matrix.shape[1],
ma.matrix.count_nonzero(), ma.matrix.indptr.astype(np.int64, copy=False),
ma.matrix.indices.astype(np.int64, copy=False), ma.matrix.data.astype(np.float64, copy=False))
log.debug('passed pointers')
kr.computeKR()
log.debug('computation done')
# set it to False since the vector is already normalised
# with the previous True
# correction_factors = np.true_divide(1, kr.get_normalisation_vector(False).todense())
correction_factors = kr.get_normalisation_vector(False).todense()
if args.outFileName.endswith('.h5'):
corrected_matrix = kr.get_normalised_matrix(True)
if args.outFileName.endswith('.h5'):
ma.setMatrixValues(corrected_matrix)
# if
ma.setCorrectionFactors(correction_factors)
log.debug("Correction factors {}".format(correction_factors[:10]))
if args.inflationCutoff and args.inflationCutoff > 0 and args.correctionMethod == 'ICE':
after_row_sum = np.asarray(corrected_matrix.sum(axis=1)).flatten()
# identify rows that were expanded more than args.inflationCutoff times
to_remove = np.flatnonzero(
after_row_sum / pre_row_sum >= args.inflationCutoff)
ma.printchrtoremove(to_remove,
label="inflated >={} "
"regions".format(args.inflationCutoff), restore_masked_bins=False)
total_filtered_out = total_filtered_out.union(to_remove)
ma.maskBins(to_remove)
ma.printchrtoremove(sorted(list(total_filtered_out)),
label="Total regions to be removed", restore_masked_bins=False)
ma.save(args.outFileName, pApplyCorrection=False)
| gpl-2.0 |
marrcio/relate-kanji | resources/util/toolbox/graphictools.py | 1 | 1873 | import matplotlib.pyplot as plt
from collections import Counter
plt.ion()
def visualize_bars(iterable, width=0.5, color='b', counter_feed=False, high_dpi=True, transformation=lambda x:x):
if counter_feed:
c = iterable
else:
c = Counter(iterable)
if high_dpi:
plt.figure(dpi=200)
x,y = zip(*c.items())
plt.bar([n-width/2 for n in x], transformation(y), width=width, color=color)
plt.grid(True)
ax = plt.axes()
rects = ax.patches
for rect, number in zip(rects, y):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, '%d' % number, ha='center', va='bottom')
return c
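# Hedged usage sketch (not part of the original module; the data are made up):
# counts = visualize_bars([1, 1, 2, 3, 3, 3])               # frequency bar chart
# visualize_bars(Counter({1: 5, 2: 2}), counter_feed=True)  # pre-counted input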
def plot(X, Y=None, high_dpi=True, start_on_one=False, **kwargs):
if high_dpi:
plt.figure(dpi=200)
if Y:
plt.plot(X, Y, **kwargs)
elif start_on_one:
plt.plot(range(1, len(X) + 1), X, **kwargs)
else:
plt.plot(X, **kwargs)
def scatter(X, Y=None, high_dpi=True, dilog=False, **kwargs):
if high_dpi:
plt.figure(dpi=200)
if Y:
plt.scatter(X, Y, **kwargs)
else:
# if no Y is given, treat X as the Y values and use indices as X:
Y = X
X = range(1, len(X) + 1)
plt.scatter(X, Y, **kwargs)
if dilog:
plt.xscale('log')
plt.yscale('log')
def reference(ref, scatter=False, xmode=False, **kwargs):
"""Draws a reference line to the closest point to X"""
axes = plt.gca()
xy = axes.collections[0].get_offsets() if scatter else axes.get_lines()[0].get_xydata()
for x, y in xy:
if xmode:
if x >= ref:
break
else:
if y >= ref:
break
plt.plot((0, x), (y, y), **kwargs)
plt.plot((x, x), (0, y), **kwargs)
plt.xticks(list(plt.xticks()[0]) + [x])
plt.yticks(list(plt.yticks()[0]) + [y])
return (x, y) | mit |
kou/arrow | python/pyarrow/tests/test_flight.py | 3 | 68243 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import base64
import itertools
import os
import signal
import struct
import tempfile
import threading
import time
import traceback
import numpy as np
import pytest
import pyarrow as pa
from pyarrow.lib import tobytes
from pyarrow.util import pathlib, find_free_port
from pyarrow.tests import util
try:
from pyarrow import flight
from pyarrow.flight import (
FlightClient, FlightServerBase,
ServerAuthHandler, ClientAuthHandler,
ServerMiddleware, ServerMiddlewareFactory,
ClientMiddleware, ClientMiddlewareFactory,
)
except ImportError:
flight = None
FlightClient, FlightServerBase = object, object
ServerAuthHandler, ClientAuthHandler = object, object
ServerMiddleware, ServerMiddlewareFactory = object, object
ClientMiddleware, ClientMiddlewareFactory = object, object
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not flight'
pytestmark = pytest.mark.flight
def test_import():
# So we see the ImportError somewhere
import pyarrow.flight # noqa
def resource_root():
"""Get the path to the test resources directory."""
if not os.environ.get("ARROW_TEST_DATA"):
raise RuntimeError("Test resources not found; set "
"ARROW_TEST_DATA to <repo root>/testing/data")
return pathlib.Path(os.environ["ARROW_TEST_DATA"]) / "flight"
def read_flight_resource(path):
"""Get the contents of a test resource file."""
root = resource_root()
if not root:
return None
try:
with (root / path).open("rb") as f:
return f.read()
except FileNotFoundError:
raise RuntimeError(
"Test resource {} not found; did you initialize the "
"test resource submodule?\n{}".format(root / path,
traceback.format_exc()))
def example_tls_certs():
"""Get the paths to test TLS certificates."""
return {
"root_cert": read_flight_resource("root-ca.pem"),
"certificates": [
flight.CertKeyPair(
cert=read_flight_resource("cert0.pem"),
key=read_flight_resource("cert0.key"),
),
flight.CertKeyPair(
cert=read_flight_resource("cert1.pem"),
key=read_flight_resource("cert1.key"),
),
]
}
def simple_ints_table():
data = [
pa.array([-10, -5, 0, 5, 10])
]
return pa.Table.from_arrays(data, names=['some_ints'])
def simple_dicts_table():
dict_values = pa.array(["foo", "baz", "quux"], type=pa.utf8())
data = [
pa.chunked_array([
pa.DictionaryArray.from_arrays([1, 0, None], dict_values),
pa.DictionaryArray.from_arrays([2, 1], dict_values)
])
]
return pa.Table.from_arrays(data, names=['some_dicts'])
class ConstantFlightServer(FlightServerBase):
"""A Flight server that always returns the same data.
See ARROW-4796: this server implementation will segfault if Flight
does not properly hold a reference to the Table object.
"""
CRITERIA = b"the expected criteria"
def __init__(self, location=None, options=None, **kwargs):
super().__init__(location, **kwargs)
# Ticket -> Table
self.table_factories = {
b'ints': simple_ints_table,
b'dicts': simple_dicts_table,
}
self.options = options
def list_flights(self, context, criteria):
if criteria == self.CRITERIA:
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
def do_get(self, context, ticket):
# Return a fresh table, so that Flight is the only one keeping a
# reference.
table = self.table_factories[ticket.ticket]()
return flight.RecordBatchStream(table, options=self.options)
class MetadataFlightServer(FlightServerBase):
"""A Flight server that numbers incoming/outgoing data."""
def __init__(self, options=None, **kwargs):
super().__init__(**kwargs)
self.options = options
def do_get(self, context, ticket):
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
return flight.GeneratorStream(
table.schema,
self.number_batches(table),
options=self.options)
def do_put(self, context, descriptor, reader, writer):
counter = 0
expected_data = [-10, -5, 0, 5, 10]
while True:
try:
batch, buf = reader.read_chunk()
assert batch.equals(pa.RecordBatch.from_arrays(
[pa.array([expected_data[counter]])],
['a']
))
assert buf is not None
client_counter, = struct.unpack('<i', buf.to_pybytes())
assert counter == client_counter
writer.write(struct.pack('<i', counter))
counter += 1
except StopIteration:
return
@staticmethod
def number_batches(table):
for idx, batch in enumerate(table.to_batches()):
buf = struct.pack('<i', idx)
yield batch, buf
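# Hedged sketch of the matching client side for this server (illustration
# only; `client` and `table` are assumed to exist):
#
#   writer, metadata_reader = client.do_put(
#       flight.FlightDescriptor.for_path('counted'), table.schema)
#   for idx, batch in enumerate(table.to_batches()):
#       writer.write_with_metadata(batch, struct.pack('<i', idx))
#       echoed, = struct.unpack('<i', metadata_reader.read().to_pybytes())
#   writer.close()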
class EchoFlightServer(FlightServerBase):
"""A Flight server that returns the last data uploaded."""
def __init__(self, location=None, expected_schema=None, **kwargs):
super().__init__(location, **kwargs)
self.last_message = None
self.expected_schema = expected_schema
def do_get(self, context, ticket):
return flight.RecordBatchStream(self.last_message)
def do_put(self, context, descriptor, reader, writer):
if self.expected_schema:
assert self.expected_schema == reader.schema
self.last_message = reader.read_all()
class EchoStreamFlightServer(EchoFlightServer):
"""An echo server that streams individual record batches."""
def do_get(self, context, ticket):
return flight.GeneratorStream(
self.last_message.schema,
self.last_message.to_batches(max_chunksize=1024))
def list_actions(self, context):
return []
def do_action(self, context, action):
if action.type == "who-am-i":
return [context.peer_identity(), context.peer().encode("utf-8")]
raise NotImplementedError
class GetInfoFlightServer(FlightServerBase):
"""A Flight server that tests GetFlightInfo."""
def get_flight_info(self, context, descriptor):
return flight.FlightInfo(
pa.schema([('a', pa.int32())]),
descriptor,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
def get_schema(self, context, descriptor):
info = self.get_flight_info(context, descriptor)
return flight.SchemaResult(info.schema)
class ListActionsFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
@classmethod
def expected_actions(cls):
return [
("action-1", "description"),
("action-2", ""),
flight.ActionType("action-3", "more detail"),
]
def list_actions(self, context):
yield from self.expected_actions()
class ListActionsErrorFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
def list_actions(self, context):
yield ("action-1", "")
yield "foo"
class CheckTicketFlightServer(FlightServerBase):
"""A Flight server that compares the given ticket to an expected value."""
def __init__(self, expected_ticket, location=None, **kwargs):
super().__init__(location, **kwargs)
self.expected_ticket = expected_ticket
def do_get(self, context, ticket):
assert self.expected_ticket == ticket.ticket
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
return flight.RecordBatchStream(table)
def do_put(self, context, descriptor, reader):
self.last_message = reader.read_all()
class InvalidStreamFlightServer(FlightServerBase):
"""A Flight server that tries to return messages with differing schemas."""
schema = pa.schema([('a', pa.int32())])
def do_get(self, context, ticket):
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())]
assert data1[0].type != data2[0].type
table1 = pa.Table.from_arrays(data1, names=['a'])
table2 = pa.Table.from_arrays(data2, names=['a'])
assert table1.schema == self.schema
return flight.GeneratorStream(self.schema, [table1, table2])
class NeverSendsDataFlightServer(FlightServerBase):
"""A Flight server that never actually yields data."""
schema = pa.schema([('a', pa.int32())])
def do_get(self, context, ticket):
if ticket.ticket == b'yield_data':
# Check that the server handler will ignore empty tables
# up to a certain extent
data = [
self.schema.empty_table(),
self.schema.empty_table(),
pa.RecordBatch.from_arrays([range(5)], schema=self.schema),
]
return flight.GeneratorStream(self.schema, data)
return flight.GeneratorStream(
self.schema, itertools.repeat(self.schema.empty_table()))
class SlowFlightServer(FlightServerBase):
"""A Flight server that delays its responses to test timeouts."""
def do_get(self, context, ticket):
return flight.GeneratorStream(pa.schema([('a', pa.int32())]),
self.slow_stream())
def do_action(self, context, action):
time.sleep(0.5)
return []
@staticmethod
def slow_stream():
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
yield pa.Table.from_arrays(data1, names=['a'])
# The second message should never get sent; the client should
# cancel before we send this
time.sleep(10)
yield pa.Table.from_arrays(data1, names=['a'])
class ErrorFlightServer(FlightServerBase):
"""A Flight server that uses all the Flight-specific errors."""
def do_action(self, context, action):
if action.type == "internal":
raise flight.FlightInternalError("foo")
elif action.type == "timedout":
raise flight.FlightTimedOutError("foo")
elif action.type == "cancel":
raise flight.FlightCancelledError("foo")
elif action.type == "unauthenticated":
raise flight.FlightUnauthenticatedError("foo")
elif action.type == "unauthorized":
raise flight.FlightUnauthorizedError("foo")
elif action.type == "protobuf":
err_msg = b'this is an error message'
raise flight.FlightUnauthorizedError("foo", err_msg)
raise NotImplementedError
def list_flights(self, context, criteria):
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
raise flight.FlightInternalError("foo")
class ExchangeFlightServer(FlightServerBase):
"""A server for testing DoExchange."""
def __init__(self, options=None, **kwargs):
super().__init__(**kwargs)
self.options = options
def do_exchange(self, context, descriptor, reader, writer):
if descriptor.descriptor_type != flight.DescriptorType.CMD:
raise pa.ArrowInvalid("Must provide a command descriptor")
elif descriptor.command == b"echo":
return self.exchange_echo(context, reader, writer)
elif descriptor.command == b"get":
return self.exchange_do_get(context, reader, writer)
elif descriptor.command == b"put":
return self.exchange_do_put(context, reader, writer)
elif descriptor.command == b"transform":
return self.exchange_transform(context, reader, writer)
else:
raise pa.ArrowInvalid(
"Unknown command: {}".format(descriptor.command))
def exchange_do_get(self, context, reader, writer):
"""Emulate DoGet with DoExchange."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
writer.begin(data.schema)
writer.write_table(data)
def exchange_do_put(self, context, reader, writer):
"""Emulate DoPut with DoExchange."""
num_batches = 0
for chunk in reader:
if not chunk.data:
raise pa.ArrowInvalid("All chunks must have data.")
num_batches += 1
writer.write_metadata(str(num_batches).encode("utf-8"))
def exchange_echo(self, context, reader, writer):
"""Run a simple echo server."""
started = False
for chunk in reader:
if not started and chunk.data:
writer.begin(chunk.data.schema, options=self.options)
started = True
if chunk.app_metadata and chunk.data:
writer.write_with_metadata(chunk.data, chunk.app_metadata)
elif chunk.app_metadata:
writer.write_metadata(chunk.app_metadata)
elif chunk.data:
writer.write_batch(chunk.data)
else:
assert False, "Should not happen"
def exchange_transform(self, context, reader, writer):
"""Sum rows in an uploaded table."""
for field in reader.schema:
if not pa.types.is_integer(field.type):
raise pa.ArrowInvalid("Invalid field: " + repr(field))
table = reader.read_all()
sums = [0] * table.num_rows
for column in table:
for row, value in enumerate(column):
sums[row] += value.as_py()
result = pa.Table.from_arrays([pa.array(sums)], names=["sum"])
writer.begin(result.schema)
writer.write_table(result)
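# Hedged sketch of a matching DoExchange client call for the "transform"
# command above (illustration only; `client` and an integer-typed `table`
# are assumed to exist):
#
#   descriptor = flight.FlightDescriptor.for_command(b"transform")
#   writer, reader = client.do_exchange(descriptor)
#   writer.begin(table.schema)
#   writer.write_table(table)
#   writer.done_writing()
#   sums = reader.read_all()   # single "sum" column with per-row totals
#   writer.close()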
class HttpBasicServerAuthHandler(ServerAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
buf = incoming.read()
auth = flight.BasicAuth.deserialize(buf)
if auth.username not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
if self.creds[auth.username] != auth.password:
raise flight.FlightUnauthenticatedError("wrong password")
outgoing.write(tobytes(auth.username))
def is_valid(self, token):
if not token:
raise flight.FlightUnauthenticatedError("token not provided")
if token not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
return token
class HttpBasicClientAuthHandler(ClientAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, username, password):
super().__init__()
self.basic_auth = flight.BasicAuth(username, password)
self.token = None
def authenticate(self, outgoing, incoming):
auth = self.basic_auth.serialize()
outgoing.write(auth)
self.token = incoming.read()
def get_token(self):
return self.token
class TokenServerAuthHandler(ServerAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
username = incoming.read()
password = incoming.read()
if username in self.creds and self.creds[username] == password:
outgoing.write(base64.b64encode(b'secret:' + username))
else:
raise flight.FlightUnauthenticatedError(
"invalid username/password")
def is_valid(self, token):
token = base64.b64decode(token)
if not token.startswith(b'secret:'):
raise flight.FlightUnauthenticatedError("invalid token")
return token[7:]
class TokenClientAuthHandler(ClientAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, username, password):
super().__init__()
self.username = username
self.password = password
self.token = b''
def authenticate(self, outgoing, incoming):
outgoing.write(self.username)
outgoing.write(self.password)
self.token = incoming.read()
def get_token(self):
return self.token
class NoopAuthHandler(ServerAuthHandler):
"""A no-op auth handler."""
def authenticate(self, outgoing, incoming):
"""Do nothing."""
def is_valid(self, token):
"""
Return an empty string as the peer identity;
returning None would cause a TypeError.
"""
return ""
def case_insensitive_header_lookup(headers, lookup_key):
"""Lookup the value of given key in the given headers.
The key lookup is case insensitive.
"""
for key in headers:
if key.lower() == lookup_key.lower():
return headers.get(key)
class ClientHeaderAuthMiddlewareFactory(ClientMiddlewareFactory):
"""ClientMiddlewareFactory that creates ClientAuthHeaderMiddleware."""
def __init__(self):
self.call_credential = []
def start_call(self, info):
return ClientHeaderAuthMiddleware(self)
def set_call_credential(self, call_credential):
self.call_credential = call_credential
class ClientHeaderAuthMiddleware(ClientMiddleware):
"""
ClientMiddleware that extracts the authorization header
from the server.
This is an example of a ClientMiddleware that can extract
the bearer token authorization header from a HTTP header
authentication enabled server.
Parameters
----------
factory : ClientHeaderAuthMiddlewareFactory
This factory is used to set call credentials if an
authorization header is found in the headers from the server.
"""
def __init__(self, factory):
self.factory = factory
def received_headers(self, headers):
auth_header = case_insensitive_header_lookup(headers, 'Authorization')
self.factory.set_call_credential([
b'authorization',
auth_header[0].encode("utf-8")])
class HeaderAuthServerMiddlewareFactory(ServerMiddlewareFactory):
"""Validates incoming username and password."""
def start_call(self, info, headers):
auth_header = case_insensitive_header_lookup(
headers,
'Authorization'
)
values = auth_header[0].split(' ')
token = ''
error_message = 'Invalid credentials'
if values[0] == 'Basic':
decoded = base64.b64decode(values[1])
pair = decoded.decode("utf-8").split(':')
if not (pair[0] == 'test' and pair[1] == 'password'):
raise flight.FlightUnauthenticatedError(error_message)
token = 'token1234'
elif values[0] == 'Bearer':
token = values[1]
if not token == 'token1234':
raise flight.FlightUnauthenticatedError(error_message)
else:
raise flight.FlightUnauthenticatedError(error_message)
return HeaderAuthServerMiddleware(token)
class HeaderAuthServerMiddleware(ServerMiddleware):
"""A ServerMiddleware that transports incoming username and passowrd."""
def __init__(self, token):
self.token = token
def sending_headers(self):
return {'authorization': 'Bearer ' + self.token}
class HeaderAuthFlightServer(FlightServerBase):
"""A Flight server that tests with basic token authentication. """
def do_action(self, context, action):
middleware = context.get_middleware("auth")
if middleware:
auth_header = case_insensitive_header_lookup(
middleware.sending_headers(), 'Authorization')
values = auth_header.split(' ')
return [values[1].encode("utf-8")]
raise flight.FlightUnauthenticatedError(
'No token auth middleware found.')
class ArbitraryHeadersServerMiddlewareFactory(ServerMiddlewareFactory):
"""A ServerMiddlewareFactory that transports arbitrary headers."""
def start_call(self, info, headers):
return ArbitraryHeadersServerMiddleware(headers)
class ArbitraryHeadersServerMiddleware(ServerMiddleware):
"""A ServerMiddleware that transports arbitrary headers."""
def __init__(self, incoming):
self.incoming = incoming
def sending_headers(self):
return self.incoming
class ArbitraryHeadersFlightServer(FlightServerBase):
"""A Flight server that tests multiple arbitrary headers."""
def do_action(self, context, action):
middleware = context.get_middleware("arbitrary-headers")
if middleware:
headers = middleware.sending_headers()
header_1 = case_insensitive_header_lookup(
headers,
'test-header-1'
)
header_2 = case_insensitive_header_lookup(
headers,
'test-header-2'
)
value1 = header_1[0].encode("utf-8")
value2 = header_2[0].encode("utf-8")
return [value1, value2]
raise flight.FlightServerError("No headers middleware found")
class HeaderServerMiddleware(ServerMiddleware):
"""Expose a per-call value to the RPC method body."""
def __init__(self, special_value):
self.special_value = special_value
class HeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Expose a per-call hard-coded value to the RPC method body."""
def start_call(self, info, headers):
return HeaderServerMiddleware("right value")
class HeaderFlightServer(FlightServerBase):
"""Echo back the per-call hard-coded value."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
if middleware:
return [middleware.special_value.encode()]
return [b""]
class MultiHeaderFlightServer(FlightServerBase):
"""Test sending/receiving multiple (binary-valued) headers."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
headers = repr(middleware.client_headers).encode("utf-8")
return [headers]
class SelectiveAuthServerMiddlewareFactory(ServerMiddlewareFactory):
"""Deny access to certain methods based on a header."""
def start_call(self, info, headers):
if info.method == flight.FlightMethod.LIST_ACTIONS:
# No auth needed
return
token = headers.get("x-auth-token")
if not token:
raise flight.FlightUnauthenticatedError("No token")
token = token[0]
if token != "password":
raise flight.FlightUnauthenticatedError("Invalid token")
return HeaderServerMiddleware(token)
class SelectiveAuthClientMiddlewareFactory(ClientMiddlewareFactory):
def start_call(self, info):
return SelectiveAuthClientMiddleware()
class SelectiveAuthClientMiddleware(ClientMiddleware):
def sending_headers(self):
return {
"x-auth-token": "password",
}
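# Hedged sketch (illustration only) of how the middleware above is wired up:
# the server factory is registered under a name, the client factory is passed
# as a list.
#
#   with HeaderFlightServer(middleware={
#           "test": SelectiveAuthServerMiddlewareFactory()}) as server:
#       client = FlightClient(
#           ('localhost', server.port),
#           middleware=[SelectiveAuthClientMiddlewareFactory()])
#       result = next(client.do_action(flight.Action("echo", b"")))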
class RecordingServerMiddlewareFactory(ServerMiddlewareFactory):
"""Record what methods were called."""
def __init__(self):
super().__init__()
self.methods = []
def start_call(self, info, headers):
self.methods.append(info.method)
return None
class RecordingClientMiddlewareFactory(ClientMiddlewareFactory):
"""Record what methods were called."""
def __init__(self):
super().__init__()
self.methods = []
def start_call(self, info):
self.methods.append(info.method)
return None
class MultiHeaderClientMiddlewareFactory(ClientMiddlewareFactory):
"""Test sending/receiving multiple (binary-valued) headers."""
def __init__(self):
# Read in test_middleware_multi_header below.
# The middleware instance will update this value.
self.last_headers = {}
def start_call(self, info):
return MultiHeaderClientMiddleware(self)
class MultiHeaderClientMiddleware(ClientMiddleware):
"""Test sending/receiving multiple (binary-valued) headers."""
EXPECTED = {
"x-text": ["foo", "bar"],
"x-binary-bin": [b"\x00", b"\x01"],
}
def __init__(self, factory):
self.factory = factory
def sending_headers(self):
return self.EXPECTED
def received_headers(self, headers):
# Let the test code know what the last set of headers we
# received were.
self.factory.last_headers = headers
class MultiHeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Test sending/receiving multiple (binary-valued) headers."""
def start_call(self, info, headers):
return MultiHeaderServerMiddleware(headers)
class MultiHeaderServerMiddleware(ServerMiddleware):
"""Test sending/receiving multiple (binary-valued) headers."""
def __init__(self, client_headers):
self.client_headers = client_headers
def sending_headers(self):
return MultiHeaderClientMiddleware.EXPECTED
def test_flight_server_location_argument():
locations = [
None,
'grpc://localhost:0',
('localhost', find_free_port()),
]
for location in locations:
with FlightServerBase(location) as server:
assert isinstance(server, FlightServerBase)
def test_server_exit_reraises_exception():
with pytest.raises(ValueError):
with FlightServerBase():
raise ValueError()
@pytest.mark.slow
def test_client_wait_for_available():
location = ('localhost', find_free_port())
server = None
def serve():
global server
time.sleep(0.5)
server = FlightServerBase(location)
server.serve()
client = FlightClient(location)
thread = threading.Thread(target=serve, daemon=True)
thread.start()
started = time.time()
client.wait_for_available(timeout=5)
elapsed = time.time() - started
assert elapsed >= 0.5
def test_flight_list_flights():
"""Try a simple list_flights call."""
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
assert list(client.list_flights()) == []
flights = client.list_flights(ConstantFlightServer.CRITERIA)
assert len(list(flights)) == 1
def test_flight_do_get_ints():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with ConstantFlightServer(options=options) as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
# Also test via RecordBatchReader interface
data = client.do_get(flight.Ticket(b'ints')).to_reader().read_all()
assert data.equals(table)
with pytest.raises(flight.FlightServerError,
match="expected IpcWriteOptions, got <class 'int'>"):
with ConstantFlightServer(options=42) as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
@pytest.mark.pandas
def test_do_get_ints_pandas():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_pandas()
assert list(data['some_ints']) == table.column(0).to_pylist()
def test_flight_do_get_dicts():
table = simple_dicts_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'dicts')).read_all()
assert data.equals(table)
def test_flight_do_get_ticket():
"""Make sure Tickets get passed to the server."""
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
with CheckTicketFlightServer(expected_ticket=b'the-ticket') as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'the-ticket')).read_all()
assert data.equals(table)
def test_flight_get_info():
"""Make sure FlightEndpoint accepts string and object URIs."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_flight_info(flight.FlightDescriptor.for_command(b''))
assert info.total_records == -1
assert info.total_bytes == -1
assert info.schema == pa.schema([('a', pa.int32())])
assert len(info.endpoints) == 2
assert len(info.endpoints[0].locations) == 1
assert info.endpoints[0].locations[0] == flight.Location('grpc://test')
assert info.endpoints[1].locations[0] == \
flight.Location.for_grpc_tcp('localhost', 5005)
def test_flight_get_schema():
"""Make sure GetSchema returns correct schema."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_schema(flight.FlightDescriptor.for_command(b''))
assert info.schema == pa.schema([('a', pa.int32())])
def test_list_actions():
"""Make sure the return type of ListActions is validated."""
# ARROW-6392
with ListActionsErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(
flight.FlightServerError,
match=("Results of list_actions must be "
"ActionType or tuple")
):
list(client.list_actions())
with ListActionsFlightServer() as server:
client = FlightClient(('localhost', server.port))
assert list(client.list_actions()) == \
ListActionsFlightServer.expected_actions()
class ConvenienceServer(FlightServerBase):
"""
Server for testing various implementation conveniences (auto-boxing, etc.)
"""
@property
def simple_action_results(self):
return [b'foo', b'bar', b'baz']
def do_action(self, context, action):
if action.type == 'simple-action':
return self.simple_action_results
elif action.type == 'echo':
return [action.body]
elif action.type == 'bad-action':
return ['foo']
elif action.type == 'arrow-exception':
raise pa.ArrowMemoryError()
def test_do_action_result_convenience():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
# do_action as action type without body
results = [x.body for x in client.do_action('simple-action')]
assert results == server.simple_action_results
# do_action with tuple of type and body
body = b'the-body'
results = [x.body for x in client.do_action(('echo', body))]
assert results == [body]
def test_nicer_server_exceptions():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightServerError,
match="a bytes-like object is required"):
list(client.do_action('bad-action'))
# While Flight/C++ sends across the original status code, it
# doesn't get mapped to the equivalent code here, since we
# want to be able to distinguish between client- and server-
# side errors.
with pytest.raises(flight.FlightServerError,
match="ArrowMemoryError"):
list(client.do_action('arrow-exception'))
def test_get_port():
"""Make sure port() works."""
server = GetInfoFlightServer("grpc://localhost:0")
try:
assert server.port > 0
finally:
server.shutdown()
@pytest.mark.skipif(os.name == 'nt',
reason="Unix sockets can't be tested on Windows")
def test_flight_domain_socket():
"""Try a simple do_get call over a Unix domain socket."""
with tempfile.NamedTemporaryFile() as sock:
sock.close()
location = flight.Location.for_grpc_unix(sock.name)
with ConstantFlightServer(location=location):
client = FlightClient(location)
reader = client.do_get(flight.Ticket(b'ints'))
table = simple_ints_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
reader = client.do_get(flight.Ticket(b'dicts'))
table = simple_dicts_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
@pytest.mark.slow
def test_flight_large_message():
"""Try sending/receiving a large message via Flight.
See ARROW-4421: by default, gRPC won't allow us to send messages >
4MiB in size.
"""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024 * 1024))
], names=['a'])
with EchoFlightServer(expected_schema=data.schema) as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
# Write a single giant chunk
writer.write_table(data, 10 * 1024 * 1024)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_generator_stream():
"""Try downloading a flight of RecordBatches in a GeneratorStream."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=['a'])
with EchoStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
writer.write_table(data)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_invalid_generator_stream():
"""Try streaming data with mismatched schemas."""
with InvalidStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(pa.ArrowException):
client.do_get(flight.Ticket(b'')).read_all()
def test_timeout_fires():
"""Make sure timeouts fire on slow requests."""
# Do this in a separate thread so that if it fails, we don't hang
# the entire test process
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("", b"")
options = flight.FlightCallOptions(timeout=0.2)
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightTimedOutError):
list(client.do_action(action, options=options))
def test_timeout_passes():
"""Make sure timeouts do not fire on fast requests."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
options = flight.FlightCallOptions(timeout=5.0)
client.do_get(flight.Ticket(b'ints'), options=options).read_all()
basic_auth_handler = HttpBasicServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
token_auth_handler = TokenServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
@pytest.mark.slow
def test_http_basic_unauth():
"""Test that auth fails when not authenticated."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*unauthenticated.*"):
list(client.do_action(action))
@pytest.mark.skipif(os.name == 'nt',
reason="ARROW-10013: gRPC on Windows corrupts peer()")
def test_http_basic_auth():
"""Test a Python implementation of HTTP basic authentication."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd'))
results = client.do_action(action)
identity = next(results)
assert identity.body.to_pybytes() == b'test'
peer_address = next(results)
assert peer_address.body.to_pybytes() != b''
def test_http_basic_auth_invalid_password():
"""Test that auth fails with the wrong password."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*wrong password.*"):
client.authenticate(HttpBasicClientAuthHandler('test', 'wrong'))
next(client.do_action(action))
def test_token_auth():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd'))
identity = next(client.do_action(action))
assert identity.body.to_pybytes() == b'test'
def test_token_auth_invalid():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightUnauthenticatedError):
client.authenticate(TokenClientAuthHandler('test', 'wrong'))
header_auth_server_middleware_factory = HeaderAuthServerMiddlewareFactory()
no_op_auth_handler = NoopAuthHandler()
def test_authenticate_basic_token():
"""Test authenticate_basic_token with bearer token and auth headers."""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
token_pair = client.authenticate_basic_token(b'test', b'password')
assert token_pair[0] == b'authorization'
assert token_pair[1] == b'Bearer token1234'
def test_authenticate_basic_token_invalid_password():
"""Test authenticate_basic_token with an invalid password."""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightUnauthenticatedError):
client.authenticate_basic_token(b'test', b'badpassword')
def test_authenticate_basic_token_and_action():
"""Test authenticate_basic_token and doAction after authentication."""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
token_pair = client.authenticate_basic_token(b'test', b'password')
assert token_pair[0] == b'authorization'
assert token_pair[1] == b'Bearer token1234'
options = flight.FlightCallOptions(headers=[token_pair])
result = list(client.do_action(
action=flight.Action('test-action', b''), options=options))
assert result[0].body.to_pybytes() == b'token1234'
def test_authenticate_basic_token_with_client_middleware():
"""Test authenticate_basic_token with client middleware
to intercept authorization header returned by the
HTTP header auth enabled server.
"""
with HeaderAuthFlightServer(auth_handler=no_op_auth_handler, middleware={
"auth": HeaderAuthServerMiddlewareFactory()
}) as server:
client_auth_middleware = ClientHeaderAuthMiddlewareFactory()
client = FlightClient(
('localhost', server.port),
middleware=[client_auth_middleware]
)
encoded_credentials = base64.b64encode(b'test:password')
options = flight.FlightCallOptions(headers=[
(b'authorization', b'Basic ' + encoded_credentials)
])
result = list(client.do_action(
action=flight.Action('test-action', b''), options=options))
assert result[0].body.to_pybytes() == b'token1234'
assert client_auth_middleware.call_credential[0] == b'authorization'
assert client_auth_middleware.call_credential[1] == \
b'Bearer ' + b'token1234'
result2 = list(client.do_action(
action=flight.Action('test-action', b''), options=options))
assert result2[0].body.to_pybytes() == b'token1234'
assert client_auth_middleware.call_credential[0] == b'authorization'
assert client_auth_middleware.call_credential[1] == \
b'Bearer ' + b'token1234'
def test_arbitrary_headers_in_flight_call_options():
"""Test passing multiple arbitrary headers to the middleware."""
with ArbitraryHeadersFlightServer(
auth_handler=no_op_auth_handler,
middleware={
"auth": HeaderAuthServerMiddlewareFactory(),
"arbitrary-headers": ArbitraryHeadersServerMiddlewareFactory()
}) as server:
client = FlightClient(('localhost', server.port))
token_pair = client.authenticate_basic_token(b'test', b'password')
assert token_pair[0] == b'authorization'
assert token_pair[1] == b'Bearer token1234'
options = flight.FlightCallOptions(headers=[
token_pair,
(b'test-header-1', b'value1'),
(b'test-header-2', b'value2')
])
result = list(client.do_action(flight.Action(
"test-action", b""), options=options))
assert result[0].body.to_pybytes() == b'value1'
assert result[1].body.to_pybytes() == b'value2'
def test_location_invalid():
"""Test constructing invalid URIs."""
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
flight.connect("%")
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
ConstantFlightServer("%")
def test_location_unknown_scheme():
"""Test creating locations for unknown schemes."""
assert flight.Location("s3://foo").uri == b"s3://foo"
assert flight.Location("https://example.com/bar.parquet").uri == \
b"https://example.com/bar.parquet"
@pytest.mark.slow
@pytest.mark.requires_testing_data
def test_tls_fails():
"""Make sure clients cannot connect when cert verification fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Ensure client doesn't connect when certificate verification
# fails (this is a slow test since gRPC does retry a few times)
client = FlightClient("grpc+tls://localhost:" + str(s.port))
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints')).read_all()
@pytest.mark.requires_testing_data
def test_tls_do_get():
"""Try a simple do_get call over TLS."""
table = simple_ints_table()
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = FlightClient(('localhost', s.port),
tls_root_certs=certs["root_cert"])
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_disable_server_verification():
"""Try a simple do_get call over TLS with server verification disabled."""
table = simple_ints_table()
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
try:
client = FlightClient(('localhost', s.port),
disable_server_verification=True)
except NotImplementedError:
pytest.skip('disable_server_verification feature is not available')
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_override_hostname():
"""Check that incorrectly overriding the hostname fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
override_hostname="fakehostname")
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
def test_flight_do_get_metadata():
"""Try a simple do_get call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
batches = []
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b''))
idx = 0
while True:
try:
batch, metadata = reader.read_chunk()
batches.append(batch)
server_idx, = struct.unpack('<i', metadata.to_pybytes())
assert idx == server_idx
idx += 1
except StopIteration:
break
data = pa.Table.from_batches(batches)
assert data.equals(table)
def test_flight_do_get_metadata_v4():
"""Try a simple do_get call with V4 metadata version."""
table = pa.Table.from_arrays(
[pa.array([-10, -5, 0, 5, 10])], names=['a'])
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with MetadataFlightServer(options=options) as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b''))
data = reader.read_all()
assert data.equals(table)
def test_flight_do_put_metadata():
"""Try a simple do_put call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
with writer:
for idx, batch in enumerate(table.to_batches(max_chunksize=1)):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
buf = metadata_reader.read()
assert buf is not None
server_idx, = struct.unpack('<i', buf.to_pybytes())
assert idx == server_idx
def test_flight_do_put_limit():
"""Try a simple do_put call with a size limit."""
large_batch = pa.RecordBatch.from_arrays([
pa.array(np.ones(768, dtype=np.int64())),
], names=['a'])
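# 768 int64 values is about 6 KiB of raw data, deliberately larger than the
# 4096-byte write_size_limit_bytes configured on the client below.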
with EchoFlightServer() as server:
client = FlightClient(('localhost', server.port),
write_size_limit_bytes=4096)
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
large_batch.schema)
with writer:
with pytest.raises(flight.FlightWriteSizeExceededError,
match="exceeded soft limit") as excinfo:
writer.write_batch(large_batch)
assert excinfo.value.limit == 4096
smaller_batches = [
large_batch.slice(0, 384),
large_batch.slice(384),
]
for batch in smaller_batches:
writer.write_batch(batch)
expected = pa.Table.from_batches([large_batch])
actual = client.do_get(flight.Ticket(b'')).read_all()
assert expected == actual
@pytest.mark.slow
def test_cancel_do_get():
"""Test canceling a DoGet operation on the client side."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
reader.cancel()
with pytest.raises(flight.FlightCancelledError, match=".*Cancel.*"):
reader.read_chunk()
@pytest.mark.slow
def test_cancel_do_get_threaded():
"""Test canceling a DoGet operation from another thread."""
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
read_first_message = threading.Event()
stream_canceled = threading.Event()
result_lock = threading.Lock()
raised_proper_exception = threading.Event()
def block_read():
reader.read_chunk()
read_first_message.set()
stream_canceled.wait(timeout=5)
try:
reader.read_chunk()
except flight.FlightCancelledError:
with result_lock:
raised_proper_exception.set()
thread = threading.Thread(target=block_read, daemon=True)
thread.start()
read_first_message.wait(timeout=5)
reader.cancel()
stream_canceled.set()
thread.join(timeout=1)
with result_lock:
assert raised_proper_exception.is_set()
def test_roundtrip_types():
"""Make sure serializable types round-trip."""
ticket = flight.Ticket("foo")
assert ticket == flight.Ticket.deserialize(ticket.serialize())
desc = flight.FlightDescriptor.for_command("test")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
desc = flight.FlightDescriptor.for_path("a", "b", "test.arrow")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
info = flight.FlightInfo(
pa.schema([('a', pa.int32())]),
desc,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
info2 = flight.FlightInfo.deserialize(info.serialize())
assert info.schema == info2.schema
assert info.descriptor == info2.descriptor
assert info.total_bytes == info2.total_bytes
assert info.total_records == info2.total_records
assert info.endpoints == info2.endpoints
def test_roundtrip_errors():
"""Ensure that Flight errors propagate from server to client."""
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.do_action(flight.Action("internal", b"")))
with pytest.raises(flight.FlightTimedOutError, match=".*foo.*"):
list(client.do_action(flight.Action("timedout", b"")))
with pytest.raises(flight.FlightCancelledError, match=".*foo.*"):
list(client.do_action(flight.Action("cancel", b"")))
with pytest.raises(flight.FlightUnauthenticatedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthenticated", b"")))
with pytest.raises(flight.FlightUnauthorizedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthorized", b"")))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.list_flights())
def test_do_put_independent_read_write():
"""Ensure that separate threads can read/write on a DoPut."""
# ARROW-6063: previously this would cause gRPC to abort when the
# writer was closed (due to simultaneous reads), or would hang
# forever.
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
count = [0]
def _reader_thread():
while metadata_reader.read() is not None:
count[0] += 1
thread = threading.Thread(target=_reader_thread)
thread.start()
batches = table.to_batches(max_chunksize=1)
with writer:
for idx, batch in enumerate(batches):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
# Causes the server to stop writing and end the call
writer.done_writing()
# Thus reader thread will break out of loop
thread.join()
# writer.close() won't segfault since reader thread has
# stopped
assert count[0] == len(batches)
def test_server_middleware_same_thread():
"""Ensure that server middleware run on the same thread as the RPC."""
with HeaderFlightServer(middleware={
"test": HeaderServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
results = list(client.do_action(flight.Action(b"test", b"")))
assert len(results) == 1
value = results[0].body.to_pybytes()
assert b"right value" == value
def test_middleware_reject():
"""Test rejecting an RPC with server middleware."""
with HeaderFlightServer(middleware={
"test": SelectiveAuthServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
# The middleware allows this through without auth.
with pytest.raises(pa.ArrowNotImplementedError):
list(client.list_actions())
# But not anything else.
with pytest.raises(flight.FlightUnauthenticatedError):
list(client.do_action(flight.Action(b"", b"")))
client = FlightClient(
('localhost', server.port),
middleware=[SelectiveAuthClientMiddlewareFactory()]
)
response = next(client.do_action(flight.Action(b"", b"")))
assert b"password" == response.body.to_pybytes()
def test_middleware_mapping():
"""Test that middleware records methods correctly."""
server_middleware = RecordingServerMiddlewareFactory()
client_middleware = RecordingClientMiddlewareFactory()
with FlightServerBase(middleware={"test": server_middleware}) as server:
client = FlightClient(
('localhost', server.port),
middleware=[client_middleware]
)
descriptor = flight.FlightDescriptor.for_command(b"")
with pytest.raises(NotImplementedError):
list(client.list_flights())
with pytest.raises(NotImplementedError):
client.get_flight_info(descriptor)
with pytest.raises(NotImplementedError):
client.get_schema(descriptor)
with pytest.raises(NotImplementedError):
client.do_get(flight.Ticket(b""))
with pytest.raises(NotImplementedError):
writer, _ = client.do_put(descriptor, pa.schema([]))
writer.close()
with pytest.raises(NotImplementedError):
list(client.do_action(flight.Action(b"", b"")))
with pytest.raises(NotImplementedError):
list(client.list_actions())
with pytest.raises(NotImplementedError):
writer, _ = client.do_exchange(descriptor)
writer.close()
expected = [
flight.FlightMethod.LIST_FLIGHTS,
flight.FlightMethod.GET_FLIGHT_INFO,
flight.FlightMethod.GET_SCHEMA,
flight.FlightMethod.DO_GET,
flight.FlightMethod.DO_PUT,
flight.FlightMethod.DO_ACTION,
flight.FlightMethod.LIST_ACTIONS,
flight.FlightMethod.DO_EXCHANGE,
]
assert server_middleware.methods == expected
assert client_middleware.methods == expected
def test_extra_info():
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
try:
list(client.do_action(flight.Action("protobuf", b"")))
assert False
except flight.FlightUnauthorizedError as e:
assert e.extra_info is not None
ei = e.extra_info
assert ei == b'this is an error message'
@pytest.mark.requires_testing_data
def test_mtls():
"""Test mutual TLS (mTLS) with gRPC."""
certs = example_tls_certs()
table = simple_ints_table()
with ConstantFlightServer(
tls_certificates=[certs["certificates"][0]],
verify_client=True,
root_certificates=certs["root_cert"]) as s:
client = FlightClient(
('localhost', s.port),
tls_root_certs=certs["root_cert"],
cert_chain=certs["certificates"][0].cert,
private_key=certs["certificates"][0].key)
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
def test_doexchange_get():
"""Emulate DoGet with DoExchange."""
expected = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"get")
writer, reader = client.do_exchange(descriptor)
with writer:
table = reader.read_all()
assert expected == table
def test_doexchange_put():
"""Emulate DoPut with DoExchange."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"put")
writer, reader = client.do_exchange(descriptor)
with writer:
writer.begin(data.schema)
for batch in batches:
writer.write_batch(batch)
writer.done_writing()
chunk = reader.read_chunk()
assert chunk.data is None
expected_buf = str(len(batches)).encode("utf-8")
assert chunk.app_metadata == expected_buf
def test_doexchange_echo():
"""Try a DoExchange echo server."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
with writer:
# Read/write metadata before starting data.
for i in range(10):
buf = str(i).encode("utf-8")
writer.write_metadata(buf)
chunk = reader.read_chunk()
assert chunk.data is None
assert chunk.app_metadata == buf
# Now write data without metadata.
writer.begin(data.schema)
for batch in batches:
writer.write_batch(batch)
assert reader.schema == data.schema
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata is None
# And write data with metadata.
for i, batch in enumerate(batches):
buf = str(i).encode("utf-8")
writer.write_with_metadata(batch, buf)
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata == buf
def test_doexchange_echo_v4():
"""Try a DoExchange echo server using the V4 metadata version."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
options = pa.ipc.IpcWriteOptions(
metadata_version=pa.ipc.MetadataVersion.V4)
with ExchangeFlightServer(options=options) as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
with writer:
# Now write data without metadata.
writer.begin(data.schema, options=options)
for batch in batches:
writer.write_batch(batch)
assert reader.schema == data.schema
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata is None
def test_doexchange_transform():
"""Transform a table with a service."""
data = pa.Table.from_arrays([
pa.array(range(0, 1024)),
pa.array(range(1, 1025)),
pa.array(range(2, 1026)),
], names=["a", "b", "c"])
expected = pa.Table.from_arrays([
pa.array(range(3, 1024 * 3 + 3, 3)),
], names=["sum"])
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"transform")
writer, reader = client.do_exchange(descriptor)
with writer:
writer.begin(data.schema)
writer.write_table(data)
writer.done_writing()
table = reader.read_all()
assert expected == table
def test_middleware_multi_header():
"""Test sending/receiving multiple (binary-valued) headers."""
with MultiHeaderFlightServer(middleware={
"test": MultiHeaderServerMiddlewareFactory(),
}) as server:
headers = MultiHeaderClientMiddlewareFactory()
client = FlightClient(('localhost', server.port), middleware=[headers])
response = next(client.do_action(flight.Action(b"", b"")))
# The server echoes the headers it got back to us.
raw_headers = response.body.to_pybytes().decode("utf-8")
client_headers = ast.literal_eval(raw_headers)
# Don't directly compare; gRPC may add headers like User-Agent.
for header, values in MultiHeaderClientMiddleware.EXPECTED.items():
assert client_headers.get(header) == values
assert headers.last_headers.get(header) == values
@pytest.mark.requires_testing_data
def test_generic_options():
"""Test setting generic client options."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Try setting a string argument that will make requests fail
options = [("grpc.ssl_target_name_override", "fakehostname")]
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
generic_options=options)
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
# Try setting an int argument that will make requests fail
options = [("grpc.max_receive_message_length", 32)]
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
generic_options=options)
with pytest.raises(pa.ArrowInvalid):
client.do_get(flight.Ticket(b'ints'))
class CancelFlightServer(FlightServerBase):
"""A server for testing StopToken."""
def do_get(self, context, ticket):
schema = pa.schema([])
rb = pa.RecordBatch.from_arrays([], schema=schema)
return flight.GeneratorStream(schema, itertools.repeat(rb))
def do_exchange(self, context, descriptor, reader, writer):
schema = pa.schema([])
rb = pa.RecordBatch.from_arrays([], schema=schema)
writer.begin(schema)
while not context.is_cancelled():
writer.write_batch(rb)
time.sleep(0.5)
def test_interrupt():
if threading.current_thread().ident != threading.main_thread().ident:
pytest.skip("test only works from main Python thread")
# Skips test if not available
raise_signal = util.get_raise_signal()
def signal_from_thread():
time.sleep(0.5)
raise_signal(signal.SIGINT)
exc_types = (KeyboardInterrupt, pa.ArrowCancelled)
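# Both exception types are accepted here: depending on timing, the interrupt may
# surface as a Python-level KeyboardInterrupt or as pa.ArrowCancelled raised once
# the C++ layer notices the cancellation (an interpretation of the test, not Arrow docs).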
def test(read_all):
try:
try:
t = threading.Thread(target=signal_from_thread)
with pytest.raises(exc_types) as exc_info:
t.start()
read_all()
finally:
t.join()
except KeyboardInterrupt:
# In case KeyboardInterrupt didn't interrupt read_all
# above, at least prevent it from stopping the test suite
pytest.fail("KeyboardInterrupt didn't interrupt Flight read_all")
e = exc_info.value.__context__
assert isinstance(e, pa.ArrowCancelled) or \
isinstance(e, KeyboardInterrupt)
with CancelFlightServer() as server:
client = FlightClient(("localhost", server.port))
reader = client.do_get(flight.Ticket(b""))
test(reader.read_all)
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
test(reader.read_all)
def test_never_sends_data():
# Regression test for ARROW-12779
match = "application server implementation error"
with NeverSendsDataFlightServer() as server:
client = flight.connect(('localhost', server.port))
with pytest.raises(flight.FlightServerError, match=match):
client.do_get(flight.Ticket(b'')).read_all()
# Check that the server handler will ignore empty tables
# up to a certain extent
table = client.do_get(flight.Ticket(b'yield_data')).read_all()
assert table.num_rows == 5
| apache-2.0 |
schets/LILAC | src/scripts/python/learn-grad.py | 2 | 3221 | import numpy as np
from sklearn import manifold, svm, preprocessing
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
Axes3D
def read_data(gradname, scorename):
#could use numpy.loadtxt, but that blindly accepts nan values
slist = open(scorename, 'r').readlines()
glist = open(gradname, 'r').readlines()
v = (slist, glist)
gkeep = []
skeep = []
for s, g in zip(slist, glist):
sval = float(s.split()[0])
if(np.isfinite(sval)):
skeep.append(np.fromstring(s, sep=' '))
nar = np.fromstring(g, sep=' ')
gkeep.append(nar)
return (np.vstack(skeep), np.vstack(gkeep))
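# Note: read_data pairs each score line with the corresponding gradient line and drops
# any row whose score is not finite, returning (scores, grads) as 2-D numpy arrays.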
(scores, grads) = read_data("grad_data.out", "score_data.out")
(scores2, grads2) = read_data("grad_data2.out", "score_data2.out")
n_neighbors=50
n_components=15
scaler = preprocessing.StandardScaler().fit(grads)
grads = scaler.transform(grads)
grads2 = scaler.transform(grads2)
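# The scaler is fit on the first gradient set only and then applied to both sets,
# so the second (held-out) set is standardised with the same mean and variance.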
#Y = manifold.Isomap(n_neighbors, n_components).fit_transform(grads)
Y=grads #don't perform dimensionality reduction
#perform classification on the dataset, use support vector machine
test_svm = svm.SVC(kernel='rbf', C=20, gamma=0.15, probability=True)
test_svm=test_svm.fit(Y, scores[:, 1])
#get number of stable classified as unstable
pl.figure(0)
(Y, scores) = grads2, scores2
num_stab = sum(1 for s in scores if s[1] == 1)
missed_stab_arr = [s[0] for t, s in zip(Y, scores) if s[1] == 1 and test_svm.predict(t) != s[1]]
pl.hist(missed_stab_arr, bins=20)
pl.title("Frequency of scores for the misclassified stable solutions")
pl.xlabel("score")
missed_stab = len(missed_stab_arr)
ave_score = sum(missed_stab_arr) / len(missed_stab_arr)
max_score = max(missed_stab_arr)
missed_stab_arr = [s[0] for t, s in zip(Y, scores) if s[1] == 1 and test_svm.predict(t) == s[1]]
pl.figure(1)
pl.hist(missed_stab_arr, bins=50, color='green')
pl.title("Frequency of scores for the correctly classified stable solutions")
pl.xlabel("score")
ave_good_score = sum(missed_stab_arr) / len(missed_stab_arr)
max_good_score = max(missed_stab_arr)
print "%d out of %d stable solutions were missclassified" %(missed_stab, num_stab)
print "Average score of the missclassified stable variables is %f, maximum score is %f" % (ave_score, max_score)
print "Average score of the classified stable variables is %f, maximum score is %f" % (ave_good_score, max_good_score)
#calculate average probability of misclassified stable variantsdd
#calculate number of missed unstable variants
num_unstab = sum(1 for s in scores if s[1] == 0)
missed_unstab = sum(1 for t, s in zip(Y, scores) if s[1] == 0 and test_svm.predict(t) != s[1])
print "%d out of %d unstable solutions were missclassified" %(missed_unstab, num_unstab)
pl.legend()
pl.show()
"""
fig = pl.figure(figsize=(15, 8))
pl.suptitle("Manifold Learning with %i points, %i neighbors"
% (grads.shape[1], n_neighbors), fontsize=14)
#perform clustering
# compatibility matplotlib < 1.0
ax = fig.add_subplot(241, projection='3d')
ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], c=scores[:, 0], cmap=pl.cm.Spectral)
ax.view_init(4, -72)
pl.show()
#pl.scatter(Y[:, 0], Y[:, 1], Y[:, 2], c=scores, cmap=pl.cm.Spectral)
"""
| bsd-3-clause |
brockk/clintrials | clintrials/dosefinding/efftox.py | 1 | 45709 | __author__ = 'Kristian Brock'
__contact__ = 'kristian.brock@gmail.com'
""" An implementation of Thall & Cook's EffTox design for dose-finding in clinical trials.
See:
Thall, P.F. and Cook, J.D. (2004). Dose-Finding Based on Efficacy-Toxicity Trade-Offs, Biometrics, 60: 684-693.
Cook, J.D. Efficacy-Toxicity trade-offs based on L^p norms, Technical Report UTMDABTR-003-06, April 2006
Berry, Carlin, Lee and Mueller. Bayesian Adaptive Methods for Clinical Trials, Chapman & Hall / CRC Press
"""
from collections import OrderedDict
import logging
import numpy as np
from scipy.optimize import brentq
from clintrials.common import inverse_logit
from clintrials.dosefinding.efficacytoxicity import EfficacyToxicityDoseFindingTrial
from clintrials.stats import ProbabilityDensitySample
from clintrials.util import atomic_to_json, iterable_to_json
def scale_doses(real_doses):
"""
:param real_doses: list of actual dose amounts, e.g. [10, 25] for doses of 10mg and 25mg
:return: doses on Thall & Cook's codified scale, i.e. ln(dose) - mean(ln(dose))
"""
return np.log(real_doses) - np.mean(np.log(real_doses))
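# Worked example (illustrative, values rounded): scale_doses([10, 20, 25]) returns
# approximately [-0.5365, 0.1567, 0.3798], the codified dose scale of Thall & Cook
# that is also quoted in the docstrings further down this module.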
def _eta_T(scaled_dose, mu, beta):
return mu + beta * scaled_dose
def _eta_E(scaled_dose, mu, beta1, beta2):
return mu + beta1 * scaled_dose + beta2 * scaled_dose**2
def _pi_T(scaled_dose, mu, beta):
return inverse_logit(_eta_T(scaled_dose, mu, beta))
def _pi_E(scaled_dose, mu, beta1, beta2):
return inverse_logit(_eta_E(scaled_dose, mu, beta1, beta2))
def _pi_ab(scaled_dose, tox, eff, mu_T, beta_T, mu_E, beta1_E, beta2_E, psi):
""" Calculate likelihood of observing toxicity and efficacy with given parameters. """
a, b = eff, tox
p1 = _pi_E(scaled_dose, mu_E, beta1_E, beta2_E)
p2 = _pi_T(scaled_dose, mu_T, beta_T)
response = p1**a * (1-p1)**(1-a) * p2**b * (1-p2)**(1-b)
response += (-1)**(a+b) * p1 * (1-p1) * p2 * (1-p2) * (np.exp(psi) - 1) / (np.exp(psi) + 1)  # (-1)**(a+b) must be parenthesised so the sign alternates with a+b
return response
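# For reference, _pi_ab implements the joint outcome probability used by Thall & Cook (2004):
#   pi_{a,b}(x) = pi_E^a (1 - pi_E)^(1-a) * pi_T^b (1 - pi_T)^(1-b)
#                 + (-1)^(a+b) * pi_E (1 - pi_E) pi_T (1 - pi_T) * (e^psi - 1) / (e^psi + 1)
# where a = 1 indicates an efficacy event and b = 1 indicates a toxicity event.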
def _L_n(D, mu_T, beta_T, mu_E, beta1_E, beta2_E, psi):
""" Calculate compound likelihood of observing cases D with given parameters.
Params:
D, list of 3-tuples, (dose, toxicity, efficacy), where dose is on Thall & Cook's codified scale (see below),
toxicity = 1 for toxic event, 0 for tolerance event,
and efficacy = 1 for efficacious outcome, 0 for alternative.
Note: Thall & Cook's codified scale is thus:
If doses 10mg, 20mg and 25mg are given so that d = [10, 20, 25], then the codified doses, x, are
x = ln(d) - mean(ln(dose)) = [-0.5365, 0.1567, 0.3798]
"""
response = np.ones(len(mu_T))
for scaled_dose, tox, eff in D:
p = _pi_ab(scaled_dose, tox, eff, mu_T, beta_T, mu_E, beta1_E, beta2_E, psi)
response *= p
return response
def efftox_get_posterior_probs(cases, priors, scaled_doses, tox_cutoff, eff_cutoff, n=10**5):
""" Get the posterior probabilities after having observed cumulative data D in an EffTox trial.
Note: This function evaluates the posterior integrals using Monte Carlo integration. Thall & Cook
use the method of Monahan & Genz. I imagine that is quicker and more accurate but it is also
more difficult to program, so I have skipped it. It remains a medium-term aim, however, because
this method is slow.
Params:
cases, list of 3-tuples, (dose, toxicity, efficacy), where dose is the given (1-based) dose level,
toxicity = 1 for a toxicity event; 0 for a tolerance event,
efficacy = 1 for an efficacy event; 0 for a non-efficacy event.
priors, list of prior distributions corresponding to mu_T, beta_T, mu_E, beta1_E, beta2_E, psi respectively
Each prior object should support obj.ppf(x) and obj.pdf(x)
scaled_doses, ordered list of all possible doses where each dose is on Thall & Cook's codified scale (see below),
tox_cutoff, the desired maximum toxicity
eff_cutoff, the desired minimum efficacy
n, number of random points to use in Monte Carlo integration.
Returns:
nested lists of posterior probabilities, [ Prob(Toxicity, Prob(Efficacy), Prob(Toxicity less than cutoff),
Prob(Efficacy greater than cutoff)], for each dose in doses,
i.e. returned obj is of length len(doses) and each interior list of length 4.
Note: Thall & Cook's codified dose scale is thus:
If doses 10mg, 20mg and 25mg are given so that d = [10, 20, 25], then the codified doses, x, are
x = ln(d) - mean(ln(dose)) = [-0.5365, 0.1567, 0.3798]
"""
if len(priors) != 6:
raise ValueError('priors should have 6 items.')
# Convert dose-levels given to dose amounts given
if len(cases) > 0:
dose_levels, tox_events, eff_events = zip(*cases)
scaled_doses_given = [scaled_doses[x-1] for x in dose_levels]
_cases = list(zip(scaled_doses_given, tox_events, eff_events))
else:
_cases = []
# The ranges of integration must be specified. In truth, the integration range is (-Infinity, Infinity)
# for each variable. In practice, though, integrating to infinity is problematic, especially in
# 6 dimensions. The limits of integration should capture all probability density, but not be too
# generous, e.g. -1000 to 1000 would be stupid because the density at most points would be practically zero.
# I use percentage points of the various prior distributions. The risk is that if the prior
# does not cover the posterior range well, it will not estimate it well. This needs attention. TODO
epsilon = 0.000001
limits = [(dist.ppf(epsilon), dist.ppf(1-epsilon)) for dist in priors]
samp = np.column_stack([np.random.uniform(*limit_pair, size=n) for limit_pair in limits])
lik_integrand = lambda x: _L_n(_cases, x[:, 0], x[:, 1], x[:, 2], x[:, 3], x[:, 4], x[:, 5]) \
* priors[0].pdf(x[:, 0]) * priors[1].pdf(x[:, 1]) * priors[2].pdf(x[:, 2]) \
* priors[3].pdf(x[:, 3]) * priors[4].pdf(x[:, 4]) * priors[5].pdf(x[:, 5])
pds = ProbabilityDensitySample(samp, lik_integrand)
probs = []
for x in scaled_doses:
tox_probs = _pi_T(x, mu=samp[:,0], beta=samp[:,1])
eff_probs = _pi_E(x, mu=samp[:,2], beta1=samp[:,3], beta2=samp[:,4])
probs.append((
pds.expectation(tox_probs),
pds.expectation(eff_probs),
pds.expectation(tox_probs < tox_cutoff),
pds.expectation(eff_probs > eff_cutoff)
))
return probs, pds
def efftox_get_posterior_params(cases, priors, scaled_doses, n=10**5):
""" Get the posterior parameter estimates after having observed cumulative data D in an EffTox trial.
Note: This function evaluates the posterior integrals using Monte Carlo integration. Thall & Cook
use the method of Monahan & Genz. I imagine that is quicker and more accurate but it is also
more difficult to program, so I have skipped it. It remains a medium-term aim, however, because
this method is slow.
Params:
cases, list of 3-tuples, (dose, toxicity, efficacy), where dose is the given (1-based) dose level,
toxicity = 1 for a toxicity event; 0 for a tolerance event,
efficacy = 1 for an efficacy event; 0 for a non-efficacy event.
priors, list of prior distributions corresponding to mu_T, beta_T, mu_E, beta1_E, beta2_E, psi respectively
Each prior object should support obj.ppf(x) and obj.pdf(x)
scaled_doses, ordered list of all possible doses where each dose is on Thall & Cook's codified scale (see below),
tox_cutoff, the desired maximum toxicity
eff_cutoff, the desired minimum efficacy
n, number of random points to use in Monte Carlo integration.
Returns:
list of posterior parameters as tuples, [ (mu_T, beta_T, mu_E, beta_T_1, beta_T_2, psi) ], and that's it for now.
i.e. returned obj is of length 1 and first interior tuple is of length 6.
More objects might be added to the outer list eventually.
Note: Thall & Cook's codified dose scale is thus:
If doses 10mg, 20mg and 25mg are given so that d = [10, 20, 25], then the codified doses, x, are
x = ln(d) - mean(ln(dose)) = [-0.5365, 0.1567, 0.3798]
"""
if len(priors) != 6:
raise ValueError('priors should have 6 items.')
# Convert dose-levels given to dose amounts given
if len(cases) > 0:
dose_levels, tox_events, eff_events = zip(*cases)
scaled_doses_given = [scaled_doses[x-1] for x in dose_levels]
_cases = list(zip(scaled_doses_given, tox_events, eff_events))
else:
_cases = []
# The ranges of integration must be specified. In truth, the integration range is (-Infinity, Infinity)
# for each variable. In practice, though, integrating to infinity is problematic, especially in
# 6 dimensions. The limits of integration should capture all probability density, but not be too
# generous, e.g. -1000 to 1000 would be stupid because the density at most points would be practically zero.
# I use percentage points of the various prior distributions. The risk is that if the prior
# does not cover the posterior range well, it will not estimate it well. This needs attention. TODO
epsilon = 0.000001
limits = [(dist.ppf(epsilon), dist.ppf(1-epsilon)) for dist in priors]
samp = np.column_stack([np.random.uniform(*limit_pair, size=n) for limit_pair in limits])
lik_integrand = lambda x: _L_n(_cases, x[:, 0], x[:, 1], x[:, 2], x[:, 3], x[:, 4], x[:, 5]) \
* priors[0].pdf(x[:, 0]) * priors[1].pdf(x[:, 1]) * priors[2].pdf(x[:, 2]) \
* priors[3].pdf(x[:, 3]) * priors[4].pdf(x[:, 4]) * priors[5].pdf(x[:, 5])
pds = ProbabilityDensitySample(samp, lik_integrand)
params = []
params.append(
(
pds.expectation(samp[:, 0]),
pds.expectation(samp[:, 1]),
pds.expectation(samp[:, 2]),
pds.expectation(samp[:, 3]),
pds.expectation(samp[:, 4]),
pds.expectation(samp[:, 5]),
)
)
return params, pds
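# Illustrative sketch (not part of the original module): a minimal call to the posterior
# helpers above, assuming normal priors from scipy.stats. The numeric values below are
# hypothetical and chosen only to show the shapes of the inputs and outputs.
#
#   from scipy.stats import norm
#   priors = [norm(0, 2) for _ in range(6)]        # mu_T, beta_T, mu_E, beta1_E, beta2_E, psi
#   scaled = scale_doses([7.5, 15, 30, 45])        # codified doses
#   cases = [(1, 0, 0), (2, 0, 1)]                 # (dose-level, toxicity, efficacy)
#   probs, pds = efftox_get_posterior_probs(cases, priors, scaled, 0.40, 0.45, n=10**4)
#   # probs[i] is (Prob(Tox), Prob(Eff), Prob(Tox < cutoff), Prob(Eff > cutoff)) for dose i+1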
# Desirability metrics
class LpNormCurve:
""" Fit an indifference contour using three points and an L-p norm.
The three points are:
* efficacy when toxicity is impossible;
* toxicity when efficacy is guaranteed;
* an equally desirable hinge point (hinge_eff, hinge_tox) in (0,1)^2
The official EffTox software has used L^p norms in the trade-off analysis since approximately version 2.
This is the current method as at Aug 2014.
For more information, consult the Cook (2006) paper and the Bayesian methods book.
The hinge point is the equally attractive point of the three that is not on the x- or y-axis.
The p-parameter in the L-p norm is initialised by setting r = 1 as per p.119 in Berry et al
"""
def __init__(self, minimum_tolerable_efficacy, maximum_tolerable_toxicity, hinge_prob_eff, hinge_prob_tox):
"""
Params:
minimum_tolerable_efficacy, pi_E^*, the tolerable efficacy when toxicity is impossible
maximum_tolerable_toxicity, pi_T^*, the tolerable toxicity when efficacy is guaranteed
hinge_prob_eff, probability of efficacy at the hinge point
hinge_prob_tox, probability of toxicity at the hinge point
"""
if hinge_prob_tox >= maximum_tolerable_toxicity:
raise ValueError('Probability of toxicity at hinge point should be less than toxicity upper bound.')
if hinge_prob_eff <= minimum_tolerable_efficacy:
raise ValueError('Probability of efficacy at hinge point should be greater than efficacy lower bound.')
def _find_p(p):
a = ((1-hinge_prob_eff)/(1-minimum_tolerable_efficacy))
b = hinge_prob_tox / maximum_tolerable_toxicity
return a**p + b**p - 1
self.minimum_tolerable_efficacy = minimum_tolerable_efficacy
self.maximum_tolerable_toxicity = maximum_tolerable_toxicity
self.p = brentq(_find_p, 0, 100)
self.hinge_points = [(minimum_tolerable_efficacy, 0), (1, maximum_tolerable_toxicity),
(hinge_prob_eff, hinge_prob_tox)]
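# Worked example (approximate, for orientation only): with the hinge points used in the
# EffTox docstring below, i.e. (0.4, 0), (1, 0.7) and (0.5, 0.4), brentq solves
#   ((1 - 0.5) / (1 - 0.4))**p + (0.4 / 0.7)**p = 1
# giving p of roughly 2.07.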
def __call__(self, prob_eff, prob_tox):
x = prob_eff
y = prob_tox
if np.all(0 < x) and np.all(x < 1) and np.all(0 < y) and np.all(y < 1):
a = ((1 - x) / (1 - self.minimum_tolerable_efficacy))
b = y / self.maximum_tolerable_toxicity
r_to_the_p = a**self.p + b**self.p
return 1 - r_to_the_p ** (1./self.p)
else:
response = np.zeros_like(x)
response *= np.nan
return response
def solve(self, prob_eff=None, prob_tox=None, delta=0):
""" Specify exactly one of prob_eff or prob_tox and this will return the other, for given delta"""
if prob_eff is None and prob_tox is None:
raise Exception('Specify prob_eff or prob_tox')
if prob_eff is not None and prob_tox is not None:
raise Exception('Specify just one of prob_eff and prob_tox')
x, y = prob_eff, prob_tox
x_l, y_l = self.minimum_tolerable_efficacy, self.maximum_tolerable_toxicity
scaled_delta = (1-delta)**self.p
if x is None:
# Solve for x
b = y / y_l
a = (scaled_delta - b**self.p)**(1/self.p)
return 1 - (1 - x_l)*a
else:
# Solve for y
a = ((1 - x) / (1 - x_l))
b = (scaled_delta - a**self.p)**(1/self.p)
return b*y_l
def get_tox(self, eff, util=0.0):
""" Get equivalent toxicity probability for given efficacy probability and utility value.
:param eff: efficacy probability
:type eff: float
:param util: utility value, defaults to zero for neutral utility
:type util: float
:return: toxicity probability
:rtype: float
"""
p = self.p
eff0 = self.minimum_tolerable_efficacy
tox1 = self.maximum_tolerable_toxicity
a = ((1-eff) / (1-eff0))
return tox1 * ((1 - util)**p - a**p)**(1 / p)
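# Informal sanity check: for the example hinge points above, get_tox(eff=0.5, util=0)
# should recover a toxicity probability of about 0.4, since the hinge point lies on the
# neutral-utility contour by construction.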
def plot_contours(self, use_ggplot=False, prob_eff=None, prob_tox=None, n=1000,
util_lower=-0.8, util_upper=0.8, util_delta=0.2, title='EffTox utility contours',
custom_points_label = 'priors'):
"""
:param use_ggplot: True to use ggplot, False to use matplotlib
:type use_ggplot: bool
:param prob_eff: optional
:type prob_eff: list
:param prob_tox:
:type prob_tox: list
:param n: number of points per line
:type n: int
:param util_lower: lowest utility value to plot contour for
:type util_lower: float
:param util_upper: lowest utility value to plot contour for
:type util_upper: float
:param util_delta: plot contours for each increment in utility
:type util_delta: float
:param title: chart title
:type title: str
:param custom_points_label: label for points provided via prob_eff and prob_tox
:type custom_points_label: str
:return: plot of efficacy-toxicity contours
"""
eff_vals = np.linspace(0, 1, n)
util_vals = np.linspace(util_lower, util_upper, int(round((util_upper - util_lower) / util_delta)) + 1)
if use_ggplot:
raise NotImplementedError()
else:
import matplotlib.pyplot as plt
# Plot general contours
for u in util_vals:
tox_vals = [self.get_tox(eff=x, util=u) for x in eff_vals]
plt.plot(eff_vals, tox_vals, '-', c='k', lw=0.5)
# Add neutral utility contour
tox_vals = [self.get_tox(eff=x, util=0) for x in eff_vals]
plt.plot(eff_vals, tox_vals, '-', c='k', lw=2, label='neutral utility')
# Add hinge points
hinge_prob_eff, hinge_prob_tox = zip(*self.hinge_points)
plt.plot(hinge_prob_eff, hinge_prob_tox, 'ro', ms=10, label='hinge points')
# Add custom points
if prob_eff is not None and prob_tox is not None:
plt.plot(prob_eff, prob_tox, 'b^', ms=10, label=custom_points_label)
# Plot size
plt.ylim(0, 1)
plt.xlim(0, 1)
plt.xlabel('Prob(Efficacy)')
plt.ylabel('Prob(Toxicity)')
plt.title(title)
plt.legend()
# Return
p = plt.gcf()
phi = (np.sqrt(5)+1)/2.
p.set_size_inches(12, 12/phi)
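# Illustrative usage sketch (not part of the original module; values mirror the EffTox
# docstring example further below). A metric built from hinge points (0.4, 0), (1, 0.7)
# and (0.5, 0.4) assigns utility zero on the neutral contour and positive utility to
# more desirable (eff, tox) pairs:
#
#   metric = LpNormCurve(0.4, 0.7, 0.5, 0.4)
#   metric(0.5, 0.4)   # ~0, the hinge point sits on the neutral-utility contour
#   metric(0.7, 0.2)   # > 0, better than the trade-off contour
#   metric(0.3, 0.6)   # < 0, worse than the trade-off contour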
class InverseQuadraticCurve:
""" Fit an indifference contour of the type, y = a + b/x + c/x^2 where y = Prob(Tox) and x = Prob(Eff).
The official EffTox software used inverse quadratics in the trade-off analysis from inception until
approximately version 2, when the method was replaced in favour of L^p norms, which remain the
current method as at Aug 2014.
For more information, consult the original EffTox paper (2004), the Cook update (2006) and the Bayesian book.
"""
def __init__(self, points):
"""
Params:
Points, list of points in (prob_eff, prob_tox) tuple pairs.
"""
x = np.array([z for z,_ in points])
y = np.array([z for _,z in points])
z = 1/x
import statsmodels.api as sm
lm = sm.OLS(y, np.column_stack((np.ones_like(z), z, z**2))).fit()
a, b, c = lm.params
f = lambda x: a + b/x + c/x**2
# Check f is not a terrible fit
if sum(np.abs(f(x) - y)) > 0.00001:
raise ValueError('%s do not fit an ABC curve well' % points)
self.f = f
self.a, self.b, self.c = a, b, c
def __call__(self, prob_eff, prob_tox):
x = prob_eff
y = prob_tox
if 0 < x < 1 and 0 < y < 1:
gradient = 1.0 * y / (x-1)
def intersection_expression(x, m, f):
return m*(x-1) - f(x)
x_00 = brentq(intersection_expression, 0.0001, 1, args=(gradient, self.f))
y_00 = self.f(x_00)
d1 = np.sqrt((x_00-1)**2 + y_00**2)
d2 = np.sqrt((x-1)**2 + y**2)
return d1 / d2 - 1
else:
return np.nan
def solve(self, prob_eff=None, prob_tox=None, delta=0):
""" Specify exactly one of prob_eff or prob_tox and this will return the other, for given delta"""
# TODO
raise NotImplementedError()
def plot_contours(self, use_ggplot=False, prior_eff_probs=None, prior_tox_probs=None, n=1000,
util_lower=-0.8, util_upper=0.8, util_delta=0.2, title='EffTox utility contours'):
"""
:param use_ggplot: True to use ggplot, False to use matplotlib
:type use_ggplot: bool
:param prior_eff_probs: optional
:type prior_eff_probs: list
:param prior_tox_probs:
:type prior_tox_probs: list
:param n: number of points per line
:type n: int
:param util_lower: lowest utility value to plot contour for
:type util_lower: float
:param util_upper: lowest utility value to plot contour for
:type util_upper: float
:param util_delta: plot contours for each increment in utility
:type util_delta: float
:return: plot of efficacy-toxicity contours
"""
raise NotImplementedError()
# I used to call the InverseQuadraticCurve an ABC_Curve because it uses three parameters, a, b and c.
# Similarly, I used to call the LpNormCurve a HingedCurve because it uses a hinge point.
# Mask those for backwards compatibility in my code.
HingedCurve = LpNormCurve
ABC_Curve = InverseQuadraticCurve
class EffTox(EfficacyToxicityDoseFindingTrial):
""" This is an object-oriented implementation of Thall & Cook's EffTox trial design.
See Thall, P.F. & Cook, J.D. (2004) - Dose-Finding Based on Efficacy-Toxicity Trade-Offs
e.g. general usage
(for now, parameter means and standard deviations were fetched from MD Anderson's EffTox software. TODO)
>>> real_doses = [7.5, 15, 30, 45]
>>> tox_cutoff = 0.40
>>> eff_cutoff = 0.45
>>> tox_certainty = 0.05
>>> eff_certainty = 0.05
>>> mu_t_mean, mu_t_sd = -5.4317, 2.7643
>>> beta_t_mean, beta_t_sd = 3.1761, 2.7703
>>> mu_e_mean, mu_e_sd = -0.8442, 1.9786
>>> beta_e_1_mean, beta_e_1_sd = 1.9857, 1.9820
>>> beta_e_2_mean, beta_e_2_sd = 0, 0.2
>>> psi_mean, psi_sd = 0, 1
>>> from scipy.stats import norm
>>> theta_priors = [
... norm(loc=mu_t_mean, scale=mu_t_sd),
... norm(loc=beta_t_mean, scale=beta_t_sd),
... norm(loc=mu_e_mean, scale=mu_e_sd),
... norm(loc=beta_e_1_mean, scale=beta_e_1_sd),
... norm(loc=beta_e_2_mean, scale=beta_e_2_sd),
... norm(loc=psi_mean, scale=psi_sd),
... ]
>>> hinge_points = [(0.4, 0), (1, 0.7), (0.5, 0.4)]
>>> metric = LpNormCurve(hinge_points[0][0], hinge_points[1][1], hinge_points[2][0], hinge_points[2][1])
>>> trial = EffTox(real_doses, theta_priors, tox_cutoff, eff_cutoff, tox_certainty, eff_certainty, metric,
... max_size=30, first_dose=3)
>>> trial.next_dose()
3
>>> trial.update([(3, 0, 1), (3, 1, 1), (3, 0, 0)])
4
>>> trial.has_more()
True
>>> trial.size(), trial.max_size()
(3, 30)
"""
def __init__(self, real_doses, theta_priors, tox_cutoff, eff_cutoff,
tox_certainty, eff_certainty, metric, max_size, first_dose=1,
avoid_skipping_untried_escalation=True, avoid_skipping_untried_deescalation=True,
num_integral_steps=10**5):
"""
Params:
:param real_doses: list of actual doses. E.g. for 10mg and 25mg, use [10, 25].
:type real_doses: list
:param theta_priors: list of prior distributions corresponding to mu_T, beta_T, mu_E, beta1_E, beta2_E, psi
respectively. Each prior object should support obj.ppf(x) and obj.pdf(x)
:type theta_priors: list
:param tox_cutoff: the maximum acceptable probability of toxicity
:type tox_cutoff: float
:param eff_cutoff: the minimum acceptable probability of efficacy
:type eff_cutoff: float
:param tox_certainty: the posterior certainty required that toxicity is less than the cutoff
:type tox_certainty: float
:param eff_certainty: the posterior certainty required that efficacy is greater than the cutoff
:type eff_certainty: float
:param metric: instance of LpNormCurve or InverseQuadraticCurve, used to calculate utility
of efficacy/toxicity probability pairs.
:type metric: LpNormCurve
:param max_size: maximum number of patients to use
:type max_size: int
:param first_dose: starting dose level, 1-based. I.e. first_dose=3 means the middle dose of 5.
:type first_dose: int
:param avoid_skipping_untried_escalation: True to avoid skipping untried doses in escalation
:type avoid_skipping_untried_escalation: bool
:param avoid_skipping_untried_deescalation: True to avoid skipping untried doses in de-escalation
:type avoid_skipping_untried_deescalation: bool
:param num_integral_steps: number of points to use in Monte Carlo integration.
:type num_integral_steps: int
Note: dose_allocation_mode has been suppressed. Remove once I know it is not needed. KB
# Instances have a dose_allocation_mode property that is set according to this schedule:
# 0, when no dose has been chosen
# 1, when optimal dose is selected from non-trivial admissable set (this is normal operation)
# 2, when next untried dose is selected to avoid skipping doses
# 2.5, when dose is maintained because ideal dose would require skipping and intervening dose is inadmissable
# 3, when admissable set is empty so lowest untried dose above starting dose that is probably tolerable is
# selected
# 4, when admissable set is empty and there is no untested dose above first dose to try
# 5, when admissable set is empty and all doses were probably too toxic
# 6, when admissable set is not-empty but trial stops because 1) ideal dose requires skipping; 2) next
# -best dose is inadmissable; and 3) current dose is inadmissable. I question the validity of this
# scenario because avoiding stopping was possible. However, I am trying to faithfully reproduce
# the MD Anderson software so this scenario is programmed in. That may change. If you want to avoid
# this outcome, allow dose skipping.
"""
EfficacyToxicityDoseFindingTrial.__init__(self, first_dose, len(real_doses), max_size)
if len(theta_priors) != 6:
raise ValueError('theta_priors should have 6 items.')
self.real_doses = real_doses
self._scaled_doses = np.log(real_doses) - np.mean(np.log(real_doses))
self.priors = theta_priors
self.tox_cutoff = tox_cutoff
self.eff_cutoff = eff_cutoff
self.tox_certainty = tox_certainty
self.eff_certainty = eff_certainty
self.metric = metric
self.avoid_skipping_untried_escalation = avoid_skipping_untried_escalation
self.avoid_skipping_untried_deescalation = avoid_skipping_untried_deescalation
self.num_integral_steps = num_integral_steps
# Reset
self.reset()
def _update_integrals(self, n=None):
""" Method to recalculate integrals, thus updating probabilties of eff and tox, utilities, and
admissable set.
"""
if n is None:
n = self.num_integral_steps
cases = list(zip(self._doses, self._toxicities, self._efficacies))
post_probs, _pds = efftox_get_posterior_probs(cases, self.priors, self._scaled_doses, self.tox_cutoff,
self.eff_cutoff, n)
prob_tox, prob_eff, prob_acc_tox, prob_acc_eff = zip(*post_probs)
admissable = np.array([(x >= self.tox_certainty and y >= self.eff_certainty) # Probably acceptable tox & eff
or (i==self.maximum_dose_given() and x >= self.tox_certainty) # lowest untried dose above
# starting dose and
# probably acceptable tox
for i, (x, y) in enumerate(zip(prob_acc_tox, prob_acc_eff))])
admissable_set = [i+1 for i, x in enumerate(admissable) if x]
# Beware: I normally use (tox, eff) pairs but the metric expects (eff, tox) pairs, driven
# by the equation form that Thall & Cook chose.
utility = np.array([self.metric(x[0], x[1]) for x in zip(prob_eff, prob_tox)])
self.prob_tox = prob_tox
self.prob_eff = prob_eff
self.prob_acc_tox = prob_acc_tox
self.prob_acc_eff = prob_acc_eff
self._admissable_set = admissable_set
self.utility = utility
self.pds = _pds
def _EfficacyToxicityDoseFindingTrial__calculate_next_dose(self, n=None):
if n is None:
n = self.num_integral_steps
self._update_integrals(n)
if self.treated_at_dose(self.first_dose()) > 0:
# First dose has been tried so modelling may commence
max_dose_given = self.maximum_dose_given()
min_dose_given = self.minimum_dose_given()
for i in np.argsort(-self.utility): # dose-indices from highest to lowest utility
dose_level = i+1
if dose_level in self.admissable_set():
if self.avoid_skipping_untried_escalation and max_dose_given and dose_level - max_dose_given > 1:
pass # No skipping
elif self.avoid_skipping_untried_deescalation and min_dose_given and min_dose_given - dose_level > 1:
pass # No skipping
else:
self._status = 1
self._next_dose = dose_level
break
else:
# No dose can be selected
self._next_dose = -1
self._status = -1
else:
# First dose not given yet, so keep recommending that, like EffTox software does
self._next_dose = self.first_dose()
if self.size() > 0:
self._status = -10
else:
self._status = 0
return self._next_dose
def _EfficacyToxicityDoseFindingTrial__reset(self):
""" Opportunity to run implementation-specific reset operations. """
self.prob_tox = []
self.prob_eff = []
self.prob_acc_tox = []
self.prob_acc_eff = []
self._admissable_set = []
self.utility = []
def has_more(self):
return EfficacyToxicityDoseFindingTrial.has_more(self)
def tabulate(self):
# import pandas as pd
# tab_data = OrderedDict()
# treated_at_dose = [self.treated_at_dose(d) for d in self.dose_levels()]
# eff_at_dose = [self.efficacies_at_dose(d) for d in self.dose_levels()]
# tox_at_dose = [self.toxicities_at_dose(d) for d in self.dose_levels()]
# tab_data['Dose'] = self.dose_levels()
# tab_data['N'] = treated_at_dose
# tab_data['Efficacies'] = eff_at_dose
# tab_data['Toxicities'] = tox_at_dose
# df = pd.DataFrame(tab_data)
# df['EffRate'] = np.where(df.N > 0, df.Efficacies / df.N, np.nan)
# df['ToxRate'] = np.where(df.N > 0, df.Toxicities / df.N, np.nan)
df = EfficacyToxicityDoseFindingTrial.tabulate(self)
df['P(Eff)'] = self.prob_eff
df['P(Tox)'] = self.prob_tox
df['P(AccEff)'] = self.prob_acc_eff
df['P(AccTox)'] = self.prob_acc_tox
df['Admissible'] = self.dose_admissability()
df['Utility'] = self.utility
return df
def posterior_params(self, n=None):
""" Get posterior parameter estimates """
if n is None:
n = self.num_integral_steps
cases = list(zip(self._doses, self._toxicities, self._efficacies))
post_params, pds = efftox_get_posterior_params(cases, self.priors, self._scaled_doses, n)
return post_params
def optimal_decision(self, prob_tox, prob_eff):
""" Get the optimal dose choice for a given dose-toxicity curve.
.. note:: Ken Cheung (2014) presented the idea that the optimal behaviour of a dose-finding
design can be calculated for a given set of patients with their own specific tolerances by
invoking the dose decision on the complete (and unknowable) toxicity and efficacy curves.
:param prob_tox: collection of toxicity probabilities
:type prob_tox: list
:param prob_eff: collection of efficacy probabilities
:type prob_eff: list
:return: the optimal (1-based) dose decision
:rtype: int
"""
admiss, u, u_star, obd, u_cushion = solve_metrizable_efftox_scenario(prob_tox, prob_eff, self.metric,
self.tox_cutoff, self.eff_cutoff)
return obd
def scaled_doses(self):
return self._scaled_doses
def _post_density_plot(self, func=None, x_name='', plot_title='', include_doses=None, boot_samps=1000):
from ggplot import aes, ggplot, geom_density, ggtitle
import pandas as pd
if include_doses is None:
include_doses = range(1, self.num_doses + 1)
def my_func(x, samp):
tox_probs = _pi_T(x, mu=samp[:, 0], beta=samp[:, 1])
eff_probs = _pi_E(x, mu=samp[:, 2], beta1=samp[:, 3], beta2=samp[:, 4])
u = self.metric(eff_probs, tox_probs)
return u
if func is None:
func = my_func
x_boot = []
dose_indices = []
samp = self.pds._samp
p = self.pds._probs
p /= p.sum()
for i, x in enumerate(self.scaled_doses()):
dose_index = i+1
if dose_index in include_doses:
x = func(x, samp)
x_boot.extend(np.random.choice(x, size=boot_samps, replace=True, p=p))
dose_indices.extend(np.repeat(dose_index, boot_samps))
df = pd.DataFrame({x_name: x_boot, 'Dose': dose_indices})
return ggplot(aes(x=x_name, fill='Dose'), data=df) + geom_density(alpha=0.6) + ggtitle(plot_title)
def plot_posterior_tox_prob_density(self, include_doses=None, boot_samps=1000):
""" Plot the posterior densities of the dose probabilities of toxicity.
.. note:: this method uses ggplot so that must be available on your system.
Why ggplot and not matplotlib? Because I also use R so I prefer the commonality of ggplot.
The length of time this method takes will be linked to number of points in last call to update().
It is relatively slow so don't go nuts.
:param include_doses: optional, list of dose levels to include, e.g. [1,2]. Default is None for all doses.
:type include_doses: list
:return: ggplot device
:rtype: ggplot
"""
def get_prob_tox(x, samp):
tox_probs = _pi_T(x, mu=samp[:, 0], beta=samp[:, 1])
return tox_probs
return self._post_density_plot(func=get_prob_tox, x_name='Prob(Toxicity)',
plot_title='Posterior densities of Prob(Toxicity)',
include_doses=include_doses, boot_samps=boot_samps)
def plot_posterior_eff_prob_density(self, include_doses=None, boot_samps=1000):
""" Plot the posterior densities of the dose probabilities of efficacy.
.. note:: this method uses ggplot, so ggplot must be available on your system.
Why ggplot and not matplotlib? Because I also use R, so I prefer the commonality of ggplot.
The length of time this method takes is linked to the number of points in the last call to update().
It is relatively slow, so don't go nuts.
:param include_doses: optional, list of dose levels to include, e.g. [1,2]. Default is None for all doses.
:type include_doses: list
:return: ggplot device
:rtype: ggplot
"""
def get_prob_eff(x, samp):
eff_probs = _pi_E(x, mu=samp[:, 2], beta1=samp[:, 3], beta2=samp[:, 4])
return eff_probs
return self._post_density_plot(func=get_prob_eff, x_name='Prob(Efficacy)',
plot_title='Posterior densities of Prob(Efficacy)',
include_doses=include_doses, boot_samps=boot_samps)
def plot_posterior_utility_density(self, include_doses=None, boot_samps=1000):
""" Plot the posterior densities of the dose utilities.
.. note:: this method uses ggplot, so ggplot must be available on your system.
Why ggplot and not matplotlib? Because I also use R, so I prefer the commonality of ggplot.
The length of time this method takes is linked to the number of points in the last call to update().
It is relatively slow, so don't go nuts.
:param include_doses: optional, list of dose levels to include, e.g. [1,2]. Default is None for all doses.
:type include_doses: list
:return: ggplot device
:rtype: ggplot
"""
def get_utility(x, samp):
tox_probs = _pi_T(x, mu=samp[:, 0], beta=samp[:, 1])
eff_probs = _pi_E(x, mu=samp[:, 2], beta1=samp[:, 3], beta2=samp[:, 4])
u = self.metric(eff_probs, tox_probs)
return u
return self._post_density_plot(func=get_utility, x_name='Utility', plot_title='Posterior densities of Utility',
include_doses=include_doses, boot_samps=boot_samps)
def prob_superior_utility(self, dl1, dl2):
""" Returns the probability that the utility of dose-level 1 (dl1) exceeds that of dose-level 2 (dl2)
:param dl1: 1-based dose-level of dose 1
:type dl1: int
:param dl2: 1-based dose-level of dose 2
:type dl2: int
:return: probability that utility of dose-level 1 exceeds that of dose-level 2
:rtype: float
"""
if dl1 == dl2:
return 0
samp = self.pds._samp
p = self.pds._probs
p /= p.sum()
x1 = self.scaled_doses()[dl1-1]
x1_tox_probs = _pi_T(x1, mu=samp[:, 0], beta=samp[:, 1])
x1_eff_probs = _pi_E(x1, mu=samp[:, 2], beta1=samp[:, 3], beta2=samp[:, 4])
u1 = self.metric(x1_eff_probs, x1_tox_probs)
x2 = self.scaled_doses()[dl2-1]
x2_tox_probs = _pi_T(x2, mu=samp[:, 0], beta=samp[:, 1])
x2_eff_probs = _pi_E(x2, mu=samp[:, 2], beta1=samp[:, 3], beta2=samp[:, 4])
u2 = self.metric(x2_eff_probs, x2_tox_probs)
return np.sum(p * (u1 > u2))
def utility_superiority_matrix(self):
superiority_mat = np.zeros((self.num_doses, self.num_doses))
superiority_mat[:] = np.nan
for i in range(1, self.num_doses+1):
for j in range(i+1, self.num_doses+1):
p = self.prob_superior_utility(i, j)
superiority_mat[i-1, j-1] = p
superiority_mat[j-1, i-1] = 1-p
return superiority_mat
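# Explanatory note (added): entry [i-1, j-1] of the returned matrix is the posterior
# probability that the utility of dose i exceeds that of dose j, entry [j-1, i-1] is its
# complement, and the diagonal is left as NaN. For example, a value of 0.9 at [2, 0]
# would mean dose 3 is very likely more useful than dose 1 given the data so far.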
def solve_metrizable_efftox_scenario(prob_tox, prob_eff, metric, tox_cutoff, eff_cutoff):
""" Solve a metrizable efficacy-toxicity dose-finding scenario.
Metrizable means that the priority of doses can be calculated using a metric.
A dose is conformative if it has probability of toxicity less than some cutoff; and
probability of efficacy greater than some cutoff.
The OBD is the dose with the highest utility in the conformative set. The OBD does not
necessarily have a positive utility.
This function returns, as a 5-tuple, (an array of bools representing whether each dose is conformative, the array
of utilities, the utility of the optimal dose, the 1-based OBD level, and the utility distance from the OBD to the
next most preferable dose in the conformative set, where there are several conformative doses)
:param prob_tox: Probabilities of toxicity at each dose
:type prob_tox: iterable
:param prob_eff: Probabilities of efficacy at each dose
:type prob_eff: iterable
:param metric: Metric to score
:type metric: class like clintrials.dosefinding.efftox.LpNormCurve or func(prob_eff, prob_tox) returning float
:param tox_cutoff: maximum acceptable toxicity probability
:type tox_cutoff: float
:param eff_cutoff: minimum acceptable efficacy probability
:type eff_cutoff: float
"""
if len(prob_tox) != len(prob_eff):
raise Exception('prob_tox and prob_eff should be lists or tuples of the same length.')
t = prob_tox
r = prob_eff
# Probabilities of 0.0 and 1.0 in the prob_tox and eff vectors cause problems when calculating utilities.
# Being pragmatic, the easiest way to deal with them is to swap them for some number that is
# nearly 0.0 or 1.0
t = np.where(t <= 0, 0.001, t)
t = np.where(t >= 1, 0.999, t)
r = np.where(r <= 0, 0.001, r)
r = np.where(r >= 1, 0.999, r)
conform = np.array([(eff >= eff_cutoff) and (tox <= tox_cutoff) for eff, tox in zip(r, t)])
util = np.array([metric(eff, tox) for eff, tox in zip(r, t)])
conform_util = np.where(conform, util, -np.inf)
if np.all(np.isnan(util)):
logging.warning('All NaN util encountered in solve_metrizable_efftox_scenario')
return conform, util, np.nan, -1, np.nan
elif np.all(np.isnan(conform_util)):
logging.warning('All NaN conform_util encountered in solve_metrizable_efftox_scenario')
return conform, util, np.nan, -1, np.nan
else:
if sum(conform) >= 2:
obd = np.nanargmax(conform_util)+1
u2, u1 = np.sort(conform_util)[-2:]
u_cushion = u1 - u2
return conform, util, u1, obd, u_cushion
elif sum(conform) >= 1:
obd = np.nanargmax(conform_util)+1
u1 = np.nanmax(conform_util)
return conform, util, u1, obd, np.nan
# Default:
return conform, util, np.nan, -1, np.nan
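# Worked example (added; the simple linear metric below is hypothetical - a real trial
# would use something like LpNormCurve):
# >>> metric = lambda eff, tox: eff - tox
# >>> conform, util, u_star, obd, cushion = solve_metrizable_efftox_scenario(
# ...     [0.05, 0.1, 0.2, 0.4], [0.2, 0.4, 0.6, 0.7], metric, 0.3, 0.3)
# Only doses 2 and 3 are conformative, util = [0.15, 0.3, 0.4, 0.3], so obd = 3,
# u_star = 0.4 and the cushion to the next-best conformative dose is 0.1.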
def get_obd(tox_curve, eff_curve, metric, tox_cutoff, eff_cutoff):
X = solve_metrizable_efftox_scenario(tox_curve, eff_curve, metric,
tox_cutoff, eff_cutoff)
conform, util, u_star, obd, u_cushion = X
return obd
def get_conformative_doses(tox_curve, eff_curve, metric, tox_cutoff, eff_cutoff):
X = solve_metrizable_efftox_scenario(tox_curve, eff_curve, metric,
tox_cutoff, eff_cutoff)
conform, util, u_star, obd, u_cushion = X
return [int(x) for x in conform]
def get_util(tox_curve, eff_curve, metric, tox_cutoff, eff_cutoff):
X = solve_metrizable_efftox_scenario(tox_curve, eff_curve, metric,
tox_cutoff, eff_cutoff)
conform, util, u_star, obd, u_cushion = X
return np.round(util, 2)
def classify_problem(delta, prob_tox, prob_eff, metric, tox_cutoff, eff_cutoff, text_label=True):
X = solve_metrizable_efftox_scenario(prob_tox, prob_eff, metric, tox_cutoff, eff_cutoff)
conform, util, u_star, obd, u_cushion = X
dose_within_delta = np.array([u >= (1-delta)*u_star for u in util])
if obd == -1:
if text_label:
return 'Stop'
else:
return 1
elif sum(dose_within_delta) == 1:
if text_label:
return 'Optimal'
else:
return 2
else:
if text_label:
return 'Desirable'
else:
return 3
def get_problem_class(delta, tox_curve, eff_curve, metric, tox_cutoff, eff_cutoff):
return classify_problem(delta, tox_curve, eff_curve, metric, tox_cutoff, eff_cutoff)
def classify_tox_class(prob_tox, tox_cutoff, text_label=True):
prob_tox = np.array(prob_tox)
if sum(prob_tox < tox_cutoff) == len(prob_tox):
if text_label:
return 'Tolerable'
else:
return 1
elif sum(prob_tox > tox_cutoff) == len(prob_tox):
if text_label:
return 'Toxic'
else:
return 2
else:
if text_label:
return 'Mixed'
else:
return 3
def get_tox_class(tox_curve, tox_cutoff):
prob_tox = tox_curve
return classify_tox_class(prob_tox, tox_cutoff)
def classify_eff_class(prob_eff, eff_cutoff, text_label=True):
prob_eff = np.array(prob_eff)
max_eff = np.max(prob_eff)
if np.all([prob_eff[i] > prob_eff[i-1] for i in range(1, len(prob_eff))]):
if text_label:
return 'Monotonic'
else:
return 1
elif sum(prob_eff == max_eff) == 1:
if text_label:
return 'Unimodal'
else:
return 2
elif sum(prob_eff == max_eff) > 1:
if text_label:
return 'Plateau'
else:
return 3
else:
if text_label:
return 'Weird'
else:
return 4
def get_eff_class(eff_curve, eff_cutoff):
prob_eff = eff_curve
return classify_eff_class(prob_eff, eff_cutoff)
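# Illustrative classifications (added):
# >>> classify_tox_class([0.05, 0.10, 0.20], 0.3)
# 'Tolerable'    # every dose sits below the toxicity cutoff
# >>> classify_eff_class([0.2, 0.4, 0.6], 0.3)
# 'Monotonic'    # efficacy strictly increases with dose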
def efftox_dtp_detail(trial):
""" Performs the EffTox-specific extra reporting when calculating DTPs
:param trial: instance of EffTox
:return: OrderedDict
"""
to_return = OrderedDict()
# Utility
to_return['Utility'] = iterable_to_json(trial.utility)
for i, dl in enumerate(trial.dose_levels()):
to_return['Utility{}'.format(dl)] = trial.utility[i]
# Prob(Eff)
to_return['ProbEff'] = iterable_to_json(trial.prob_eff)
for i, dl in enumerate(trial.dose_levels()):
to_return['ProbEff{}'.format(dl)] = trial.prob_eff[i]
# Prob(Acceptable Eff)
to_return['ProbAccEff'] = iterable_to_json(trial.prob_acc_eff)
for i, dl in enumerate(trial.dose_levels()):
to_return['ProbAccEff{}'.format(dl)] = trial.prob_acc_eff[i]
# Prob(Tox)
to_return['ProbTox'] = iterable_to_json(trial.prob_tox)
for i, dl in enumerate(trial.dose_levels()):
to_return['ProbTox{}'.format(dl)] = trial.prob_tox[i]
# Prob(Acceptable Tox)
to_return['ProbAccTox'] = iterable_to_json(trial.prob_acc_tox)
for i, dl in enumerate(trial.dose_levels()):
to_return['ProbAccTox{}'.format(dl)] = trial.prob_acc_tox[i]
# What is the probability that the utility of the top dose exceeds that of the next best dose?
# I.e. how confident are we that the OBD really is the best dose?
# u1_dose_index, u2_dose_index = np.argsort(-trial.utility)[:2]
sup_mat = trial.utility_superiority_matrix()
to_return['SuperiorityMatrix'] = [iterable_to_json(x) for x in sup_mat]
obd = trial.next_dose()
if obd > 0:
min_sup = np.nanmin(sup_mat[obd-1])
else:
min_sup = np.nan
to_return['MinProbSuperiority'] = atomic_to_json(min_sup)
return to_return | gpl-3.0 |
idontgetoutmuch/ParkingWestminster | load_SJWHS.py | 1 | 2784 | import pandas as pd
from pandas import DataFrame, Series
import numpy as np
import os
import csv
names = ["amount paid", "paid duration mins", "start date", "start day", "end date", "end day", "start time", "end time", "DesignationType", "Hours of Control", "Tariff", "Max Stay", "Spaces", "Street", "x coordinate", "y coordinate", "latitude", "longitude"]
#df = pd.read_csv('Data\ParkingCashlessDenorm.csv', names =names)
# pick out St Johns Wood High Street
#SJWHS = df[df['Street'] == 'St John\'s Wood High Street']
def get_occupancy(df, x_coord):
# pick an arbitrary Tues
SWTues = df[(df['start date'] == '2013-04-30 00:00:00') & (df['x coordinate'] == x_coord)]
SWTues['start date'] = pd.to_datetime(SWTues['start date'])
SWTues['end date'] = pd.to_datetime(SWTues['end date'])
# make a datetime for today at midnight
ts_now=pd.to_datetime("2013-10-05")
SWTues['start time'] = pd.to_datetime(SWTues['start time']) # defaults to today (5th Oct) with correct hour:min
SWTues['start time'] =SWTues['start time'] - ts_now # get relative difference, leaves a timedelta
SWTues['start datetime'] = SWTues['start time'] + SWTues['start date'] # combine date with midnight & timedelta to get new datetime
SWTues['end time'] = pd.to_datetime(SWTues['end time']) # defaults to today (5th Oct) with correct hour:min
SWTues['end time'] =SWTues['end time'] - ts_now # get relative difference, leaves a timedelta
SWTues['end datetime'] = SWTues['end time'] + SWTues['end date'] # combine date with midnight & timedelta to get new datetime
s1 = Series(np.ones(SWTues.shape[0]), index=SWTues['start datetime'])
s2 = Series(-1*np.ones(SWTues.shape[0]), index=SWTues['end datetime'])
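# Explanatory note (added): each parking event contributes +1 at its start time and -1 at
# its end time; appending the two series, sorting by time and taking the cumulative sum
# gives the number of cars parked at each event boundary, so its mean is the average occupancy.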
mean_occ = s1.append(s2).sort_index().cumsum().mean()
max_occ = SWTues['Spaces'].max()
tariff = SWTues['Tariff'].max()
max_stay = SWTues['Max Stay'].max()
total_takings = SWTues['amount paid'].sum()
occ_prop = mean_occ/max_occ
return mean_occ, max_occ, occ_prop, tariff, max_stay, total_takings
def get_all_occupancies(df):
with open("occupancy_by_lat_long.csv", "w") as f:
writer = csv.writer(f)
out = {}
dfgb = df.groupby(['latitude','longitude','x coordinate'])
sgb = dfgb['Spaces']
mapping_of_lat_long_to_spaces = sgb.mean()
dict_of_lat_long_to_spaces = dict(mapping_of_lat_long_to_spaces)
print len(mapping_of_lat_long_to_spaces)
for key_nbr, (lat, lng, place) in enumerate(mapping_of_lat_long_to_spaces.keys()):
print key_nbr
mean_occ, max_occ, occ_prop, tariff, max_stay, total_takings = get_occupancy(df, place)
#print mean_occ, max_occ, occ_prop, min(occ_prop,1.0)
writer.writerow([lat, lng, mean_occ, max_occ, occ_prop, min(occ_prop,1.0), tariff, max_stay, total_takings])
| apache-2.0 |
NMGRL/pychron | pychron/mv/locator.py | 2 | 28476 | # Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import time
from traits.api import Float
# ============= standard library imports ========================
from numpy import array, histogram, argmax, zeros, asarray, ones_like, \
nonzero, max, arange, argsort, invert, median, mean, zeros_like
from operator import attrgetter
from skimage.morphology import watershed
from skimage.draw import polygon, circle, circle_perimeter, circle_perimeter_aa
from scipy import ndimage
from skimage.exposure import rescale_intensity
from skimage.filters import gaussian
from skimage import feature
# ============= local library imports ==========================
from pychron.loggable import Loggable
from pychron.mv.segment.region import RegionSegmenter
from pychron.image.cv_wrapper import grayspace, draw_contour_list, contour, \
colorspace, get_polygons, get_size, new_point, draw_rectangle, \
draw_lines, \
draw_polygons, crop
from pychron.mv.target import Target
from pychron.core.geometry.geometry import approximate_polygon_center, \
calc_length
def _coords_inside_image(rr, cc, shape):
mask = (rr >= 0) & (rr < shape[0]) & (cc >= 0) & (cc < shape[1])
return rr[mask], cc[mask]
def draw_circle(frame, center_x, center_y, radius, color, **kw):
cy, cx = circle(int(center_y), int(center_x), int(radius), shape=frame.shape)
frame[cy, cx] = color
def draw_circle_perimeter(frame, center_x, center_y, radius, color):
cy, cx = circle_perimeter(int(center_y), int(center_x), int(radius))
cy, cx = _coords_inside_image(cy, cx, frame.shape)
frame[cy, cx] = color
class Locator(Loggable):
pxpermm = Float
use_histogram = False
use_arc_approximation = True
use_square_approximation = True
step_signal = None
pixel_depth = 255
def wait(self):
if self.step_signal:
self.step_signal.wait()
self.step_signal.clear()
def crop(self, src, cw, ch, ox=0, oy=0, verbose=True):
cw_px = int(cw * self.pxpermm)
ch_px = int(ch * self.pxpermm)
w, h = get_size(src)
x = int((w - cw_px) / 2. + ox)
y = int((h - ch_px) / 2. - oy)
# r = 4 - cw_px % 4
# cw_px = ch_px = cw_px + r
if verbose:
self.debug('Crop: x={},y={}, cw={}, ch={}, '
'width={}, height={}, ox={}, oy={}'.format(x, y, cw_px, ch_px, w, h, ox, oy))
return asarray(crop(src, x, y, cw_px, ch_px))
def find(self, image, frame, dim, shape='circle', **kw):
"""
image is a stand alone image
dim = float. radius or half length of a square in pixels
find the hole in the image
return the offset from the center of the image
0. image is already cropped
1. find polygons
"""
dx, dy = None, None
targets = self._find_targets(image, frame, dim, shape=shape, **kw)
if targets:
self.info('found {} potential targets'.format(len(targets)))
# draw center indicator
src = image.source_frame
self._draw_center_indicator(src, size=2, shape='rect', radius=round(dim))
# draw targets
self._draw_targets(src, targets)
if shape == 'circle':
if self.use_arc_approximation:
# calculate circle_minimization position
dx, dy = self._arc_approximation(src, targets[0], dim)
else:
dx, dy = self._calculate_error(targets)
else:
dx, dy = self._calculate_error(targets)
# if self.use_square_approximation:
# dx, dy = self._square_approximation(src, targets[0], dim)
# image.set_frame(src[:])
self.info('dx={}, dy={}'.format(dx, dy))
return dx, dy
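# Hedged usage sketch (added; the image object and numbers are illustrative only):
# loc = Locator(pxpermm=23.0)
# frame = loc.crop(image.source_frame, cw=2.5, ch=2.5)  # 2.5 x 2.5 mm crop around centre
# dim_px = 1.0 * loc.pxpermm  # radius of a 1 mm hole in pixels
# dx, dy = loc.find(image, frame, dim_px, shape='circle')
# dx, dy are the pixel offsets of the located hole from the frame centre,
# or (None, None) when no acceptable target was found.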
def _find_targets(self, image, frame, dim, shape='circle',
search=None, preprocess=True,
filter_targets=True,
convexity_filter=False,
mask=False,
set_image=True, inverted=False):
"""
use a segmentor to segment the image
"""
if search is None:
search = {}
if preprocess:
if not isinstance(preprocess, dict):
preprocess = {}
src = self._preprocess(frame, **preprocess)
else:
src = grayspace(frame)
if src is None:
print('Locator: src is None')
return
if mask:
self._mask(src, mask)
if inverted:
src = invert(src)
start = search.get('start')
if start is None:
w = search.get('width', 10)
start = int(mean(src[src > 0])) - search.get('start_offset_scalar', 3) * w
step = search.get('step', 2)
n = search.get('n', 20)
blocksize_step = search.get('blocksize_step', 5)
seg = RegionSegmenter(use_adaptive_threshold=search.get('use_adaptive_threshold', False),
blocksize=search.get('blocksize', 20))
fa = self._get_filter_target_area(shape, dim)
phigh, plow = None, None
for j in range(n):
ww = w * (j + 1)
self.debug('start intensity={}, width={}'.format(start, ww))
for i in range(n):
low = max((0, start + i * step - ww))
high = max((1, min((255, start + i * step + ww))))
if inverted:
low = 255-low
high = 255-high
seg.threshold_low = low
seg.threshold_high = high
if seg.threshold_low == plow and seg.threshold_high == phigh:
break
plow = seg.threshold_low
phigh = seg.threshold_high
nsrc = seg.segment(src)
seg.blocksize += blocksize_step
nf = colorspace(nsrc)
# draw contours
targets = self._find_polygon_targets(nsrc, frame=nf)
if set_image and image is not None:
image.set_frame(nf)
if targets:
# filter targets
if filter_targets:
targets = self._filter_targets(image, frame, dim, targets, fa)
elif convexity_filter:
# for t in targets:
# print t.convexity, t.area, t.min_enclose_area, t.perimeter_convexity
targets = [t for t in targets if t.perimeter_convexity > convexity_filter]
if targets:
return sorted(targets, key=attrgetter('area'), reverse=True)
# time.sleep(0.5)
def _mask(self, src, radius=None):
radius *= self.pxpermm
h, w = src.shape[:2]
c = circle(h / 2., w / 2., radius, shape=(h, w))
mask = ones_like(src, dtype=bool)
mask[c] = False
src[mask] = 0
return invert(mask)
# ===============================================================================
# filter
# ===============================================================================
def _filter_targets(self, image, frame, dim, targets, fa, threshold=0.85):
"""
filter targets using the _filter_test function
return list of Targets that pass _filter_test
"""
ts = [self._filter_test(image, frame, ti, dim, threshold, fa[0], fa[1])
for ti in targets]
return [ta[0] for ta in ts if ta[1]]
def _filter_test(self, image, frame, target, dim, cthreshold, mi, ma):
"""
if the convexity of the target is <threshold try to do a watershed segmentation
make black image with white polygon
do watershed segmentation
find polygon center
"""
ctest, centtest, atest = self._test_target(frame, target,
cthreshold, mi, ma)
# print('ctest', ctest, cthreshold, 'centtest', centtest, 'atereat', atest, mi, ma)
result = ctest and atest and centtest
if not ctest and (atest and centtest):
target = self._segment_polygon(image, frame,
target,
dim,
cthreshold, mi, ma)
result = True if target else False
return target, result
def _test_target(self, frame, ti, cthreshold, mi, ma):
# print('converasdf', ti.convexity, 'ara', ti.area)
ctest = ti.convexity > cthreshold
centtest = self._near_center(ti.centroid, frame)
atest = ma > ti.area > mi
return ctest, centtest, atest
def _find_polygon_targets(self, src, frame=None):
src, contours, hieararchy = contour(src)
# contours, hieararchy = find_contours(src)
# convert to color for display
if frame is not None:
draw_contour_list(frame, contours, hieararchy)
# do polygon approximation
origin = self._get_frame_center(src)
pargs = get_polygons(src, contours, hieararchy)
return self._make_targets(pargs, origin)
def _segment_polygon(self, image, frame, target, dim, cthreshold, mi, ma):
src = frame[:]
wh = get_size(src)
# make image with polygon
im = zeros(wh)
points = asarray(target.poly_points)
rr, cc = polygon(*points.T)
im[cc, rr] = 255
# do watershedding
distance = ndimage.distance_transform_edt(im)
local_maxi = feature.peak_local_max(distance, labels=im, indices=False)
markers, ns = ndimage.label(local_maxi)
wsrc = watershed(-distance, markers, mask=im)
wsrc = wsrc.astype('uint8')
# self.test_image.setup_images(3, wh)
# self.test_image.set_image(distance, idx=0)
# self.test_image.set_image(wsrc, idx=1)
# self.wait()
targets = self._find_polygon_targets(wsrc)
ct = cthreshold * 0.75
target = self._test_targets(wsrc, targets, ct, mi, ma)
if not target:
values, bins = histogram(wsrc, bins=max((10, ns)))
# assume 0 is the most abundant pixel. ie the image is mostly background
values, bins = values[1:], bins[1:]
idxs = nonzero(values)[0]
'''
polygon is now segmented into multiple regions
consecutively remove a region and find targets
'''
nimage = ones_like(wsrc, dtype='uint8') * 255
nimage[wsrc == 0] = 0
for idx in idxs:
bl = bins[idx]
bu = bins[idx + 1]
nimage[((wsrc >= bl) & (wsrc <= bu))] = 0
targets = self._find_polygon_targets(nimage)
target = self._test_targets(nimage, targets, ct, mi, ma)
if target:
break
return target
def _test_targets(self, src, targets, ct, mi, ma):
if targets:
for ti in targets:
if all(self._test_target(src,
ti, ct, mi, ma)):
return ti
# ===============================================================================
# preprocessing
# ===============================================================================
def _preprocess(self, frame, stretch_intensity=True, blur=1, denoise=0):
"""
1. convert frame to grayscale
2. remove noise from frame. increase denoise value for more noise filtering
3. stretch contrast
"""
if len(frame.shape) != 2:
frm = grayspace(frame) * 255
else:
frm = frame / self.pixel_depth * 255
frm = frm.astype('uint8')
# self.preprocessed_frame = frame
# if denoise:
# frm = self._denoise(frm, weight=denoise)
# print 'gray', frm.shape
if blur:
frm = gaussian(frm, blur) * 255
frm = frm.astype('uint8')
# frm1 = gaussian(self.preprocessed_frame, blur,
# multichannel=True) * 255
# self.preprocessed_frame = frm1.astype('uint8')
if stretch_intensity:
frm = rescale_intensity(frm)
# frm = self._contrast_equalization(frm)
# self.preprocessed_frame = self._contrast_equalization(self.preprocessed_frame)
return frm
def _denoise(self, img, weight):
"""
use TV-denoise to remove noise
http://scipy-lectures.github.com/advanced/image_processing/
http://en.wikipedia.org/wiki/Total_variation_denoising
"""
from skimage.filters import denoise_tv_chambolle
img = denoise_tv_chambolle(img, weight=weight) * 255
return img.astype('uint8')
# def _contrast_equalization(self, img):
# """
# rescale intensities to maximize contrast
# """
# # from numpy import percentile
# # Contrast stretching
# # p2 = percentile(img, 2)
# # p98 = percentile(img, 98)
#
# return rescale_intensity(asarray(img))
# ===============================================================================
# deviation calc
# ===============================================================================
# def _square_approximation(self, src, target, dim):
# tx, ty = self._get_frame_center(src)
# pts = target.poly_points
#
#
# cx, cy = dx + tx, dy + ty
# dy = -dy
# self._draw_indicator(src, (cx, cy), color=(255, 0, 128), shape='crosshairs')
#
# return dx, dy
def _arc_approximation(self, src, target, dim):
"""
find cx,cy of a circle with radius r using the arc center method
only perform if the target has high convexity
convexity is simply defined as the ratio of area to convex hull area
"""
tol = 0.8
if target.convexity > tol:
self.info('doing arc approximation radius={}'.format(dim))
tx, ty = self._get_frame_center(src)
pts = target.poly_points
pts[:, 1] = pts[:, 1] - ty
pts[:, 0] = pts[:, 0] - tx
dx, dy = approximate_polygon_center(pts, dim)
cx, cy = dx + tx, dy + ty
dy = -dy
self._draw_indicator(src, (cx, cy), color=(255, 0, 128), shape='crosshairs')
draw_circle_perimeter(src, cx, cy, round(dim), color=(255, 0, 128))
else:
dx, dy = self._calculate_error([target])
return dx, dy
def _calculate_error(self, targets):
"""
calculate the dx,dy
deviation of the targets centroid from the center of the image
"""
def hist(d):
f, v = histogram(array(d))
i = len(f) if argmax(f) == len(f) - 1 else argmax(f)
return v[i]
devxs, devys = list(zip(*[r.dev_centroid for r in targets]))
if len(targets) > 2 and self.use_histogram:
dx = hist(devxs)
dy = hist(devys)
else:
def avg(s):
return sum(s) / len(s)
dx = avg(devxs)
dy = avg(devys)
return -dx, dy
# ===============================================================================
# helpers
# ===============================================================================
def _make_targets(self, pargs, origin):
"""
convenience function for assembling target list
"""
targets = []
for pi, ai, co, ci, pa, pch, mask in pargs:
if len(pi) < 5:
continue
tr = Target()
tr.origin = origin
tr.poly_points = pi
# tr.bounding_rect = br
tr.area = ai
tr.min_enclose_area = co
tr.centroid = ci
tr.pactual = pa
tr.pconvex_hull = pch
tr.mask = mask
targets.append(tr)
return targets
def _filter(self, targets, func, *args, **kw):
return [ti for ti in targets if func(ti, *args, **kw)]
def _target_near_center(self, target, *args, **kw):
return self._near_center(target.centroid, *args, **kw)
def _near_center(self, xy, frame, tol=0.75):
"""
is the point xy within tol distance of the center
"""
cxy = self._get_frame_center(frame)
d = calc_length(xy, cxy)
tol *= self.pxpermm
return d < tol
def _get_filter_target_area(self, shape, dim):
"""
calculate min and max bounds of valid polygon areas
"""
if shape == 'circle':
miholedim = 0.5 * dim
maholedim = 1.25 * dim
mi = miholedim ** 2 * 3.1415
ma = maholedim ** 2 * 3.1415
else:
d = (2*dim)**2
mi = 0.5 * d
ma = 1.25 * d
return mi, ma
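# Worked example (added for clarity): for a circular hole with dim = 10 px the accepted
# polygon areas lie between (0.5 * 10) ** 2 * pi ~ 78.5 px^2 and (1.25 * 10) ** 2 * pi ~ 490.9 px^2,
# i.e. roughly 0.25x to 1.56x the nominal hole area of pi * dim ** 2 ~ 314 px^2.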
def _get_frame_center(self, src):
"""
convenience function for getting the center of the image in c,r
"""
w, h = get_size(src)
x = w / 2
y = h / 2
return x, y
# ===============================================================================
# draw
# ===============================================================================
def _draw_targets(self, src, targets):
"""
draw a crosshairs indicator
"""
if targets:
for ta in targets:
pt = new_point(*ta.centroid)
self._draw_indicator(src, pt,
color=(0, 255, 0),
size=10,
shape='crosshairs')
# draw_circle(src, pt,
# color=(0,255,0),
# radius=int(dim))
draw_polygons(src, [ta.poly_points], color=(255, 255, 255))
def _draw_center_indicator(self, src, color=(0, 0, 255), shape='crosshairs',
size=10, radius=1):
"""
draw indicator at center of frame
"""
cpt = self._get_frame_center(src)
self._draw_indicator(src, new_point(*cpt),
# shape='crosshairs',
shape=shape,
color=color,
size=size)
# draw_circle_perimeter(src, cpt[0], cpt[1], radius, color=color)
def _draw_indicator(self, src, center, color=(255, 0, 0), shape='circle',
size=4, thickness=-1):
"""
convenience function for drawing indicators
"""
if isinstance(center, tuple):
center = new_point(*center)
r = size
if shape == 'rect':
draw_rectangle(src, center.x - r / 2., center.y - r / 2., r, r,
color=color,
thickness=thickness)
elif shape == 'crosshairs':
draw_lines(src,
[[(center.x - size, center.y),
(center.x + size, center.y)],
[(center.x, center.y - size),
(center.x, center.y + size)]],
color=color,
thickness=1)
else:
draw_circle(src, center[0], center[1], r, color=color)
# ============= EOF =============================================
# def _segment_polygon2(self, image, frame, target,
# dim,
# cthreshold, mi, ma):
#
# pychron = image.source_frame[:]
#
# # find the label with the max area ie max of histogram
# def get_limits(values, bins, width=1):
# ind = argmax(values)
# if ind == 0:
# bil = bins[ind]
# biu = bins[ind + width]
# elif ind == len(bins) - width:
# bil = bins[ind - width]
# biu = bins[ind]
# else:
# bil = bins[ind - width]
# biu = bins[ind + width]
#
# return bil, biu, ind
#
# wh = get_size(pychron)
# # make image with polygon
# im = zeros(wh)
# points = asarray(target.poly_points)
# rr, cc = polygon(*points.T)
#
# # points = asarray([(pi.x, pi.y) for pi in points])
# # rr, cc = polygon(points[:, 0], points[:, 1])
#
# im[cc, rr] = 255
#
# # do watershedding
# distance = ndimage.distance_transform_edt(im)
# local_maxi = feature.peak_local_max(distance, labels=im,
# indices=False,
# footprint=ones((3, 3))
# )
# markers, ns = ndimage.label(local_maxi)
# #
# wsrc = watershed(-distance, markers,
# mask=im
# )
#
# # print wsrc[50]
# # print colorspace(distance)
# # debug_show(im, ws, seg1)
# # debug_show(im, distance, wsrc, nimage)
# # bins = 3 * number of labels. this allows you to precisely pick the value of the max area
# values, bins = histogram(wsrc, bins=ns * 3)
# bil, biu, ind = get_limits(values, bins)
# # ma = max()
# # print ma
# # nimage = ndimage.label(wsrc > biu)[0]
#
# # nimage = nimage.astype('uint8') * 255
# if not bil:
# values = delete(values, ind)
# bins = delete(bins, (ind, ind + 1))
# bil, biu, ind = get_limits(values, bins)
# #
# nimage = ones_like(wsrc, dtype='uint8') * 255
# nimage[wsrc < bil] = 0
# nimage[wsrc > biu] = 0
#
# # image.source_frame = colorspace(nimage)
#
# # image.refresh = True
# # time.sleep(1)
#
# # debug_show(im, distance, wsrc, nimage)
# nimage = invert(nimage)
# # img = nimage
# # # img = asMat(nimage)
# # # locate new polygon from the segmented image
# tars = self._find_targets(image, nimage, dim,
# start=10, w=4, n=2, set_image=False)
# # # tars = None
# # # do_later(lambda: self.debug_show(im, distance, wsrc, nimage))
#
# # tars = None
# if tars:
# target = tars[0]
# return self._test_target(frame, target, cthreshold, mi, ma)
# else:
# return False, False, False
# from numpy import linspace, pi, cos, sin, radians
# from math import atan2
# from scipy.optimize import fmin
# # dx, dy = None, None
# # for ta in targets:
# pts = array([(p.x, p.y) for p in target.poly_points], dtype=float)
# pts = sort_clockwise(pts, pts)
# pts = convex_hull(pts)
# cx, cy = target.centroid
# px, py = pts.T
#
# tx, ty = self._get_frame_center(pychron)
# px -= cx
# py -= cy
#
# r = dim * 0.5
# ts = array([atan2(p[1] - cx, p[0] - cy) for p in pts])
# # ts += 180
# n = len(ts)
# hidx = n / 2
# h1 = ts[:hidx]
#
# offset = 0 if n % 2 == 0 else 1
#
# # h1 = array([ti for ti in ts if ti < 180])
# # h1 = radians(h1)
# # hidx = len(h1)
# # print len(ts), hidx
# # offset = 0
# def cost(p0):
# '''
# cost function
#
# A-D: chord of the polygon
# B-C: radius of fit circle
#
# A B C D
#
# try to minimize difference fit circle and polygon approx
# cost=dist(A,B)+dist(C,D)
# '''
# # r = p0[2]
# # northern hemicircle
# cix1, ciy1 = p0[0] - cx + r * cos(h1), p0[1] - cy + r * sin(h1)
#
# # southern hemicircle
# cix2, ciy2 = p0[0] - cx + r * cos(h1 + pi), p0[1] - cy + r * sin(h1 + pi)
#
# dx, dy = px[:hidx] - cix1, py[:hidx] - ciy1
# p1 = (dx ** 2 + dy ** 2) ** 0.5
#
# # dx, dy = cix2 - px[hidx + offset:], ciy2 - py[hidx + offset:]
# dx, dy = px[hidx + offset:] - cix2, py[hidx + offset:] - ciy2
# p2 = (dx ** 2 + dy ** 2) ** 0.5
# # print 'p1', p1
# # print 'p2', p2
# return ((p2 - p1) ** 2).sum()
# # return (p1 + p2).mean()
# # return p2.sum() + p1.sum()
#
# # minimize the cost function
# dx, dy = fmin(cost, x0=[0, 0], disp=False) # - ta.centroid
# print dx, dy, ty, cy
# # dy -= cy
# # dx -= cx
#
# # print ty + cy, dy
# self._draw_indicator(pychron, (dx, dy), shape='rect')
# draw_circle(pychron, (dx, dy), int(r))
#
# return dx - target.origin[0], dy - target.origin[1]
# def debug_show(image, distance, wsrc, nimage):
#
# import matplotlib.pyplot as plt
# fig, axes = plt.subplots(ncols=4, figsize=(8, 2.7))
# ax0, ax1, ax2, ax3 = axes
#
# ax0.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
# ax1.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
# ax2.imshow(wsrc, cmap=plt.cm.jet, interpolation='nearest')
# ax3.imshow(nimage, cmap=plt.cm.jet, interpolation='nearest')
#
# for ax in axes:
# ax.axis('off')
#
# plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
# right=1)
# plt.show()
# def find_circle(self, image, frame, dim, **kw):
# dx, dy = None, None
#
# pframe = self._preprocess(frame, blur=0)
# edges = canny(pframe, sigma=3)
# hough_radii = arange(dim * 0.9, dim * 1.1, 2)
#
# hough_res = hough_circle(edges, hough_radii)
#
# centers = []
# accums = []
# radii = []
# for radius, h in zip(hough_radii, hough_res):
# # For each radius, extract two circles
# num_peaks = 2
# peaks = peak_local_max(h, num_peaks=num_peaks)
# centers.extend(peaks)
# accums.extend(h[peaks[:, 0], peaks[:, 1]])
# radii.extend([radius] * num_peaks)
#
# # for idx in argsort(accums)[::-1][:1]:
# try:
# idx = argsort(accums)[::-1][0]
# except IndexError:
# return dx, dy
#
# center_y, center_x = centers[idx]
# radius = radii[idx]
#
# draw_circle_perimeter(frame, center_x, center_y, radius, (220, 20, 20))
# # cx, cy = circle_perimeter(int(center_x), int(center_y), int(radius))
#
# # draw perimeter
# # try:
# # frame[cy, cx] = (220, 20, 20)
# # except IndexError:
# # pass
#
# # draw center
# # cx, cy = circle(int(center_x), int(center_y), int(2))
# # frame[cy, cx] = (220, 20, 20)
# draw_circle(frame, center_x, center_y, 2, (220, 20, 20))
#
# h, w = frame.shape[:2]
#
# ox, oy = w / 2, h / 2
# dx = center_x - ox
# dy = center_y - oy
#
# cx, cy = circle(int(ox), int(oy), int(2))
# frame[cy, cx] = (20, 220, 20)
#
# image.set_frame(frame)
# return float(dx), -float(dy)
| apache-2.0 |
e-koch/VLA_Lband | ancillary_data/HST/HI_properties_near_feedback.py | 1 | 17038 |
'''
Pull out HI properties (and/or others) from a set of point sources.
Create a distance map as a function of distance from the nearest source.
'''
import astropy.coordinates as coord
from astropy.table import Table, Column
import astropy.units as u
import astropy.constants as const
import numpy as np
from galaxies import Galaxy
import scipy.ndimage as nd
from astropy.io import fits
from spectral_cube import SpectralCube, Projection
from spectral_cube.analysis_utilities import stack_spectra
from astropy.utils.console import ProgressBar
import matplotlib.pyplot as plt
from plotting_styles import default_figure
from constants import hi_freq, hi_mass_conversion
# from paths import allfigs_path
def distance_map_from_catalogue(gal, tab, header, ra_key="RA", dec_key="Dec",
diam_key=None):
'''
Create a distance map from a set of sky location in a catalogue.
'''
if not isinstance(gal, Galaxy):
raise TypeError("gal must be a Galaxy instance.")
ra = tab[ra_key]
dec = tab[dec_key]
coords = coord.SkyCoord(ra, dec, frame='icrs', unit=(u.deg, u.deg))
# Assumes that the table column has a unit attached that Table can distinguish
if diam_key is not None:
# Assume pc. Lost units in the save table??
diams = tab[diam_key].quantity * u.pc
radii = gal.radius(header=header)
coord_map = gal.skycoord_grid(header=header)
object_mask = np.zeros_like(coord_map.ra.value, dtype=int)
# Loop through and mask points belonging at a remnant, or the nearest point
for i, co in enumerate(coords):
mask_index = np.unravel_index(coord_map.separation(co).argmin(),
object_mask.shape)
if diam_key is not None:
# Major axis diameter
diam_rad = (diams[i].to(u.pc) / gal.distance).to(u.dimensionless_unscaled).value * u.rad
diam_pix = diam_rad.to(u.deg).value / np.abs(header['CDELT2'])
# Gather all pixels with a circular region
yy, xx = np.mgrid[-(int(diam_pix)//2 + 1):int(diam_pix)//2 + 1,
-(int(diam_pix)//2 + 1):int(diam_pix)//2 + 1]
# Find all pixels within the diameter
valids = np.where(np.sqrt(yy**2 + xx**2) < diam_pix / 2.)
y_pts = valids[0] + mask_index[0]
x_pts = valids[1] + mask_index[1]
mask_index = (y_pts, x_pts)
object_mask[mask_index] = i + 1
# print(object_mask[mask_index])
# print(mask_index)
# print((object_mask > 0).sum())
dist_transf = nd.distance_transform_edt(~(object_mask > 0))
return object_mask, dist_transf
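# Explanatory note (added): object_mask labels every pixel covered by a catalogue source
# with that source's 1-based row index, while dist_transf holds each pixel's distance (in
# pixels) to the nearest masked source. A hedged conversion to physical distance would be
# dist_pc = dist_transf * np.abs(header['CDELT2']) * np.pi / 180. * gal.distance.to(u.pc).value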
def find_bubble_props(dist_bins, int_profile, lwidth_profile, obj_diam,
disk_height=100 * u.pc / np.cos(55.1 * u.deg),
mass_conv_factor=None):
'''
Dumb estimations of bubble properties based on integrated intensity and
line width profiles.
'''
# Define the shell radius based on the distance of the peak
arg_max = np.argmax(int_profile)
# If the centre is the peak, assume it is unresolved
if arg_max == 0:
shell_rad = obj_diam / 2.
else:
shell_rad = obj_diam / 2. + dist_bins[arg_max]
# Assume a disk scale height and check if the radius of the shell
# exceeds it
if shell_rad > disk_height:
# It has maybe broken out of the disk. Adjust volume as needed
# Subtract off caps of the sphere
vol = (4 * np.pi / 3.) * shell_rad**3 - \
(2 * np.pi / 3.) * (shell_rad - disk_height)**2 * (2 * shell_rad + disk_height)
else:
# Likely still contained within the disk
vol = (4 * np.pi / 3.) * shell_rad**3
# Awful estimations of the velocity expansion. Assume velocity dispersion
# is exactly the same...
# Don't know how to do that with any sort of logic applied, so let it be
# the dispersion in the peak bin.
v_exp = lwidth_profile[arg_max]
# Now the integrated intensity. If unresolved, we don't have an estimate of the
# background. Assume the last distance bin as a background?? Otherwise take
# the larger of the innermost and outermost when resolved.
peak_int = int_profile[arg_max]
if arg_max == 0:
bkg_int = int_profile[-1]
else:
bkg_int = max(int_profile[0], int_profile[-1])
hole_mass = np.pi * shell_rad**2 * bkg_int
shell_mass = np.pi * shell_rad**2 * \
(peak_int - bkg_int)
if mass_conv_factor is not None:
hole_mass *= mass_conv_factor
shell_mass *= mass_conv_factor
# Estimate an avg volume density within the hole. Don't do this
# for unresolved holes
if arg_max == 0:
energy = np.NaN * u.erg
vol_dens = np.NaN * u.cm**-3
else:
# Chevalier 74 expansion energy formula
vol_dens = ((shell_mass / (1.4 * const.m_p)) / vol).to(u.cm**-3)
energy = 5.3e43 * vol_dens.value**1.12 * \
shell_rad.to(u.pc).value**3.12 * v_exp.to(u.km / u.s).value**1.4 * u.erg
return shell_rad, vol, v_exp, hole_mass, shell_mass, vol_dens, energy
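# Explanatory note (added): the energy estimate follows the Chevalier (1974) relation
# E ~ 5.3e43 * n^1.12 * R_pc^3.12 * v_kms^1.4 erg. For example, a shell with n = 0.1 cm^-3,
# R = 100 pc and v_exp = 10 km/s would give E ~ 5.3e43 * 0.076 * 1.7e6 * 25 ~ 2e50 erg
# (these numbers are purely illustrative).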
default_figure()
# Update this for the server files (geometry should be the same though)
gal = Galaxy("M33")
gal.distance = 840 * u.kpc
hi_cube = SpectralCube.read("/Volumes/Travel_Data/M33_2/HI/M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.fits")
peak_vel = Projection.from_hdu(fits.open("/Volumes/Travel_Data/M33_2/HI/M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.peakvels.fits"))
mom0 = Projection.from_hdu(fits.open("/Volumes/Travel_Data/M33_2/HI/M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.mom0.fits"))
beam = mom0.beam
moment0_Kkm_s = beam.jtok(hi_freq).value * mom0.value / 1000.
moment0_surfdens = moment0_Kkm_s * hi_mass_conversion * (u.K * u.km / u.s) * np.cos(55.1 * u.deg)
lwidth = Projection.from_hdu(fits.open("/Volumes/Travel_Data/M33_2/HI/M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.lwidth.fits"))
snr_tab = Table.read("/Volumes/Travel_Data/M33_2/MMT_SNR_catalogue_long18_combined.txt",
format='ascii')
# Also consider weighting by something like ~1/sqrt(L) to place distances
# on a common "scale"
index_mask, dist_transf = \
distance_map_from_catalogue(gal, snr_tab, hi_cube.header,
diam_key='D')
# Distance bins in 100 pc steps out to ~900 pc.
dist_limit = np.arange(10) * 100 * u.pc
stacked_spectra = []
lwidth_bins = []
intint_bins = []
# Pick out individual regions
num = index_mask.max()
for n in ProgressBar(range(1, num + 1)):
reg_mask = index_mask == n
dist_transf_reg = nd.distance_transform_edt(~reg_mask)
lwidth_reg = []
intint_reg = []
# Calculate avg properties within the region
lwidth_reg.append([np.nanmean(lwidth[reg_mask].value),
np.nanstd(lwidth[reg_mask].value) / np.sqrt(reg_mask.sum() / 41.)])
intint_reg.append([np.nanmean(moment0_surfdens[reg_mask].value),
np.nanstd(moment0_surfdens[reg_mask].value) / np.sqrt(reg_mask.sum() / 41.)])
for i, (low, high) in enumerate(zip(dist_limit[:-1], dist_limit[1:])):
# print("On bin {}".format(i + 1))
dist_ang_low = (low / gal.distance.to(u.pc)).value * u.rad
dist_pix_low = dist_ang_low.to(u.deg).value / np.abs(hi_cube.header['CDELT2'])
dist_ang_high = (high / gal.distance.to(u.pc)).value * u.rad
dist_pix_high = dist_ang_high.to(u.deg).value / np.abs(hi_cube.header['CDELT2'])
dist_mask = np.logical_and(dist_transf_reg > dist_pix_low,
dist_transf_reg <= dist_pix_high)
num_beams = dist_mask.sum() / 41.
intint_reg.append([np.nanmean(moment0_surfdens[dist_mask].value),
np.nanstd(moment0_surfdens[dist_mask].value) / np.sqrt(num_beams)])
lwidth_reg.append([np.nanmean(lwidth[dist_mask].value),
np.nanstd(lwidth[dist_mask].value) / np.sqrt(num_beams)])
# stacked_spectra.append(stack_spectra(hi_cube, peak_vel,
# xy_posns=np.where(dist_mask),
# progressbar=True,
# chunk_size=10000))
intint_reg = u.Quantity(intint_reg) * (u.solMass / u.pc**2)
lwidth_reg = u.Quantity(lwidth_reg) * (u.m / u.s)
intint_bins.append(intint_reg)
lwidth_bins.append(lwidth_reg)
snr_props = {"shell_rad": [], "vol": [], "v_exp": [], "hole_mass": [],
"shell_mass": [], "vol_dens": [], "energy": []}
# Half bins except for the first at 0.
dist_bin_corr = u.Quantity([dist_limit[0].value] + list(dist_limit[1:].value - 50)) * u.pc
show_plots = True
for i, (obj, obj2) in enumerate(zip(intint_bins, lwidth_bins)):
out_props = find_bubble_props(dist_bin_corr, obj[:, 0], obj2[:, 0],
snr_tab['D'][i] * u.pc)
snr_props['shell_rad'].append(out_props[0].value)
snr_props['vol'].append(out_props[1].value)
snr_props['v_exp'].append(out_props[2].value)
snr_props['hole_mass'].append(out_props[3].value)
snr_props['shell_mass'].append(out_props[4].value)
snr_props['vol_dens'].append(out_props[5].value)
snr_props['energy'].append(out_props[6].value)
if show_plots:
fig = plt.figure(figsize=(12, 6))
fig.add_subplot(131)
pix = np.where(index_mask == i + 1)
xlow, xhigh = np.min(pix[1]), np.max(pix[1])
ylow, yhigh = np.min(pix[0]), np.max(pix[0])
lim_slice = [slice(ylow - 50, yhigh + 50), slice(xlow - 50, xhigh + 50)]
plt.imshow(mom0.value[lim_slice], origin='lower')
plt.contour((index_mask == i + 1)[lim_slice], colors='b')
fig.add_subplot(132)
plt.errorbar(dist_limit.value, obj[:, 0].value, yerr=obj[:, 1].value, drawstyle='steps-mid')
plt.xlabel("Distance (pc)")
plt.ylabel(r"Surf. Density (Msol/pc$^2$)")
fig.add_subplot(133)
plt.errorbar(dist_limit.value, obj2[:, 0].value / 1000.,
yerr=obj2[:, 1].value / 1000., drawstyle='steps-mid')
plt.xlabel("Distance (pc)")
plt.ylabel(r"Line Width (km/s)")
plt.tight_layout()
plt.draw()
print(out_props)
input("{}".format(i + 1))
plt.close()
snr_props['shell_rad'] = snr_props['shell_rad'] * u.pc
snr_props['vol'] = snr_props['vol'] * u.pc**3
snr_props['v_exp'] = snr_props['v_exp'] * u.km / u.s
snr_props['hole_mass'] = snr_props['hole_mass'] * u.solMass
snr_props['shell_mass'] = snr_props['shell_mass'] * u.solMass
snr_props['vol_dens'] = snr_props['vol_dens'] * u.cm**-3
snr_props['energy'] = snr_props['energy'] * u.erg
# Now we want to do something similar around the O-stars
# Using colour cuts for the half-brick near 604 (for now)
# import dask.dataframe as dd
import pandas as pd
df_phot = pd.read_hdf("/Volumes/Travel_Data/M33_2/Hubble/14610_M33-B01_1.phot.Ocut.hdf5",
key='data')
index_mask_O, dist_transf_O = \
distance_map_from_catalogue(gal, df_phot, hi_cube.header,
diam_key=None, ra_key='ra',
dec_key='dec')
stacked_spectra_O = []
lwidth_bins_O = []
intint_bins_O = []
# Pick out individual regions
# labels, num = nd.label(dist_transf_O == 0.)
num = index_mask_O.max()
for n in ProgressBar(range(1, num + 1)):
reg_mask = index_mask_O == n
dist_transf_reg = nd.distance_transform_edt(~reg_mask)
lwidth_reg = []
intint_reg = []
# Calculate avg properties within the region
lwidth_reg.append([np.nanmean(lwidth[reg_mask].value),
np.nanstd(lwidth[reg_mask].value) / np.sqrt(reg_mask.sum() / 41.)])
intint_reg.append([np.nanmean(moment0_surfdens[reg_mask].value),
np.nanstd(moment0_surfdens[reg_mask].value) / np.sqrt(reg_mask.sum() / 41.)])
for i, (low, high) in enumerate(zip(dist_limit[:-1], dist_limit[1:])):
# print("On bin {}".format(i + 1))
dist_ang_low = (low / gal.distance.to(u.pc)).value * u.rad
dist_pix_low = dist_ang_low.to(u.deg).value / np.abs(hi_cube.header['CDELT2'])
dist_ang_high = (high / gal.distance.to(u.pc)).value * u.rad
dist_pix_high = dist_ang_high.to(u.deg).value / np.abs(hi_cube.header['CDELT2'])
dist_mask = np.logical_and(dist_transf_reg > dist_pix_low,
dist_transf_reg <= dist_pix_high)
num_beams = dist_mask.sum() / 41.
intint_reg.append([np.nanmean(moment0_surfdens[dist_mask].value),
np.nanstd(moment0_surfdens[dist_mask].value) / np.sqrt(num_beams)])
lwidth_reg.append([np.nanmean(lwidth[dist_mask].value),
np.nanstd(lwidth[dist_mask].value) / np.sqrt(num_beams)])
# stacked_spectra.append(stack_spectra(hi_cube, peak_vel,
# xy_posns=np.where(dist_mask),
# progressbar=True,
# chunk_size=10000))
intint_reg = u.Quantity(intint_reg) * (u.solMass / u.pc**2)
lwidth_reg = u.Quantity(lwidth_reg) * (u.m / u.s)
intint_bins_O.append(intint_reg)
lwidth_bins_O.append(lwidth_reg)
# Plot individual profiles
o_props = {"shell_rad": [], "vol": [], "v_exp": [], "hole_mass": [],
"shell_mass": [], "vol_dens": [], "energy": []}
# Half bins except for the first at 0.
dist_bin_corr = u.Quantity([dist_limit[0].value] + list(dist_limit[1:].value - 50)) * u.pc
show_plots = False
for i, (obj, obj2) in enumerate(zip(intint_bins, lwidth_bins)):
out_props = find_bubble_props(dist_bin_corr, obj[:, 0], obj2[:, 0],
0. * u.pc)
o_props['shell_rad'].append(out_props[0].value)
o_props['vol'].append(out_props[1].value)
o_props['v_exp'].append(out_props[2].value)
o_props['hole_mass'].append(out_props[3].value)
o_props['shell_mass'].append(out_props[4].value)
o_props['vol_dens'].append(out_props[5].value)
o_props['energy'].append(out_props[6].value)
if show_plots:
plt.subplot(131)
pix = np.where(index_mask_O == i + 1)
if len(pix[0]) == 0:
print("Found duplicated pixel location for {}.".format(i))
continue
xlow, xhigh = np.min(pix[1]), np.max(pix[1])
ylow, yhigh = np.min(pix[0]), np.max(pix[0])
lim_slice = [slice(ylow - 50, yhigh + 50), slice(xlow - 50, xhigh + 50)]
plt.imshow(mom0.value[lim_slice], origin='lower')
plt.contour((index_mask_O == i + 1)[lim_slice], colors='b')
plt.subplot(132)
plt.errorbar(dist_limit.value, obj[:, 0].value, yerr=obj[:, 1].value, drawstyle='steps-mid')
plt.subplot(133)
plt.errorbar(dist_limit.value, obj2[:, 0].value, yerr=obj2[:, 1].value, drawstyle='steps-mid')
plt.draw()
print(out_props)
input("{}".format(i + 1))
plt.clf()
o_props['shell_rad'] = o_props['shell_rad'] * u.pc
o_props['vol'] = o_props['vol'] * u.pc**3
o_props['v_exp'] = o_props['v_exp'] * u.km / u.s
o_props['hole_mass'] = o_props['hole_mass'] * u.solMass
o_props['shell_mass'] = o_props['shell_mass'] * u.solMass
o_props['vol_dens'] = o_props['vol_dens'] * u.cm**-3
o_props['energy'] = o_props['energy'] * u.erg
# Convert into dataframes and save
snr_hi_tab = Table([Column(snr_props[key]) for key in snr_props],
names=snr_props.keys())
snr_hi_tab.write("/Volumes/Travel_Data/M33_2/HI/HI_snr_props.csv")
o_tab = Table([Column(o_props[key]) for key in o_props],
names=o_props.keys())
o_tab.write("/Volumes/Travel_Data/M33_2/HI/HI_Ostar_props_M33-B01_1.csv")
# Compare some properties together
# Define the save path
import os
allfigs_path = lambda x: os.path.join(os.path.expanduser("~/Dropbox/Various Plots/M33/"), x)
_ = plt.hist(snr_hi_tab['shell_rad'], bins='auto', alpha=0.3, label='SNR')
_ = plt.hist(o_tab['shell_rad'], bins='auto', alpha=0.3, label='O')
plt.xlabel("Shell Radius (pc)")
plt.tight_layout()
plt.savefig(allfigs_path("feedback/feedback_sources_HI_shell_radius.png"))
plt.savefig(allfigs_path("feedback/feedback_sources_HI_shell_radius.pdf"))
plt.close()
_ = plt.hist(np.log10(snr_hi_tab['energy']
[np.isfinite(snr_hi_tab['energy']) & (snr_hi_tab['energy'] > 0.)]),
bins='auto', alpha=0.3, label='SNR')
_ = plt.hist(np.log10(o_tab['energy']
[np.isfinite(o_tab['energy']) & (o_tab['energy'] > 0.)]),
bins='auto', alpha=0.3, label='O')
plt.xlabel("log Energy (erg)")
plt.tight_layout()
plt.savefig(allfigs_path("feedback/feedback_sources_HI_energy.png"))
plt.savefig(allfigs_path("feedback/feedback_sources_HI_energy.pdf"))
plt.close()
# Properties are similar. Can probably only believe a handful of small
# (<200 pc) but resolved sources. | mit |
jrbadiabo/Coursera-Stanford-ML-Class | Python_Version/Ex1.Linear_Regresion_with_one_variable/ex1_multi.py | 1 | 3757 | from matplotlib import use
use('TkAgg')
import numpy as np
import matplotlib.pyplot as plt
from gradientDescentMulti import gradientDescentMulti
from normalEqn import normalEqn
from featureNormalize import featureNormalize
from show import show
# ================ Part 1: Feature Normalization ================
print 'Loading data ...'
# Load Data
data = np.loadtxt('ex1data2.txt', delimiter=',')
X = data[:, :2]
y = data[:, 2]
m = y.T.size
# Print out some data points
print 'First 10 examples from the dataset:'
print np.column_stack( (X[:10], y[:10]) )
raw_input("Program paused. Press Enter to continue...")
# Scale features and set them to zero mean
print 'Normalizing Features ...'
X, mu, sigma = featureNormalize(X)
print '[mu] [sigma]'
print mu, sigma
# Add intercept term to X
X = np.concatenate((np.ones((m, 1)), X), axis=1)
# ================ Part 2: Gradient Descent ================
#
# ====================== YOUR CODE HERE ======================
# Instructions: We have provided you with the following starter
# code that runs gradient descent with a particular
# learning rate (alpha).
#
# Your task is to first make sure that your functions -
# computeCost and gradientDescent already work with
# this starter code and support multiple variables.
#
# After that, try running gradient descent with
# different values of alpha and see which one gives
# you the best result.
#
# Finally, you should complete the code at the end
# to predict the price of a 1650 sq-ft, 3 br house.
#
# Hint: By using the 'hold on' command, you can plot multiple
# graphs on the same figure.
#
# Hint: At prediction, make sure you do the same feature normalization.
#
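# Added sketch (not part of the original exercise text): each iteration of
# gradientDescentMulti is expected to apply the vectorised update
# theta := theta - (alpha / m) * X.T.dot(X.dot(theta) - y)
# and to record the cost J(theta) = (1 / (2 * m)) * sum((X.dot(theta) - y) ** 2)
# in J_history so that convergence can be plotted below.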
print 'Running gradient descent ...'
# Choose some alpha value
alpha = 0.01
num_iters = 400
# Init Theta and Run Gradient Descent
theta = np.zeros(3)
theta, J_history = gradientDescentMulti(X, y, theta, alpha, num_iters)
# Plot the convergence graph
plt.plot(J_history, '-b')
plt.xlabel('Number of iterations')
plt.ylabel('Cost J')
show()
raw_input("Program paused. Press Enter to continue...")
# Display gradient descent's result
print 'Theta computed from gradient descent: '
print theta
# Estimate the price of a 1650 sq-ft, 3 br house
# Normalize the query features ([size, bedrooms], matching the column order of X)
# with the same mu and sigma used for training, then prepend the intercept term.
price = np.hstack((1, (np.array([1650, 3]) - mu) / sigma)).dot(theta)
print 'Predicted price of a 1650 sq-ft, 3 br house'
print '(using gradient descent): '
print price
raw_input("Program paused. Press Enter to continue...")
# ================ Part 3: Normal Equations ================
# ====================== YOUR CODE HERE ======================
# Instructions: The following code computes the closed form
# solution for linear regression using the normal
# equations. You should complete the code in
# normalEqn.m
#
# After doing so, you should complete this code
# to predict the price of a 1650 sq-ft, 3 br house.
#
print 'Solving with normal equations...'
# Load Data
data = np.loadtxt('ex1data2.txt', delimiter=',')
X = data[:, :2]
y = data[:, 2]
m = y.T.size
# Add intercept term to X
X = np.concatenate((np.ones((m,1)), X), axis=1)
# Calculate the parameters from the normal equation
theta = normalEqn(X, y)
# Display normal equation's result
print 'Theta computed from the normal equations:'
print ' %s \n' % theta
# Estimate the price of a 1650 sq-ft, 3 br house
price = np.array([1, 1650, 3]).dot(theta)
# ============================================================
print "Predicted price of a 1650 sq-ft, 3 br house "
print '(using normal equations):\n $%f\n' % price
raw_input("Program paused. Press Enter to continue...")
| mit |
ndingwall/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 11 | 5041 | """
============================
Gradient Boosting regression
============================
This example demonstrates Gradient Boosting to produce a predictive
model from an ensemble of weak predictive models. Gradient boosting can be used
for regression and classification problems. Here, we will train a model to
tackle a diabetes regression task. We will obtain the results from
:class:`~sklearn.ensemble.GradientBoostingRegressor` with least squares loss
and 500 regression trees of depth 4.
Note: For larger datasets (n_samples >= 10000), please refer to
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Maria Telenczuk <https://github.com/maikia>
# Katrina Ni <https://github.com/nilichen>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, ensemble
from sklearn.inspection import permutation_importance
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# %%
# Load the data
# -------------------------------------
#
# First we need to load the data.
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# %%
# Data preprocessing
# -------------------------------------
#
# Next, we will split our dataset to use 90% for training and leave the rest
# for testing. We will also set the regression model parameters. You can play
# with these parameters to see how the results change.
#
# n_estimators : the number of boosting stages that will be performed.
# Later, we will plot deviance against boosting iterations.
#
# max_depth : limits the number of nodes in the tree.
# The best value depends on the interaction of the input variables.
#
# min_samples_split : the minimum number of samples required to split an
# internal node.
#
# learning_rate : how much the contribution of each tree will shrink.
#
# loss : loss function to optimize. The least squares function is used in this
# case however, there are many other options (see
# :class:`~sklearn.ensemble.GradientBoostingRegressor` ).
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1, random_state=13)
params = {'n_estimators': 500,
'max_depth': 4,
'min_samples_split': 5,
'learning_rate': 0.01,
'loss': 'ls'}
# %%
# Fit regression model
# -------------------------------------
#
# Now we will initiate the gradient boosting regressors and fit it with our
# training data. Let's also look and the mean squared error on the test data.
reg = ensemble.GradientBoostingRegressor(**params)
reg.fit(X_train, y_train)
mse = mean_squared_error(y_test, reg.predict(X_test))
print("The mean squared error (MSE) on test set: {:.4f}".format(mse))
# %%
# Plot training deviance
# -------------------------------------
#
# Finally, we will visualize the results. To do that we will first compute the
# test set deviance and then plot it against boosting iterations.
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(reg.staged_predict(X_test)):
test_score[i] = reg.loss_(y_test, y_pred)
fig = plt.figure(figsize=(6, 6))
plt.subplot(1, 1, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, reg.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
fig.tight_layout()
plt.show()
# %%
# Plot feature importance
# -------------------------------------
#
# Careful, impurity-based feature importances can be misleading for
# high cardinality features (many unique values). As an alternative,
# the permutation importances of ``reg`` can be computed on a
# held out test set. See :ref:`permutation_importance` for more details.
#
# For this example, the impurity-based and permutation methods identify the
# same 2 strongly predictive features but not in the same order. The third most
# predictive feature, "bp", is also the same for the 2 methods. The remaining
# features are less predictive and the error bars of the permutation plot
# show that they overlap with 0.
feature_importance = reg.feature_importances_
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
fig = plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, np.array(diabetes.feature_names)[sorted_idx])
plt.title('Feature Importance (MDI)')
result = permutation_importance(reg, X_test, y_test, n_repeats=10,
random_state=42, n_jobs=2)
sorted_idx = result.importances_mean.argsort()
plt.subplot(1, 2, 2)
plt.boxplot(result.importances[sorted_idx].T,
vert=False, labels=np.array(diabetes.feature_names)[sorted_idx])
plt.title("Permutation Importance (test set)")
fig.tight_layout()
plt.show()
| bsd-3-clause |
ycaihua/scikit-learn | sklearn/datasets/svmlight_format.py | 39 | 15319 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
Parsing a text-based source can be expensive. When working
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
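# A minimal usage sketch for the train/test pattern described in the Notes of
# ``load_svmlight_files`` above; the file names are hypothetical::
#
#     X_train, y_train, X_test, y_test = load_svmlight_files(
#         ("train.svm", "test.svm"))
#     assert X_train.shape[1] == X_test.shape[1]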
def _dump_svmlight(X, y, f, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
line_pattern = u("%d")
else:
line_pattern = u("%.16g")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if query_id is not None:
feat = (y[i], query_id[i], s)
else:
feat = (y[i], s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, one_based, comment, query_id)
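# A minimal round-trip sketch combining the dumper and loader defined in this
# module; the path and arrays are hypothetical::
#
#     from sklearn.datasets import dump_svmlight_file, load_svmlight_file
#     dump_svmlight_file(X, y, "/tmp/data.svm", zero_based=True)
#     X2, y2 = load_svmlight_file("/tmp/data.svm", zero_based=True)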
| bsd-3-clause |
sem-geologist/hyperspy | hyperspy/tests/mva/test_bss.py | 2 | 13019 | import pytest
import numpy as np
import numpy.testing as nt
from hyperspy._signals.signal1d import Signal1D
from hyperspy._signals.signal2d import Signal2D
from hyperspy.misc.machine_learning.import_sklearn import sklearn_installed
from hyperspy.datasets import artificial_data
def are_bss_components_equivalent(c1_list, c2_list, atol=1e-4):
"""Check if two list of components are equivalent.
To be equivalent they must differ by at most `atol`, except
for an arbitrary -1 factor.
Parameters
----------
c1_list, c2_list: list of Signal instances.
The components to check.
atol: float
Absolute tolerance for the amount that they can differ.
Returns
-------
bool
"""
matches = 0
for c1 in c1_list:
for c2 in c2_list:
if (np.allclose(c2.data, c1.data, atol=atol) or
np.allclose(c2.data, -c1.data, atol=atol)):
matches += 1
return matches == len(c1_list)
class TestReverseBSS:
def setup_method(self, method):
s = artificial_data.get_core_loss_eels_line_scan_signal()
s.decomposition()
s.blind_source_separation(2)
self.s = s
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_autoreverse_default(self):
self.s.learning_results.bss_factors[:, 0] *= -1
self.s._auto_reverse_bss_component('loadings')
nt.assert_array_less(self.s.learning_results.bss_factors[:, 0], 0)
nt.assert_array_less(0, self.s.learning_results.bss_factors[:, 1])
self.s._auto_reverse_bss_component('factors')
nt.assert_array_less(0, self.s.learning_results.bss_factors)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_autoreverse_on_loading(self):
self.s._auto_reverse_bss_component('loadings')
nt.assert_array_less(0, self.s.learning_results.bss_factors)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_reverse_wrong_parameter(self):
with pytest.raises(ValueError):
self.s.blind_source_separation(2,
reverse_component_criterion='toto')
class TestBSS1D:
def setup_method(self, method):
ics = np.random.laplace(size=(3, 1000))
np.random.seed(1)
mixing_matrix = np.random.random((100, 3))
self.s = Signal1D(np.dot(mixing_matrix, ics))
self.s.decomposition()
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_on_loadings(self):
self.s.blind_source_separation(
3, diff_order=0, fun="exp", on_loadings=False)
s2 = self.s.as_signal1D(0)
s2.decomposition()
s2.blind_source_separation(
3, diff_order=0, fun="exp", on_loadings=True)
assert are_bss_components_equivalent(
self.s.get_bss_factors(), s2.get_bss_loadings())
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_mask_diff_order_0(self):
# This test, unlike most other tests, either passes or raises an error.
# It is designed to test if the mask is correctly dilated inside the
# `blind_source_separation_method`. If the mask is not correctly
# dilated the nan in the loadings should raise an error.
mask = self.s._get_signal_signal(dtype="bool")
mask.isig[5] = True
self.s.learning_results.factors[5, :] = np.nan
self.s.blind_source_separation(3, diff_order=0, mask=mask)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_mask_diff_order_1(self):
# This test, unlike most other tests, either passes or raises an error.
# It is designed to test if the mask is correctly dilated inside the
# `blind_source_separation_method`. If the mask is not correctly
# dilated the nan in the loadings should raise an error.
mask = self.s._get_signal_signal(dtype="bool")
mask.isig[5] = True
self.s.learning_results.factors[5, :] = np.nan
self.s.blind_source_separation(3, diff_order=1, mask=mask)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_mask_diff_order_0_on_loadings(self):
# This test, unlike most other tests, either passes or raises an error.
# It is designed to test if the mask is correctly dilated inside the
# `blind_source_separation_method`. If the mask is not correctly
# dilated the nan in the loadings should raise an error.
mask = self.s._get_navigation_signal(dtype="bool")
mask.isig[5] = True
self.s.learning_results.loadings[5, :] = np.nan
self.s.blind_source_separation(3, diff_order=0, mask=mask,
on_loadings=True)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_mask_diff_order_1_on_loadings(self):
# This test, unlike most other tests, either passes or raises an error.
# It is designed to test if the mask is correctly dilated inside the
# `blind_source_separation_method`. If the mask is not correctly
# dilated the nan in the loadings should raise an error.
mask = self.s._get_navigation_signal(dtype="bool")
mask.isig[5] = True
self.s.learning_results.loadings[5, :] = np.nan
self.s.blind_source_separation(3, diff_order=1, mask=mask,
on_loadings=True)
class TestBSS2D:
def setup_method(self, method):
ics = np.random.laplace(size=(3, 1024))
np.random.seed(1)
mixing_matrix = np.random.random((100, 3))
s = Signal2D(np.dot(mixing_matrix, ics).reshape((100, 32, 32)))
for (axis, name) in zip(s.axes_manager._axes, ("z", "y", "x")):
axis.name = name
s.decomposition()
self.s = s
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_diff_axes_string_with_mask(self):
mask = self.s._get_signal_signal(dtype="bool")
mask.unfold()
mask.isig[5] = True
mask.fold()
self.s.learning_results.factors[5, :] = np.nan
factors = self.s.get_decomposition_factors().inav[:3]
self.s.blind_source_separation(
3, diff_order=0, fun="exp", on_loadings=False,
factors=factors.diff(axis="x", order=1),
mask=mask.diff(axis="x", order=1))
matrix = self.s.learning_results.unmixing_matrix.copy()
self.s.blind_source_separation(
3, diff_order=1, fun="exp", on_loadings=False,
diff_axes=["x"], mask=mask
)
assert np.allclose(matrix, self.s.learning_results.unmixing_matrix,
atol=1e-6)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_diff_axes_string_without_mask(self):
factors = self.s.get_decomposition_factors().inav[:3].diff(
axis="x", order=1)
self.s.blind_source_separation(
3, diff_order=0, fun="exp", on_loadings=False, factors=factors)
matrix = self.s.learning_results.unmixing_matrix.copy()
self.s.blind_source_separation(
3, diff_order=1, fun="exp", on_loadings=False,
diff_axes=["x"],
)
assert np.allclose(matrix, self.s.learning_results.unmixing_matrix,
atol=1e-3)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_diff_axes_without_mask(self):
factors = self.s.get_decomposition_factors().inav[:3].diff(
axis="y", order=1)
self.s.blind_source_separation(
3, diff_order=0, fun="exp", on_loadings=False, factors=factors)
matrix = self.s.learning_results.unmixing_matrix.copy()
self.s.blind_source_separation(
3, diff_order=1, fun="exp", on_loadings=False, diff_axes=[2],)
assert np.allclose(matrix, self.s.learning_results.unmixing_matrix,
atol=1e-3)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_on_loadings(self):
self.s.blind_source_separation(
3, diff_order=0, fun="exp", on_loadings=False)
s2 = self.s.as_signal1D(0)
s2.decomposition()
s2.blind_source_separation(
3, diff_order=0, fun="exp", on_loadings=True)
assert are_bss_components_equivalent(
self.s.get_bss_factors(), s2.get_bss_loadings())
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_mask_diff_order_0(self):
# This test, unlike most other tests, either passes or raises an error.
# It is designed to test if the mask is correctly dilated inside the
# `blind_source_separation_method`. If the mask is not correctly
# dilated the nan in the loadings should raise an error.
mask = self.s._get_signal_signal(dtype="bool")
mask.unfold()
mask.isig[5] = True
mask.fold()
self.s.learning_results.factors[5, :] = np.nan
self.s.blind_source_separation(3, diff_order=0, mask=mask)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_mask_diff_order_1(self):
# This test, unlike most other tests, either passes or raises an error.
# It is designed to test if the mask is correctly dilated inside the
# `blind_source_separation_method`. If the mask is not correctly
# dilated the nan in the loadings should raise an error.
mask = self.s._get_signal_signal(dtype="bool")
mask.unfold()
mask.isig[5] = True
mask.fold()
self.s.learning_results.factors[5, :] = np.nan
self.s.blind_source_separation(3, diff_order=1, mask=mask)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_mask_diff_order_1_diff_axes(self):
# This test, unlike most other tests, either passes or raises an error.
# It is designed to test if the mask is correctly dilated inside the
# `blind_source_separation_method`. If the mask is not correctly
# dilated the nan in the loadings should raise an error.
mask = self.s._get_signal_signal(dtype="bool")
mask.unfold()
mask.isig[5] = True
mask.fold()
self.s.learning_results.factors[5, :] = np.nan
self.s.blind_source_separation(3, diff_order=1, mask=mask,
diff_axes=["x", ])
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_mask_diff_order_0_on_loadings(self):
# This test, unlike most other tests, either passes or raises an error.
# It is designed to test if the mask is correctly dilated inside the
# `blind_source_separation_method`. If the mask is not correctly
# dilated the nan in the loadings should raise an error.
mask = self.s._get_navigation_signal(dtype="bool")
mask.unfold()
mask.isig[5] = True
mask.fold()
self.s.learning_results.loadings[5, :] = np.nan
self.s.blind_source_separation(3, diff_order=0, mask=mask,
on_loadings=True)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_mask_diff_order_1_on_loadings(self):
# This test, unlike most other tests, either passes or raises an error.
# It is designed to test if the mask is correctly dilated inside the
# `blind_source_separation_method`. If the mask is not correctly
# dilated the nan in the loadings should raise an error.
s = self.s.to_signal1D()
s.decomposition()
mask = s._get_navigation_signal(dtype="bool")
mask.unfold()
mask.isig[5] = True
mask.fold()
s.learning_results.loadings[5, :] = np.nan
s.blind_source_separation(3, diff_order=1, mask=mask,
on_loadings=True)
@pytest.mark.skipif(not sklearn_installed, reason="sklearn not installed")
def test_mask_diff_order_1_on_loadings_diff_axes(self):
# This test, unlike most other tests, either passes or raises an error.
# It is designed to test if the mask is correctly dilated inside the
# `blind_source_separation_method`. If the mask is not correctly
# dilated the nan in the loadings should raise an error.
s = self.s.to_signal1D()
s.decomposition()
mask = s._get_navigation_signal(dtype="bool")
mask.unfold()
mask.isig[5] = True
mask.fold()
s.learning_results.loadings[5, :] = np.nan
s.blind_source_separation(3, diff_order=1, mask=mask,
on_loadings=True, diff_axes=["x"])
| gpl-3.0 |
PabloPiaggi/plumed2 | user-doc/tutorials/others/ves-lugano2017-kinetics/TRAJECTORIES-1700K/cdf-analysis.py | 6 | 1134 | #!/usr/bin/env python
import numpy as np
from scipy.stats import ks_2samp
from scipy.optimize import curve_fit
from statsmodels.distributions.empirical_distribution import ECDF
import matplotlib.pyplot as plt
f=open('fpt.dat','r')
# define theoretical CDF
def func(x,tau):
return 1-np.exp(-x/tau)
x = []
count=0
for line in f:
line=line.strip()
columns=line.split()
x.append(float(columns[0]))
count=count+1
x = np.array(x)
# for numerical stability we divide by the mean
mu=x.mean()
x=x/mu
# now compute empirical CDF
ecdf = ECDF(x)
# plot ECDF
x1 = np.linspace(min(x), max(x))
y1 = ecdf(x1)
plt.step(x1*mu, y1,'k-',lw=3.)
# fit to theoretical CDF to obtain tau
popt,pcov = curve_fit(func,x1,y1)
tau=popt[0]
print('mean of data', mu)
print('best fit tau', tau * mu)
yfit=func(x1,tau)
# plot fit
plt.plot(x1*mu,yfit,'b-',lw=3.)
# for p-value
# now generate some random data with the same exponential distribution
np.random.seed(12345678);
x2 = np.random.exponential(1/tau,1000)
st,p = ks_2samp(x2,x)
print('p-value', p)
plt.xscale('log')
plt.xlabel('time [s]')
plt.ylabel('Cumulative Probability')
plt.show()
| lgpl-3.0 |
jmargeta/scikit-learn | sklearn/tests/test_multiclass.py | 3 | 12251 | import numpy as np
import warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron)
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
# FIXME: - should use sets
# - should move to metrics module
def multilabel_precision(Y_true, Y_pred):
n_predictions = 0
n_correct = 0
for i in range(len(Y_true)):
n_predictions += len(Y_pred[i])
for label in Y_pred[i]:
if label in Y_true[i]:
n_correct += 1
return float(n_correct) / n_predictions
def multilabel_recall(Y_true, Y_pred):
n_labels = 0
n_correct = 0
for i in range(len(Y_true)):
n_labels += len(Y_true[i])
for label in Y_pred[i]:
if label in Y_true[i]:
n_correct += 1
return float(n_correct) / n_labels
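# A set-based sketch of the two helpers above, as hinted at by the FIXME;
# illustrative only and not used by the tests below::
#
#     def multilabel_precision_sets(Y_true, Y_pred):
#         n_correct = sum(len(set(t) & set(p)) for t, p in zip(Y_true, Y_pred))
#         return float(n_correct) / sum(len(p) for p in Y_pred)
#
#     def multilabel_recall_sets(Y_true, Y_pred):
#         n_correct = sum(len(set(t) & set(p)) for t, p in zip(Y_true, Y_pred))
#         return float(n_correct) / sum(len(t) for t in Y_true)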
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent
X = np.ones((10, 2))
X[:5, :] = 0
y = [[int(i >= 5), 2, 3] for i in range(10)]
with warnings.catch_warnings(record=True):
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
#y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.65, 0.74), (0.72, 0.84)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(multilabel_precision(Y_test, Y_pred), prec,
decimal=2)
assert_almost_equal(multilabel_recall(Y_test, Y_pred), recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = [tuple(l.nonzero()[0]) for l in (Y_proba > 0.5)]
assert_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
ovr.fit(iris.data, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# test that ties are broken using the decision function, not defaulting to
# the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
# recalculate votes to make sure we have a tie
predictions = np.vstack([clf.predict(X) for clf in multi_clf.estimators_])
scores = np.vstack([clf.decision_function(X)
for clf in multi_clf.estimators_])
# classifiers are in order 0-1, 0-2, 1-2
# aggregate votes:
votes = np.zeros((4, 3))
votes[np.arange(4), predictions[0]] += 1
votes[np.arange(4), 2 * predictions[1]] += 1
votes[np.arange(4), 1 + predictions[2]] += 1
# for the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# for the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# for the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], 1)
# score for one is greater than score for zero
assert_greater(scores[2, 0] - scores[0, 0], scores[0, 0] + scores[1, 0])
# score for one is greater than score for two
assert_greater(scores[2, 0] - scores[0, 0], -scores[1, 0] - scores[2, 0])
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
| bsd-3-clause |
HyukjinKwon/spark | python/pyspark/sql/dataframe.py | 9 | 102339 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
import warnings
from functools import reduce
from html import escape as html_escape
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, _local_iterator_from_socket
from pyspark.serializers import BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter, DataFrameWriterV2
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.pandas.conversion import PandasConversionMixin
from pyspark.sql.pandas.map_ops import PandasMapOpsMixin
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(PandasMapOpsMixin, PandasConversionMixin):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the :class:`DataFrame`, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3.0
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
# Check whether _repr_html is supported or not, we use it to avoid calling _jdf twice
# by __repr__ and _repr_html_ while eager evaluation opened.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
.. versionadded:: 1.3.0
Examples
--------
>>> df.toJSON().first()
'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
def registerTempTable(self, name):
"""Registers this DataFrame as a temporary table using the given name.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 1.3.0
.. deprecated:: 2.0.0
Use :meth:`DataFrame.createOrReplaceTempView` instead.
Examples
--------
>>> df.registerTempTable("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
warnings.warn(
"Deprecated in 2.0, use createOrReplaceTempView instead.",
FutureWarning
)
self._jdf.createOrReplaceTempView(name)
def createTempView(self, name):
"""Creates a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this :class:`DataFrame`.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
.. versionadded:: 2.0.0
Examples
--------
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this :class:`DataFrame`.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
.. versionadded:: 2.1.0
Examples
--------
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
.. versionadded:: 2.2.0
Examples
--------
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 1.4.0
Returns
-------
:class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except Exception as e:
raise ValueError(
"Unable to parse datatype from schema. %s" % e) from e
return self._schema
def printSchema(self):
"""Prints out the schema in the tree format.
.. versionadded:: 1.3.0
Examples
--------
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
def explain(self, extended=None, mode=None):
"""Prints the (logical and physical) plans to the console for debugging purpose.
.. versionadded:: 1.3.0
Parameters
----------
extended : bool, optional
default ``False``. If ``False``, prints only the physical plan.
When this is a string without specifying the ``mode``, it works as if the
mode were specified.
mode : str, optional
specifies the expected output format of plans.
* ``simple``: Print only a physical plan.
* ``extended``: Print both logical and physical plans.
* ``codegen``: Print a physical plan and generated codes if they are available.
* ``cost``: Print a logical plan and statistics if they are available.
* ``formatted``: Split explain output into two sections: a physical plan outline \
and node details.
.. versionchanged:: 3.0.0
Added optional argument `mode` to specify the expected output format of plans.
Examples
--------
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
>>> df.explain(mode="formatted")
== Physical Plan ==
* Scan ExistingRDD (1)
(1) Scan ExistingRDD [codegen id : 1]
Output [2]: [age#0, name#1]
...
>>> df.explain("cost")
== Optimized Logical Plan ==
...Statistics...
...
"""
if extended is not None and mode is not None:
raise ValueError("extended and mode should not be set together.")
# For the no argument case: df.explain()
is_no_argument = extended is None and mode is None
# For the cases below:
# explain(True)
# explain(extended=False)
is_extended_case = isinstance(extended, bool) and mode is None
# For the case when extended is mode:
# df.explain("formatted")
is_extended_as_mode = isinstance(extended, str) and mode is None
# For the mode specified:
# df.explain(mode="formatted")
is_mode_case = extended is None and isinstance(mode, str)
if not (is_no_argument or is_extended_case or is_extended_as_mode or is_mode_case):
argtypes = [
str(type(arg)) for arg in [extended, mode] if arg is not None]
raise TypeError(
"extended (optional) and mode (optional) should be a string "
"and bool; however, got [%s]." % ", ".join(argtypes))
# Sets an explain mode depending on a given argument
if is_no_argument:
explain_mode = "simple"
elif is_extended_case:
explain_mode = "extended" if extended else "simple"
elif is_mode_case:
explain_mode = mode
elif is_extended_as_mode:
explain_mode = extended
print(self._sc._jvm.PythonSQLUtils.explainString(self._jdf.queryExecution(), explain_mode))
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
As standard in SQL, this function resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
def isStreaming(self):
"""Returns ``True`` if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
return self._jdf.isStreaming()
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
.. versionadded:: 1.3.0
Parameters
----------
n : int, optional
Number of rows to show.
truncate : bool or int, optional
If set to ``True``, truncate strings longer than 20 chars by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and aligns cells right.
vertical : bool, optional
If set to ``True``, print output rows vertically (one line
per column value).
Examples
--------
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if not isinstance(n, int) or isinstance(n, bool):
raise TypeError("Parameter 'n' (number of rows) must be an int")
if not isinstance(vertical, bool):
raise TypeError("Parameter 'vertical' must be a bool")
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
try:
int_truncate = int(truncate)
except ValueError:
raise TypeError(
"Parameter 'truncate={}' should be either bool or int.".format(truncate))
print(self._jdf.showString(n, int_truncate, vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a :class:`DataFrame` with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: html_escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: html_escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this :class:`DataFrame`, which is especially useful in iterative algorithms
where the plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with :meth:`SparkContext.setCheckpointDir`.
.. versionadded:: 2.1.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
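# A minimal sketch of the checkpoint workflow described in the docstring
# above; the checkpoint directory path is hypothetical::
#
#     spark.sparkContext.setCheckpointDir("/tmp/spark-checkpoints")
#     df2 = df.checkpoint()       # eager: materialized immediately
#     df3 = df.checkpoint(False)  # lazy: materialized on the first action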
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this :class:`DataFrame`, which is especially useful in
iterative algorithms where the plan may grow exponentially. Local checkpoints are
stored in the executors using the caching subsystem and therefore they are not reliable.
.. versionadded:: 2.3.0
Parameters
----------
eager : bool, optional
Whether to checkpoint this :class:`DataFrame` immediately
Notes
-----
This API is experimental.
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
.. versionadded:: 2.1.0
Parameters
----------
eventTime : str
the name of the column that contains the event time of the row.
delayThreshold : str
the minimum delay to wait for data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
Notes
-----
This API is evolving.
>>> from pyspark.sql.functions import timestamp_seconds
>>> sdf.select(
... 'name',
... timestamp_seconds(sdf.time).alias('time')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
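# A sketch of the typical watermarked, windowed aggregation this method is
# used for, reusing the ``sdf`` frame from the example above; the window
# duration is an arbitrary choice::
#
#     from pyspark.sql.functions import window
#     counts = (sdf
#               .withWatermark('time', '10 minutes')
#               .groupBy(window(sdf.time, '5 minutes'), sdf.name)
#               .count())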
def hint(self, name, *parameters):
"""Specifies some hint on the current :class:`DataFrame`.
.. versionadded:: 2.2.0
Parameters
----------
name : str
A name of the hint.
parameters : str, list, float or int
Optional parameters.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (str, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.count()
2
"""
return int(self._jdf.count())
def collect(self):
"""Returns all the records as a list of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def toLocalIterator(self, prefetchPartitions=False):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this
:class:`DataFrame`. With prefetch it may consume up to the memory of the 2 largest
partitions.
.. versionadded:: 2.0.0
Parameters
----------
prefetchPartitions : bool, optional
If Spark should pre-fetch the next partition before it is needed.
Examples
--------
>>> list(df.toLocalIterator())
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator(prefetchPartitions)
return _local_iterator_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
def limit(self, num):
"""Limits the result count to the number specified.
.. versionadded:: 1.3.0
Examples
--------
>>> df.limit(1).collect()
[Row(age=2, name='Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.take(2)
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
"""
return self.limit(num).collect()
def tail(self, num):
"""
Returns the last ``num`` rows as a :class:`list` of :class:`Row`.
Running tail requires moving data into the application's driver process, and doing so with
a very large ``num`` can crash the driver process with OutOfMemoryError.
.. versionadded:: 3.0.0
Examples
--------
>>> df.tail(1)
[Row(age=5, name='Bob')]
"""
with SCCallSiteSync(self._sc):
sock_info = self._jdf.tailToPython(num)
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreachPartition()``.
.. versionadded:: 1.3.0
Examples
--------
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (`MEMORY_AND_DISK`).
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK` to match Scala in 2.0.
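Examples
--------
A minimal illustrative sketch (outputs are indicative, not verified here). The
first action after calling this method materializes the cache; later actions reuse it:
>>> df.cache()  # doctest: +SKIP
DataFrame[age: int, name: string]
>>> df.count()  # doctest: +SKIP
2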
"""
self.is_cached = True
self._jdf.cache()
return self
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK_DESER):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified, it defaults to `MEMORY_AND_DISK_DESER`.
.. versionadded:: 1.3.0
Notes
-----
The default storage level has changed to `MEMORY_AND_DISK_DESER` to match Scala in 3.0.
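Examples
--------
An illustrative sketch with an explicit storage level (output is indicative):
>>> df.persist(StorageLevel.DISK_ONLY)  # doctest: +SKIP
DataFrame[age: int, name: string]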
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
.. versionadded:: 2.1.0
Examples
--------
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. versionadded:: 1.3.0
Notes
-----
`blocking` default has changed to ``False`` to match Scala in 2.0.
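Examples
--------
An illustrative sketch (output is indicative):
>>> df.persist().unpersist()  # doctest: +SKIP
DataFrame[age: int, name: string]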
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
.. versionadded:: 1.4.0
Parameters
----------
numPartitions : int
specify the target number of partitions
Examples
--------
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is hash partitioned.
.. versionadded:: 1.3.0
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
Examples
--------
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition(3, "name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting :class:`DataFrame` is range partitioned.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
.. versionadded:: 2.4.0
Parameters
----------
numPartitions : int
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
cols : str or :class:`Column`
partitioning columns.
Notes
-----
For performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
Examples
--------
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
raise ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (str, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
withReplacement : bool, optional
Sample with replacement or not (default ``False``).
fraction : float, optional
Fraction of rows to generate, range [0.0, 1.0].
seed : int, optional
Seed for sampling (default a random seed).
Notes
-----
This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
`fraction` is required; `withReplacement` and `seed` are optional.
Examples
--------
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = int(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
.. versionadded:: 1.5.0
Parameters
----------
col : :class:`Column` or str
column that defines strata
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
fractions : dict
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
seed : int, optional
random seed
Returns
-------
:class:`DataFrame`
a new :class:`DataFrame` that represents the stratified sample
Examples
--------
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 3|
| 1| 6|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
"""
if isinstance(col, str):
col = Column(col)
elif not isinstance(col, Column):
raise TypeError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise TypeError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, str)):
raise TypeError("key must be float, int, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
.. versionadded:: 1.4.0
Parameters
----------
weights : list
list of doubles as weights with which to split the :class:`DataFrame`.
Weights will be normalized if they don't sum up to 1.0.
seed : int, optional
The seed for sampling.
Examples
--------
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
2
>>> splits[1].count()
2
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), int(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
def dtypes(self):
"""Returns all column names and their data types as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
def columns(self):
"""Returns all column names as a list.
.. versionadded:: 1.3.0
Examples
--------
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
.. versionadded:: 2.3.0
Parameters
----------
colName : str
string, column name specified as a regex.
Examples
--------
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, str):
raise TypeError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
.. versionadded:: 1.3.0
Parameters
----------
alias : str
an alias name to be set for the :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age") \
.sort(desc("df_as1.name")).collect()
[Row(name='Bob', name='Bob', age=5), Row(name='Alice', name='Alice', age=2)]
"""
assert isinstance(alias, str), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
.. versionadded:: 2.1.0
Parameters
----------
other : :class:`DataFrame`
Right side of the cartesian product.
Examples
--------
>>> df.select("age", "name").collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df2.select("name", "height").collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name='Alice', height=80), Row(age=2, name='Alice', height=85),
Row(age=5, name='Bob', height=80), Row(age=5, name='Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
.. versionadded:: 1.3.0
Parameters
----------
other : :class:`DataFrame`
Right side of the join
on : str, list or :class:`Column`, optional
a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
how : str, optional
default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``fullouter``, ``full_outer``, ``left``, ``leftouter``, ``left_outer``,
``right``, ``rightouter``, ``right_outer``, ``semi``, ``leftsemi``, ``left_semi``,
``anti``, ``leftanti`` and ``left_anti``.
Examples
--------
The following performs a full outer join between ``df`` and ``df2``.
>>> from pyspark.sql.functions import desc
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height) \
.sort(desc("name")).collect()
[Row(name='Bob', height=85), Row(name='Alice', height=None), Row(name=None, height=80)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').sort(desc("name")).collect()
[Row(name='Tom', height=80), Row(name='Bob', height=85), Row(name='Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name='Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name='Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], str):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, str), "how should be a string"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
.. versionadded:: 1.6.0
Parameters
----------
cols : str, list or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
.. versionadded:: 1.3.0
Parameters
----------
cols : str, list, or :class:`Column`, optional
list of :class:`Column` or column names to sort by.
Other Parameters
----------------
ascending : bool or list, optional
boolean or list of boolean (default ``True``).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
Examples
--------
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name='Bob'), Row(age=2, name='Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
.. versionadded:: 1.3.1
This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Use summary for expanded statistics and control over which statistics to compute.
Examples
--------
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
See Also
--------
DataFrame.summary
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
- arbitrary approximate percentiles specified as a percentage (e.g., 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. versionadded:: 2.3.0
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
Examples
--------
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns, first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See Also
--------
DataFrame.display
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. versionadded:: 1.3.0
Notes
-----
This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
n : int, optional
default 1. Number of rows to return.
Returns
-------
If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
Examples
--------
>>> df.head()
Row(age=2, name='Alice')
>>> df.head(1)
[Row(age=2, name='Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
def first(self):
"""Returns the first row as a :class:`Row`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.first()
Row(age=2, name='Alice')
"""
return self.head()
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name='Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name='Bob')]
"""
if isinstance(item, str):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
.. versionadded:: 1.3.0
Parameters
----------
cols : str, :class:`Column`, or list
column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current :class:`DataFrame`.
Examples
--------
>>> df.select('*').collect()
[Row(age=2, name='Alice'), Row(age=5, name='Bob')]
>>> df.select('name', 'age').collect()
[Row(name='Alice', age=2), Row(name='Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name='Alice', age=12), Row(name='Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
.. versionadded:: 1.3.0
Examples
--------
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
.. versionadded:: 1.3.0
Parameters
----------
condition : :class:`Column` or str
a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
Examples
--------
>>> df.filter(df.age > 3).collect()
[Row(age=5, name='Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name='Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name='Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name='Alice')]
"""
if isinstance(condition, str):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
.. versionadded:: 1.3.0
Parameters
----------
cols : list, str or :class:`Column`
columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
Examples
--------
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name='Alice', avg(age)=2.0), Row(name='Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name='Alice', age=2, count=1), Row(name='Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregations on them.
.. versionadded:: 1.4.0
Examples
--------
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy().agg()``).
.. versionadded:: 1.3.0
Examples
--------
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
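Examples
--------
An illustrative sketch using the two-row ``df`` defined in this module's doctests
(counts are indicative):
>>> df.union(df).count()  # doctest: +SKIP
4
>>> df.union(df).distinct().count()  # doctest: +SKIP
2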
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return self.union(other)
def unionByName(self, other, allowMissingColumns=False):
""" Returns a new :class:`DataFrame` containing union of rows in this and another
:class:`DataFrame`.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
.. versionadded:: 2.3.0
Examples
--------
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
When the parameter `allowMissingColumns` is ``True``, the set of column names
in this and other :class:`DataFrame` can differ; missing columns will be filled with null.
Further, the missing columns of this :class:`DataFrame` will be added at the end
in the schema of the union result:
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col3"])
>>> df1.unionByName(df2, allowMissingColumns=True).show()
+----+----+----+----+
|col0|col1|col2|col3|
+----+----+----+----+
| 1| 2| 3|null|
|null| 4| 5| 6|
+----+----+----+----+
.. versionchanged:: 3.1.0
Added optional argument `allowMissingColumns` to specify whether to allow
missing columns.
"""
return DataFrame(self._jdf.unionByName(other._jdf, allowMissingColumns), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this :class:`DataFrame` and another :class:`DataFrame`.
This is equivalent to `INTERSECT` in SQL.
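Examples
--------
An illustrative sketch with hypothetical one-column frames; note that duplicates
are removed, as with `INTERSECT` in SQL:
>>> d1 = spark.createDataFrame([("a",), ("b",), ("b",)], ["C1"])  # doctest: +SKIP
>>> d2 = spark.createDataFrame([("b",), ("c",)], ["C1"])  # doctest: +SKIP
>>> d1.intersect(d2).collect()  # doctest: +SKIP
[Row(C1='b')]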
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this :class:`DataFrame`
and another :class:`DataFrame` while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL. As standard in SQL, this function
resolves columns by position (not by name).
.. versionadded:: 2.4.0
Examples
--------
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this :class:`DataFrame`
but not in another :class:`DataFrame`.
This is equivalent to `EXCEPT DISTINCT` in SQL.
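Examples
--------
An illustrative sketch with hypothetical frames; duplicates are removed, as with
`EXCEPT DISTINCT` in SQL:
>>> d1 = spark.createDataFrame([(1,), (2,), (2,), (3,)], ["id"])  # doctest: +SKIP
>>> d2 = spark.createDataFrame([(3,), (4,)], ["id"])  # doctest: +SKIP
>>> d1.subtract(d2).sort("id").collect()  # doctest: +SKIP
[Row(id=1), Row(id=2)]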
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
be and the system will accordingly limit the state. In addition, data older than the
watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
|Alice| 10| 80|
+-----+---+------+
>>> df.dropDuplicates(['name', 'height']).show()
+-----+---+------+
| name|age|height|
+-----+---+------+
|Alice| 5| 80|
+-----+---+------+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
how : str, optional
'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
thresh: int, optional
default None
If specified, drop rows that have fewer than `thresh` non-null values.
This overwrites the `how` parameter.
subset : str, tuple or list, optional
optional list of column names to consider.
Examples
--------
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise TypeError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
.. versionadded:: 1.3.1
Parameters
----------
value : int, float, string, bool or dict
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, float, boolean, or string.
subset : str, tuple or list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, str, bool, dict)):
raise TypeError("value should be a float, int, string, bool or dict")
# Note that bool is a subclass of int, but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, int):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, str):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise TypeError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
or strings. Value can have None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements all values to be replaced should have unique
floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
an arbitrary replacement will be used.
.. versionadded:: 1.4.0
Parameters
----------
to_replace : bool, int, float, string, list or dict
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
value : bool, int, float, string or None, optional
The replacement value must be a bool, int, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
subset : list, optional
optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
Examples
--------
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(str)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(str)
all_of_numeric = all_of((float, int))
# Validate input types
valid_types = (bool, float, int, str, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise TypeError(
"to_replace should be a bool, float, int, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise TypeError("If to_replace is not a dict, value should be "
"a bool, float, int, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, str))):
raise TypeError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, str)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, str)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, str):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
:class:`DataFrame`.
The result of this algorithm has the following deterministic bound:
If the :class:`DataFrame` has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the :class:`DataFrame` so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
presented in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
.. versionadded:: 2.0.0
Parameters
----------
col: str, tuple or list
Can be a single column name, or a list of names for multiple columns.
.. versionchanged:: 2.2
Added support for multiple columns.
probabilities : list or tuple
a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
relativeError : float
The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
Returns
-------
list
the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
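Examples
--------
An illustrative sketch on a hypothetical ``spark.range`` frame (returned values
are approximate and depend on ``relativeError``):
>>> nums = spark.range(100)  # doctest: +SKIP
>>> nums.approxQuantile("id", [0.0, 0.5, 1.0], 0.05)  # doctest: +SKIP
[0.0, 49.0, 99.0]
>>> nums.approxQuantile(["id"], [0.5], 0.05)  # doctest: +SKIP
[[49.0]]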
"""
if not isinstance(col, (str, list, tuple)):
raise TypeError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, str)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, str):
raise TypeError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise TypeError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int)):
raise TypeError("relativeError should be numerical (float, int)")
if relativeError < 0:
raise ValueError("relativeError should be >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a :class:`DataFrame` as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
method : str, optional
The correlation method. Currently only supports "pearson"
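Examples
--------
An illustrative sketch with a hypothetical two-column frame (the coefficient
shown is approximate):
>>> pts = spark.createDataFrame([(1, 12), (10, 1), (19, 8)], ["c1", "c2"])  # doctest: +SKIP
>>> pts.corr("c1", "c2")  # doctest: +SKIP
-0.359...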
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column
col2 : str
The name of the second column
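Examples
--------
An illustrative sketch with a hypothetical frame (sample covariance of two
perfectly linear columns):
>>> pts = spark.createDataFrame([(1.0, 2.0), (2.0, 4.0), (3.0, 6.0)], ["c1", "c2"])  # doctest: +SKIP
>>> pts.cov("c1", "c2")  # doctest: +SKIP
2.0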
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
col1 : str
The name of the first column. Distinct items will make the first item of
each row.
col2 : str
The name of the second column. Distinct items will make the column names
of the :class:`DataFrame`.
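Examples
--------
An illustrative sketch with a hypothetical frame (the order of the value columns
may vary):
>>> pairs = spark.createDataFrame([(1, "a"), (1, "b"), (2, "a")], ["k", "v"])  # doctest: +SKIP
>>> pairs.crosstab("k", "v").sort("k_v").show()  # doctest: +SKIP
+---+---+---+
|k_v|  a|  b|
+---+---+---+
|  1|  1|  1|
|  2|  1|  0|
+---+---+---+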
"""
if not isinstance(col1, str):
raise TypeError("col1 should be a string.")
if not isinstance(col2, str):
raise TypeError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. versionadded:: 1.4.0
Parameters
----------
cols : list or tuple
Names of the columns to calculate frequent items for as a list or tuple of
strings.
support : float, optional
The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
Notes
-----
This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting
:class:`DataFrame`.
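Examples
--------
An illustrative sketch with a hypothetical single-column frame (the result may
include false positives, as noted above):
>>> vals = spark.createDataFrame([(1,), (1,), (1,), (2,), (3,)], ["a"])  # doctest: +SKIP
>>> vals.freqItems(["a"], support=0.4).collect()  # doctest: +SKIP
[Row(a_freqItems=[1])]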
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise TypeError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this :class:`DataFrame`; attempting to add
a column from some other :class:`DataFrame` will raise an error.
.. versionadded:: 1.3.0
Parameters
----------
colName : str
string, name of the new column.
col : :class:`Column`
a :class:`Column` expression for the new column.
Notes
-----
This method introduces a projection internally. Therefore, calling it multiple
times, for instance, via loops in order to add multiple columns can generate big
plans which can cause performance issues and even `StackOverflowException`.
To avoid this, use :func:`select` with the multiple columns at once.
Examples
--------
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name='Alice', age2=4), Row(age=5, name='Bob', age2=7)]
"""
if not isinstance(col, Column):
raise TypeError("col should be Column")
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
.. versionadded:: 1.3.0
Parameters
----------
existing : str
string, name of the existing column to rename.
new : str
string, new name of the column.
Examples
--------
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name='Alice'), Row(age2=5, name='Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
.. versionadded:: 1.4.0
Parameters
----------
cols: str or :class:`Column`
a name of the column, or the :class:`Column` to drop
Examples
--------
>>> df.drop('age').collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.drop(df.age).collect()
[Row(name='Alice'), Row(name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name='Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name='Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name='Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, str):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, str):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def toDF(self, *cols):
"""Returns a new :class:`DataFrame` that with new specified column names
Parameters
----------
cols : str
new column names
Examples
--------
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2='Alice'), Row(f1=5, f2='Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
def transform(self, func):
"""Returns a new :class:`DataFrame`. Concise syntax for chaining custom transformations.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a function that takes and returns a :class:`DataFrame`.
Examples
--------
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_int(input_df):
... return input_df.select([col(col_name).cast("int") for col_name in input_df.columns])
>>> def sort_columns_asc(input_df):
... return input_df.select(*sorted(input_df.columns))
>>> df.transform(cast_all_to_int).transform(sort_columns_asc).show()
+-----+---+
|float|int|
+-----+---+
| 1| 1|
| 2| 2|
+-----+---+
"""
result = func(self)
assert isinstance(result, DataFrame), "Func returned an instance of type [%s], " \
"should have been DataFrame." % type(result)
return result
def sameSemantics(self, other):
"""
Returns `True` when the logical query plans inside both :class:`DataFrame`\\s are equal and
therefore return the same results.
.. versionadded:: 3.1.0
Notes
-----
The equality comparison here is simplified by tolerating the cosmetic differences
such as attribute names.
This API can compare both :class:`DataFrame`\\s very quickly but can still return
`False` for :class:`DataFrame`\\s that return the same results, for instance, from
different plans. Such false negatives are acceptable, for example, when the result is used for caching.
This API is a developer API.
Examples
--------
>>> df1 = spark.range(10)
>>> df2 = spark.range(10)
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id * 2))
True
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col1", df2.id + 2))
False
>>> df1.withColumn("col1", df1.id * 2).sameSemantics(df2.withColumn("col0", df2.id * 2))
True
"""
if not isinstance(other, DataFrame):
raise TypeError("other parameter should be of DataFrame; however, got %s"
% type(other))
return self._jdf.sameSemantics(other._jdf)
def semanticHash(self):
"""
Returns a hash code of the logical query plan against this :class:`DataFrame`.
.. versionadded:: 3.1.0
Notes
-----
Unlike the standard hash code, the hash is calculated against the query plan
simplified by tolerating the cosmetic differences such as attribute names.
This API is a developer API.
Examples
--------
>>> spark.range(10).selectExpr("id as col0").semanticHash() # doctest: +SKIP
1855039936
>>> spark.range(10).selectExpr("id as col1").semanticHash() # doctest: +SKIP
1855039936
"""
return self._jdf.semanticHash()
def inputFiles(self):
"""
Returns a best-effort snapshot of the files that compose this :class:`DataFrame`.
This method simply asks each constituent BaseRelation for its respective files and
takes the union of all results. Depending on the source relations, this may not find
all input files. Duplicates are removed.
.. versionadded:: 3.1.0
Examples
--------
>>> df = spark.read.load("examples/src/main/resources/people.json", format="json")
>>> len(df.inputFiles())
1
"""
return list(self._jdf.inputFiles())
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
# Two aliases below were added for pandas compatibility many years ago.
# There are too many differences compared to pandas and we cannot just
# make it "compatible" by adding aliases. Therefore, we stop adding such
# aliases as of Spark 3.0. Two methods below remain just
# for legacy users currently.
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
def writeTo(self, table):
"""
Create a write configuration builder for v2 sources.
This builder is used to configure and execute write operations.
For example, to append or create or replace existing tables.
.. versionadded:: 3.1.0
Examples
--------
>>> df.writeTo("catalog.db.table").append() # doctest: +SKIP
>>> df.writeTo( # doctest: +SKIP
... "catalog.db.table"
... ).partitionedBy("col").createOrReplace()
"""
return DataFrameWriterV2(self, table)
def to_pandas_on_spark(self, index_col=None):
"""
Converts the existing DataFrame into a pandas-on-Spark DataFrame.
If a pandas-on-Spark DataFrame is converted to a Spark DataFrame and then back
to pandas-on-Spark, it will lose the index information and the original index
will be turned into a normal column.
This is only available if pandas is installed.
Parameters
----------
index_col: str or list of str, optional, default: None
Index column of table in Spark.
See Also
--------
pyspark.pandas.frame.DataFrame.to_spark
Examples
--------
>>> df.show() # doctest: +SKIP
+----+----+
|Col1|Col2|
+----+----+
| a| 1|
| b| 2|
| c| 3|
+----+----+
>>> df.to_pandas_on_spark() # doctest: +SKIP
Col1 Col2
0 a 1
1 b 2
2 c 3
We can specify the index columns.
>>> df.to_pandas_on_spark(index_col="Col1")  # doctest: +SKIP
Col2
Col1
a 1
b 2
c 3
"""
from pyspark.pandas.namespace import _get_index_map
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import InternalFrame
index_spark_columns, index_names = _get_index_map(self, index_col)
internal = InternalFrame(
spark_frame=self, index_spark_columns=index_spark_columns, index_names=index_names
)
return DataFrame(internal)
# Keep to_koalas for backward compatibility for now.
def to_koalas(self, index_col=None):
warnings.warn(
"DataFrame.to_koalas is deprecated. Use DataFrame.to_pandas_on_spark instead.",
FutureWarning,
)
return self.to_pandas_on_spark(index_col)
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(height=80, name='Tom'), Row(height=85, name='Bob')]).toDF()
globs['df3'] = sc.parallelize([Row(age=2, name='Alice'),
Row(age=5, name='Bob')]).toDF()
globs['df4'] = sc.parallelize([Row(age=10, height=80, name='Alice'),
Row(age=5, height=None, name='Bob'),
Row(age=None, height=None, name='Tom'),
Row(age=None, height=None, name=None)]).toDF()
globs['df5'] = sc.parallelize([Row(age=10, name='Alice', spy=False),
Row(age=5, name='Bob', spy=None),
Row(age=None, name='Mallory', spy=True)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
white-lab/pyproteome | pyproteome/pathways/photon_ptm.py | 1 | 5508 |
from collections import OrderedDict
import io
import logging
import requests
import os
import tarfile
import uuid
import pyproteome as pyp
import brainrnaseq as brs
import pandas as pd
import numpy as np
LOGGER = logging.getLogger('pathways.photon_ptm')
try:
from genemap.mappers import EnsemblMapper
except ImportError:
pass
parameters = {
'activity': {
'min_size': 4,
'permutations': 1000,
'side': 'greater',
},
'anat': {
'alpha': 0.25,
'anchor': -1,
},
'go': {
'max_category_size': 500,
'min_category_size': 5,
'max_category_depth': 10,
},
'ppi-network': {
'confidence': 0.5,
'degree_threshold': 150,
}
}
@pyp.utils.memoize
def _get_anat(dir, force=False):
if os.path.exists(os.path.join(dir, 'db')) and not force:
return
url = 'http://cs.tau.ac.il/~jandanielr/db.tar.gz'
LOGGER.info('Fetching PHOTON database from {} to {}'.format(url, dir))
r = requests.get(url, stream=True)
r.raw.decode_content = True
r.raise_for_status()
tar_file = tarfile.open(
fileobj=io.BytesIO(r.content),
mode='r',
)
tar_file.extractall(dir)
return tar_file
@pyp.utils.memoize
def _map_gene(mapper, symbol_mapping, gene, species):
symbol = gene
entrez = brs.mapping.get_entrez_mapping(symbol, species=species)
if species.lower().replace(' ', '_') not in ['homo_sapiens', 'human']:
entrez = [i for i in mapper.map_ids([entrez]) if i]
if not entrez:
return None, None
entrez = int(entrez[0])
symbol = symbol_mapping.loc[entrez]
return entrez, symbol
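# A hedged illustration of the mapping flow above (the gene name is an
# assumption for illustration, not data from this repository): for a non-human
# species such as 'Mus musculus', the symbol is first resolved to an Entrez ID
# via brainrnaseq, mapped to its human ortholog with EnsemblMapper, and then
# converted back to a human symbol, e.g.
#   entrez, symbol = _map_gene(mapper, symbol_mapping, 'Mapk1', 'Mus musculus')
#   # -> an integer human Entrez ID and the corresponding human gene symbol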
def _get_templates(template_dir):
pyp.utils.makedirs(template_dir)
url = (
'https://raw.githubusercontent.com/jdrudolph/photon/'
'c73d1eb7f5e7cab86031e056350c7b09fb5e1d51/templates/result.html'
)
r = requests.get(url)
r.raise_for_status()
with open(os.path.join(template_dir, 'result.html'), 'wb') as f:
f.write(r.content)
def photon(ds, folder_name=None, write_output=False):
'''
Run the PHOTON algorithm on a data set to find functional phosphorylation sites
using protein-protein interaction networks.
Parameters
----------
ds : :class:`pyproteome.data_sets.DataSet`
folder_name : str, optional
Returns
-------
out_path : str
Path to results directory.
'''
import phos
import phos.defaults
import phos.pipeline
ds = ds.filter(fn=lambda x: len(x['Proteins']) < 2)
ds = ds.filter(fn=lambda x: len(x['Scan']) >= 2)
ds = ds.filter(mod='Phospho')
species = list(ds.species)[0]
from_name = '{}{}'.format(
species.split(' ')[0][0],
species.split(' ')[1],
).lower()
species = species.replace(' ', '_')
mapper = EnsemblMapper(
from_type='entrez',
to_type='entrez',
from_organism=from_name,
to_organism='hsapiens',
)
symbol_mapping = brs.cache.get_mapping_data(species='Homo sapiens')
symbol_mapping['Symbol'] = symbol_mapping.index
symbol_mapping = symbol_mapping.set_index('GeneID')['Symbol']
def _get_phos_data(psms):
hit = 0
miss = 0
for _, row in psms.iterrows():
gene = row['Proteins'].genes[0]
entrez, symbol = _map_gene(mapper, symbol_mapping, gene, species)
if not entrez:
# print(gene, entrez, symbol)
miss += 1
continue
hit += 1
for mod in row['Modifications'].get_mods('Phospho'):
yield pd.Series(OrderedDict([
('GeneID', entrez),
('Amino.Acid', mod.letter),
('Position', 1 + mod.abs_pos[0]),
('avg', np.log2(row['Fold Change'])),
('Symbol', symbol),
]))
print(hit, miss)
LOGGER.info('Generating data frame from data set: {}, {}'.format(ds.name, ds.shape))
df = pd.DataFrame(
list(_get_phos_data(ds.psms))
).dropna()
LOGGER.info('Generated data frame: {}'.format(df.shape))
df = df.sort_values('avg', ascending=False)
name = str(uuid.uuid4())
_parameters = parameters.copy()
_parameters['anat']['anchor'] = 1950
# XXX: Better directory for files that Photon must download for its use?
dir = os.path.join(pyp.paths.FIGURES_DIR)
defaults = phos.defaults.make_defaults(dir)
_get_anat(dir)
template_dir = os.path.join(defaults['root'], 'templates')
_get_templates(template_dir)
folder_name = pyp.utils.make_folder(
data=ds,
folder_name=folder_name,
sub=os.path.join('Photon', name),
)
csv_path = os.path.join(folder_name, 'results.csv')
with open(csv_path, 'w') as csv_file:
df.to_csv(csv_file, index=False)
if write_output:
phos.pipeline.run(
name,
csv_path,
_parameters,
template_dir,
folder_name,
defaults['db'],
)
LOGGER.info('Wrote results to: {}'.format(folder_name))
return folder_name
else:
exp, scores, subnet, go_scores, predictions = phos.pipeline._run(
name,
csv_path,
_parameters,
# template_dir,
# folder_name,
defaults['db'],
)
return exp, scores, subnet, go_scores, predictions
| bsd-2-clause |
SteVwonder/MusiGraph | musigraph.py | 1 | 6030 | import requests
import json
import argparse
from hashlib import md5
import networkx as nx
import pygraphviz as pgv
import matplotlib.pyplot as plt
API_ROOT = "http://ws.audioscrobbler.com/2.0/"
class ConfigException(Exception):
pass
class APIException(Exception):
pass
def parse_config(config_path):
with open(config_path, 'r') as json_file:
config = json.loads(json_file.read())
if 'lastfm' not in config:
raise ConfigException('lastfm not in config')
return config
def save_config(config_path, config_dict):
with open(config_path, 'w') as json_file:
json_dump = json.dumps(config_dict, sort_keys=True, indent=4, separators=(',', ': '))
json_file.write(json_dump)
class LastFm:
def __init__(self, config_path):
config = parse_config(config_path)
lastfm_config = config['lastfm']
if 'api_key' not in lastfm_config:
raise ConfigException('api_key not in lastfm_config')
if 'secret' not in lastfm_config:
raise ConfigException('secret not in lastfm_config')
if 'session_key' not in lastfm_config:
lastfm_config['session_key'] = ""
for key in lastfm_config:
setattr(self, key, lastfm_config[key])
if self.session_key == "":
session = self.get_session()
self.session_key = session['key']
self.username = session['name']
lastfm_config['session_key'] = self.session_key
lastfm_config['username'] = self.username
save_config(config_path, config)
def build_signature(self, param_tuples):
param_tuples.sort(key=lambda tup: tup[0])
sig_str = []
for tup in param_tuples:
sig_str.append(str(tup[0]).encode('utf-8'))
sig_str.append(str(tup[1]).encode('utf-8'))
sig_str.append(str(self.secret).encode('utf-8'))
sig_str = "".join(sig_str)
return md5(sig_str).hexdigest()
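# A hedged sketch of the signing scheme implemented above (parameter values
# are illustrative, and `lastfm` denotes an already constructed LastFm
# instance): the request parameters are sorted by key, their keys and values
# are concatenated together with the shared secret, and the md5 hex digest of
# that string is sent as `api_sig`, e.g.
#   sig = lastfm.build_signature([('method', 'auth.gettoken'),
#                                 ('api_key', 'SOME_KEY')])
#   # -> 32-character md5 hex digest used by get(..., signature=True)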
def get(self, method, signature=False, **kwargs):
params = {}
params['method'] = method
params['api_key'] = self.api_key
params.update(kwargs)
if signature:
params['api_sig'] = self.build_signature(params.items())
params['format'] = 'json'
r = requests.get(API_ROOT, params=params)
json_resp = r.json()
if 'error' in json_resp:
print "Error Message:", json_resp['message']
raise APIException(str(json_resp['error']))
return r
def get_request_token(self):
method = 'auth.gettoken'
json_resp = self.get(method, signature=True).json()
return json_resp['token']
def get_session(self):
request_token = self.get_request_token()
self.authorize_user(request_token)
method = 'auth.getSession'
json_resp = self.get(method, signature=True, token=request_token).json()
return json_resp['session']
def authorize_user(self, request_token):
from webbrowser import open as wb_open
wb_open("http://www.last.fm/api/auth/?api_key={0}&token={1}".format(self.api_key, request_token))
raw_input("Press any button once you have authorized MusiGraph to use your Last.fm account")
def get_top_artists(self):
method = 'user.gettopartists'
user = self.username
limit = 50
period = 'overall'
json_resp = self.get(method, user=user, limit=limit, period=period).json()
return json_resp['topartists']['artist']
def get_similar_artists(self, artist):
method = 'artist.getSimilar'
limit = 10
json_resp = self.get(method, artist=artist, limit=limit).json()
return json_resp['similarartists']['artist']
def build_graph(nodes, edges):
G = nx.Graph()
G.add_nodes_from(nodes)
for name, obj in nodes.iteritems():
color_tuple = obj.get_color()
color_dict = {'a':1.0, 'r':color_tuple[0], 'g':color_tuple[1], 'b':color_tuple[2]}
viz_dict = {'color':color_dict, 'size': obj.get_value()}
G.node[name]['viz']= viz_dict
G.add_edges_from(edges)
return G
def get_similar_artists(lastfm, artists):
output_list = []
for artist in artists:
similar_artists = lastfm.get_similar_artists(artist)
similar_artists = map(lambda x: x['name'], similar_artists)
output_list.append((artist, similar_artists))
return output_list
class Node():
def __init__(self, name, value=1, color=(84, 84, 84)):
self.name = name
self.value = value
self.color = color
def get_name(self):
return self.name
def get_value(self):
return self.value
def get_color(self):
return self.color
def set_value(self, value):
self.value = value
def set_color(self, color):
self.color = color
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="path to alternate config file", default="./config.json", type=str)
args = parser.parse_args()
lastfm = LastFm(args.config)
artists = lastfm.get_top_artists()
artists = map(lambda x: x['name'], artists)
similar_artists = get_similar_artists(lastfm, artists)
edges = []
nodes = {}
map(lambda row: row[0], similar_artists)
for row in similar_artists:
if row[0] not in nodes:
nodes[row[0]] = Node(row[0], color=(0,0,205))
else:
node = nodes[row[0]]
node.set_value(node.get_value() + 1)
node.set_color((0,0,205))
for similar_artist in row[1]:
if similar_artist not in nodes:
nodes[similar_artist] = Node(similar_artist, color=(238,0,0))
else:
node = nodes[similar_artist]
node.set_value(node.get_value() + 1)
edges.append((row[0], similar_artist))
graph = build_graph(nodes, edges)
nx.write_gexf(graph,'graph.gexf')
if __name__ == "__main__":
main()
| apache-2.0 |
Srisai85/scikit-learn | sklearn/calibration.py | 137 | 18876 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
it is assumed that base_estimator has been
fitted already and all data is used for calibration. Note that
data for fitting the classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples (<<1000) since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
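# A short worked note on the values returned above (this restates the formula
# used by _SigmoidCalibration.predict below rather than introducing a new API):
# with slope a and intercept b, a decision value f is mapped to
#   p = 1. / (1. + np.exp(a * f + b))
# so a strongly negative a * f + b gives p close to 1 and a strongly positive
# a * f + b gives p close to 0.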
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
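# A minimal usage sketch (the arrays below are illustrative, not doctest
# output):
#   y_true = np.array([0, 0, 0, 1, 1, 1])
#   y_prob = np.array([0.1, 0.2, 0.3, 0.7, 0.8, 0.9])
#   prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=2)
#   # prob_true[i] is the fraction of positives in bin i and prob_pred[i] is
#   # the mean predicted probability in that bin.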
| bsd-3-clause |
bittremieux/ANN-SoLo | src/setup.py | 1 | 3715 | import codecs
import os
import setuptools
import numpy as np
try:
import Cython.Distutils
except ImportError:
use_cython = False
else:
use_cython = True
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
# Intentionally *not* adding an encoding option to open, See:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError('Unable to find version string')
DISTNAME = 'ann_solo'
# https://packaging.python.org/guides/single-sourcing-package-version/
VERSION = get_version('ann_solo/__init__.py')
DESCRIPTION = 'Spectral library search engine optimized for fast open ' \
'modification searching'
with open('README.md') as f_in:
LONG_DESCRIPTION = f_in.read()
AUTHOR = 'Wout Bittremieux'
AUTHOR_EMAIL = 'wout.bittremieux@uantwerpen.be'
URL = 'https://github.com/bittremieux/ANN-SoLo'
LICENSE = 'Apache 2.0'
compile_args = ['-O3', '-march=native', '-ffast-math', '-fno-associative-math',
'-std=c++14']
cython_directives = {'boundscheck': False, 'wraparound': False,
'initializedcheck': False, 'language_level': 3}
ext_spectrum_match = setuptools.Extension(
'ann_solo.spectrum_match',
['ann_solo/spectrum_match.pyx', 'ann_solo/SpectrumMatch.cpp'],
language='c++', extra_compile_args=compile_args,
extra_link_args=compile_args, include_dirs=[np.get_include()])
ext_spectrum_match.cython_directives = cython_directives
ext_parsers = setuptools.Extension(
'ann_solo.parsers', ['ann_solo/parsers.pyx'],
language='c++', extra_compile_args=compile_args,
extra_link_args=compile_args, include_dirs=[np.get_include()])
ext_parsers.cython_directives = cython_directives
cmdclass = {}
if use_cython:
cmdclass.update({'build_ext': Cython.Distutils.build_ext})
setuptools.setup(
name=DISTNAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url=URL,
license=LICENSE,
platforms=['any'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Programming Language :: C++',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering :: Bio-Informatics'],
packages=['ann_solo'],
entry_points={
'console_scripts': ['ann_solo = ann_solo.ann_solo:main',
'ann_solo_plot = ann_solo.plot_ssm:main']},
cmdclass=cmdclass,
install_requires=[
'ConfigArgParse',
'Cython',
'faiss',
'joblib',
'matplotlib',
'mmh3',
'numba>=0.41',
'numexpr',
'numpy',
'pandas',
'pyteomics',
'scipy',
'spectrum_utils>=0.3.0',
'tqdm'],
setup_requires=[
'Cython',
'numpy'],
ext_modules=[ext_spectrum_match, ext_parsers],
)
| apache-2.0 |
kevin-intel/scikit-learn | examples/miscellaneous/plot_display_object_visualization.py | 17 | 3676 | """
===================================
Visualizations with Display Objects
===================================
.. currentmodule:: sklearn.metrics
In this example, we will construct display objects,
:class:`ConfusionMatrixDisplay`, :class:`RocCurveDisplay`, and
:class:`PrecisionRecallDisplay` directly from their respective metrics. This
is an alternative to using their corresponding plot functions when
a model's predictions are already computed or expensive to compute. Note that
this is advanced usage, and in general we recommend using their respective
plot functions.
"""
print(__doc__)
# %%
# Load Data and train model
# -------------------------
# For this example, we load a blood transfusion service center data set from
# `OpenML <https://www.openml.org/d/1464>`_. This is a binary classification
# problem where the target is whether an individual donated blood. Then the
# data is split into a train and test dataset and a logistic regression is
# fitted with the train dataset.
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
X, y = fetch_openml(data_id=1464, return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)
clf = make_pipeline(StandardScaler(), LogisticRegression(random_state=0))
clf.fit(X_train, y_train)
# %%
# Create :class:`ConfusionMatrixDisplay`
##############################################################################
# With the fitted model, we compute the predictions of the model on the test
# dataset. These predictions are used to compute the confusion matrix which
# is plotted with the :class:`ConfusionMatrixDisplay`
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cm_display = ConfusionMatrixDisplay(cm).plot()
# %%
# Create :class:`RocCurveDisplay`
##############################################################################
# The roc curve requires either the probabilities or the non-thresholded
# decision values from the estimator. Since the logistic regression provides
# a decision function, we will use it to plot the roc curve:
from sklearn.metrics import roc_curve
from sklearn.metrics import RocCurveDisplay
y_score = clf.decision_function(X_test)
fpr, tpr, _ = roc_curve(y_test, y_score, pos_label=clf.classes_[1])
roc_display = RocCurveDisplay(fpr=fpr, tpr=tpr).plot()
# %%
# Create :class:`PrecisionRecallDisplay`
##############################################################################
# Similarly, the precision recall curve can be plotted using `y_score` from
# the previous section.
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import PrecisionRecallDisplay
prec, recall, _ = precision_recall_curve(y_test, y_score,
pos_label=clf.classes_[1])
pr_display = PrecisionRecallDisplay(precision=prec, recall=recall).plot()
# %%
# Combining the display objects into a single plot
##############################################################################
# The display objects store the computed values that were passed as arguments.
# This allows for the visualizations to be easily combined using matplotlib's
# API. In the following example, we place the displays next to each other in a
# row.
# sphinx_gallery_thumbnail_number = 4
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
roc_display.plot(ax=ax1)
pr_display.plot(ax=ax2)
plt.show()
| bsd-3-clause |
Deepomatic/DIGITS | digits/dataset/generic/views.py | 3 | 7099 | # Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import caffe_pb2
import flask
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
from .forms import GenericDatasetForm
from .job import GenericDatasetJob
from digits import extensions, utils
from digits.utils.constants import COLOR_PALETTE_ATTRIBUTE
from digits.utils.routing import request_wants_json, job_from_request
from digits.utils.lmdbreader import DbReader
from digits.webapp import scheduler
blueprint = flask.Blueprint(__name__, __name__)
@blueprint.route('/new/<extension_id>', methods=['GET'])
@utils.auth.requires_login
def new(extension_id):
"""
Returns a form for a new GenericDatasetJob
"""
form = GenericDatasetForm()
# Is there a request to clone a job with ?clone=<job_id>
utils.forms.fill_form_if_cloned(form)
extension = extensions.data.get_extension(extension_id)
if extension is None:
raise ValueError("Unknown extension '%s'" % extension_id)
extension_form = extension.get_dataset_form()
# Is there a request to clone a job with ?clone=<job_id>
utils.forms.fill_form_if_cloned(extension_form)
template, context = extension.get_dataset_template(extension_form)
rendered_extension = flask.render_template_string(template, **context)
return flask.render_template(
'datasets/generic/new.html',
extension_title=extension.get_title(),
extension_id=extension_id,
extension_html=rendered_extension,
form=form
)
@blueprint.route('/create/<extension_id>.json', methods=['POST'])
@blueprint.route('/create/<extension_id>',
methods=['POST'],
strict_slashes=False)
@utils.auth.requires_login(redirect=False)
def create(extension_id):
"""
Creates a new GenericDatasetJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = GenericDatasetForm()
form_valid = form.validate_on_submit()
extension_class = extensions.data.get_extension(extension_id)
extension_form = extension_class.get_dataset_form()
extension_form_valid = extension_form.validate_on_submit()
if not (extension_form_valid and form_valid):
# merge errors
errors = form.errors.copy()
errors.update(extension_form.errors)
template, context = extension_class.get_dataset_template(
extension_form)
rendered_extension = flask.render_template_string(
template,
**context)
if request_wants_json():
return flask.jsonify({'errors': errors}), 400
else:
return flask.render_template(
'datasets/generic/new.html',
extension_title=extension_class.get_title(),
extension_id=extension_id,
extension_html=rendered_extension,
form=form,
errors=errors), 400
# create instance of extension class
extension = extension_class(**extension_form.data)
job = None
try:
# create job
job = GenericDatasetJob(
username=utils.auth.get_username(),
name=form.dataset_name.data,
group=form.group_name.data,
backend=form.dsopts_backend.data,
feature_encoding=form.dsopts_feature_encoding.data,
label_encoding=form.dsopts_label_encoding.data,
batch_size=int(form.dsopts_batch_size.data),
num_threads=int(form.dsopts_num_threads.data),
force_same_shape=form.dsopts_force_same_shape.data,
extension_id=extension_id,
extension_userdata=extension.get_user_data(),
)
# Save form data with the job so we can easily clone it later.
utils.forms.save_form_to_job(job, form)
utils.forms.save_form_to_job(job, extension_form)
# schedule tasks
scheduler.add_job(job)
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for(
'digits.dataset.views.show',
job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
@blueprint.route('/explore', methods=['GET'])
def explore():
"""
Returns a gallery consisting of the images of one of the dbs
"""
job = job_from_request()
# Get LMDB
db = job.path(flask.request.args.get('db'))
db_path = job.path(db)
if (os.path.basename(db_path) == 'labels' and
COLOR_PALETTE_ATTRIBUTE in job.extension_userdata and
job.extension_userdata[COLOR_PALETTE_ATTRIBUTE]):
# assume single-channel 8-bit palette
palette = job.extension_userdata[COLOR_PALETTE_ATTRIBUTE]
palette = np.array(palette).reshape((len(palette) / 3, 3)) / 255.
# normalize input pixels to [0,1]
norm = mpl.colors.Normalize(vmin=0, vmax=255)
# create map
cmap = plt.cm.ScalarMappable(norm=norm,
cmap=mpl.colors.ListedColormap(palette))
else:
cmap = None
page = int(flask.request.args.get('page', 0))
size = int(flask.request.args.get('size', 25))
reader = DbReader(db_path)
count = 0
imgs = []
min_page = max(0, page - 5)
total_entries = reader.total_entries
max_page = min((total_entries - 1) / size, page + 5)
pages = range(min_page, max_page + 1)
for key, value in reader.entries():
if count >= page * size:
datum = caffe_pb2.Datum()
datum.ParseFromString(value)
if not datum.encoded:
raise RuntimeError("Expected encoded database")
s = StringIO()
s.write(datum.data)
s.seek(0)
img = PIL.Image.open(s)
if cmap and img.mode in ['L', '1']:
data = np.array(img)
data = cmap.to_rgba(data) * 255
data = data.astype('uint8')
# keep RGB values only, remove alpha channel
data = data[:, :, 0:3]
img = PIL.Image.fromarray(data)
imgs.append({"label": None, "b64": utils.image.embed_image_html(img)})
count += 1
if len(imgs) >= size:
break
return flask.render_template(
'datasets/images/explore.html',
page=page, size=size, job=job, imgs=imgs, labels=None,
pages=pages, label=None, total_entries=total_entries, db=db)
def show(job, related_jobs=None):
"""
Called from digits.dataset.views.show()
"""
return flask.render_template('datasets/generic/show.html', job=job, related_jobs=related_jobs)
def summary(job):
"""
Return a short HTML summary of a GenericDatasetJob
"""
return flask.render_template('datasets/generic/summary.html', dataset=job)
| bsd-3-clause |
russel1237/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = np.floor(n_samples * missing_rate)
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
herilalaina/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 29 | 1873 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier(max_iter=100)),
("ASGD", SGDClassifier(average=True, max_iter=100)),
("Perceptron", Perceptron(tol=1e-3)),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0, tol=1e-4)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0, tol=1e-4)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
nchammas/spark | python/pyspark/sql/pandas/utils.py | 6 | 2633 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.23.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "1.0.0"
from distutils.version import LooseVersion
import os
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
if os.environ.get("ARROW_PRE_0_15_IPC_FORMAT", "0") == "1":
raise RuntimeError("Arrow legacy IPC format is not supported in PySpark, "
"please unset ARROW_PRE_0_15_IPC_FORMAT")
| apache-2.0 |
dmitriz/zipline | tests/pipeline/test_engine.py | 3 | 26134 | """
Tests for SimplePipelineEngine
"""
from __future__ import division
from collections import OrderedDict
from unittest import TestCase
from itertools import product
from numpy import (
array,
full,
nan,
tile,
zeros,
float32,
concatenate,
)
from pandas import (
DataFrame,
date_range,
Int64Index,
MultiIndex,
rolling_mean,
Series,
Timestamp,
)
from pandas.compat.chainmap import ChainMap
from pandas.util.testing import assert_frame_equal
from six import iteritems, itervalues
from testfixtures import TempDirectory
from zipline.pipeline.loaders.synthetic import (
ConstantLoader,
NullAdjustmentReader,
SyntheticDailyBarWriter,
)
from zipline.data.us_equity_pricing import BcolzDailyBarReader
from zipline.finance.trading import TradingEnvironment
from zipline.pipeline import Pipeline
from zipline.pipeline.data import USEquityPricing, DataSet, Column
from zipline.pipeline.loaders.frame import DataFrameLoader, MULTIPLY
from zipline.pipeline.loaders.equity_pricing_loader import (
USEquityPricingLoader,
)
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline import CustomFactor
from zipline.pipeline.factors import (
MaxDrawdown,
SimpleMovingAverage,
)
from zipline.utils.memoize import lazyval
from zipline.utils.test_utils import (
make_rotating_asset_info,
make_simple_asset_info,
product_upper_triangle,
check_arrays,
)
class RollingSumDifference(CustomFactor):
window_length = 3
inputs = [USEquityPricing.open, USEquityPricing.close]
def compute(self, today, assets, out, open, close):
out[:] = (open - close).sum(axis=0)
class AssetID(CustomFactor):
"""
CustomFactor that returns the AssetID of each asset.
Useful for providing a Factor that produces a different value for each
asset.
"""
window_length = 1
# HACK: We currently decide whether to load or compute a Term based on the
# length of its inputs. This means we have to provide a dummy input.
inputs = [USEquityPricing.close]
def compute(self, today, assets, out, close):
out[:] = assets
def assert_multi_index_is_product(testcase, index, *levels):
"""Assert that a MultiIndex contains the product of `*levels`."""
testcase.assertIsInstance(
index, MultiIndex, "%s is not a MultiIndex" % index
)
testcase.assertEqual(set(index), set(product(*levels)))
class ColumnArgs(tuple):
"""A tuple of Columns that defines equivalence based on the order of the
columns' DataSets, instead of the columns themselves. This is used when
comparing the columns passed to a loader's load_adjusted_array method,
since we want to assert that they are ordered by DataSet.
"""
def __new__(cls, *cols):
return super(ColumnArgs, cls).__new__(cls, cols)
@classmethod
def sorted_by_ds(cls, *cols):
return cls(*sorted(cols, key=lambda col: col.dataset))
def by_ds(self):
return tuple(col.dataset for col in self)
def __eq__(self, other):
return set(self) == set(other) and self.by_ds() == other.by_ds()
def __hash__(self):
return hash(frozenset(self))
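# A hedged illustration of the equivalence rule above (the columns are the real
# USEquityPricing columns imported in this module; the comparison itself is
# only an example):
#   ColumnArgs.sorted_by_ds(USEquityPricing.close, USEquityPricing.open) == \
#       ColumnArgs.sorted_by_ds(USEquityPricing.open, USEquityPricing.close)
#   # -> True: same set of columns and the same per-DataSet ordering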
class RecordingConstantLoader(ConstantLoader):
def __init__(self, *args, **kwargs):
super(RecordingConstantLoader, self).__init__(*args, **kwargs)
self.load_calls = []
def load_adjusted_array(self, columns, dates, assets, mask):
self.load_calls.append(ColumnArgs(*columns))
return super(RecordingConstantLoader, self).load_adjusted_array(
columns, dates, assets, mask,
)
class RollingSumSum(CustomFactor):
def compute(self, today, assets, out, *inputs):
assert len(self.inputs) == len(inputs)
out[:] = sum(inputs).sum(axis=0)
class ConstantInputTestCase(TestCase):
def setUp(self):
self.constants = {
# Every day, assume every stock starts at 2, goes down to 1,
# goes up to 4, and finishes at 3.
USEquityPricing.low: 1,
USEquityPricing.open: 2,
USEquityPricing.close: 3,
USEquityPricing.high: 4,
}
self.assets = [1, 2, 3]
self.dates = date_range('2014-01', '2014-03', freq='D', tz='UTC')
self.loader = ConstantLoader(
constants=self.constants,
dates=self.dates,
assets=self.assets,
)
self.asset_info = make_simple_asset_info(
self.assets,
start_date=self.dates[0],
end_date=self.dates[-1],
)
environment = TradingEnvironment()
environment.write_data(equities_df=self.asset_info)
self.asset_finder = environment.asset_finder
def test_bad_dates(self):
loader = self.loader
engine = SimplePipelineEngine(
lambda column: loader, self.dates, self.asset_finder,
)
p = Pipeline()
msg = "start_date must be before or equal to end_date .*"
with self.assertRaisesRegexp(ValueError, msg):
engine.run_pipeline(p, self.dates[2], self.dates[1])
def test_same_day_pipeline(self):
loader = self.loader
engine = SimplePipelineEngine(
lambda column: loader, self.dates, self.asset_finder,
)
factor = AssetID()
asset = self.assets[0]
p = Pipeline(columns={'f': factor}, screen=factor <= asset)
# The crux of this is that when we run the pipeline for a single day
# (i.e. start and end dates are the same) we should accurately get
# data for the day prior.
result = engine.run_pipeline(p, self.dates[1], self.dates[1])
self.assertEqual(result['f'][0], 1.0)
def test_screen(self):
loader = self.loader
finder = self.asset_finder
assets = array(self.assets)
engine = SimplePipelineEngine(
lambda column: loader, self.dates, self.asset_finder,
)
num_dates = 5
dates = self.dates[10:10 + num_dates]
factor = AssetID()
for asset in assets:
p = Pipeline(columns={'f': factor}, screen=factor <= asset)
result = engine.run_pipeline(p, dates[0], dates[-1])
expected_sids = assets[assets <= asset]
expected_assets = finder.retrieve_all(expected_sids)
expected_result = DataFrame(
index=MultiIndex.from_product([dates, expected_assets]),
data=tile(expected_sids.astype(float), [len(dates)]),
columns=['f'],
)
assert_frame_equal(result, expected_result)
def test_single_factor(self):
loader = self.loader
finder = self.asset_finder
assets = self.assets
engine = SimplePipelineEngine(
lambda column: loader, self.dates, self.asset_finder,
)
result_shape = (num_dates, num_assets) = (5, len(assets))
dates = self.dates[10:10 + num_dates]
factor = RollingSumDifference()
expected_result = -factor.window_length
# Since every asset will pass the screen, these should be equivalent.
pipelines = [
Pipeline(columns={'f': factor}),
Pipeline(
columns={'f': factor},
screen=factor.eq(expected_result),
),
]
for p in pipelines:
result = engine.run_pipeline(p, dates[0], dates[-1])
self.assertEqual(set(result.columns), {'f'})
assert_multi_index_is_product(
self, result.index, dates, finder.retrieve_all(assets)
)
check_arrays(
result['f'].unstack().values,
full(result_shape, expected_result),
)
def test_multiple_rolling_factors(self):
loader = self.loader
finder = self.asset_finder
assets = self.assets
engine = SimplePipelineEngine(
lambda column: loader, self.dates, self.asset_finder,
)
shape = num_dates, num_assets = (5, len(assets))
dates = self.dates[10:10 + num_dates]
short_factor = RollingSumDifference(window_length=3)
long_factor = RollingSumDifference(window_length=5)
high_factor = RollingSumDifference(
window_length=3,
inputs=[USEquityPricing.open, USEquityPricing.high],
)
pipeline = Pipeline(
columns={
'short': short_factor,
'long': long_factor,
'high': high_factor,
}
)
results = engine.run_pipeline(pipeline, dates[0], dates[-1])
self.assertEqual(set(results.columns), {'short', 'high', 'long'})
assert_multi_index_is_product(
self, results.index, dates, finder.retrieve_all(assets)
)
# row-wise sum over an array whose values are all (1 - 2)
check_arrays(
results['short'].unstack().values,
full(shape, -short_factor.window_length),
)
check_arrays(
results['long'].unstack().values,
full(shape, -long_factor.window_length),
)
# row-wise sum over an array whose values are all (1 - 3)
check_arrays(
results['high'].unstack().values,
full(shape, -2 * high_factor.window_length),
)
def test_numeric_factor(self):
constants = self.constants
loader = self.loader
engine = SimplePipelineEngine(
lambda column: loader, self.dates, self.asset_finder,
)
num_dates = 5
dates = self.dates[10:10 + num_dates]
high, low = USEquityPricing.high, USEquityPricing.low
open, close = USEquityPricing.open, USEquityPricing.close
high_minus_low = RollingSumDifference(inputs=[high, low])
open_minus_close = RollingSumDifference(inputs=[open, close])
avg = (high_minus_low + open_minus_close) / 2
results = engine.run_pipeline(
Pipeline(
columns={
'high_low': high_minus_low,
'open_close': open_minus_close,
'avg': avg,
},
),
dates[0],
dates[-1],
)
high_low_result = results['high_low'].unstack()
expected_high_low = 3.0 * (constants[high] - constants[low])
assert_frame_equal(
high_low_result,
DataFrame(expected_high_low, index=dates, columns=self.assets),
)
open_close_result = results['open_close'].unstack()
expected_open_close = 3.0 * (constants[open] - constants[close])
assert_frame_equal(
open_close_result,
DataFrame(expected_open_close, index=dates, columns=self.assets),
)
avg_result = results['avg'].unstack()
expected_avg = (expected_high_low + expected_open_close) / 2.0
assert_frame_equal(
avg_result,
DataFrame(expected_avg, index=dates, columns=self.assets),
)
def test_rolling_and_nonrolling(self):
open_ = USEquityPricing.open
close = USEquityPricing.close
volume = USEquityPricing.volume
# Test for thirty days up to the last day that we think all
# the assets existed.
dates_to_test = self.dates[-30:]
constants = {open_: 1, close: 2, volume: 3}
loader = ConstantLoader(
constants=constants,
dates=self.dates,
assets=self.assets,
)
engine = SimplePipelineEngine(
lambda column: loader, self.dates, self.asset_finder,
)
sumdiff = RollingSumDifference()
result = engine.run_pipeline(
Pipeline(
columns={
'sumdiff': sumdiff,
'open': open_.latest,
'close': close.latest,
'volume': volume.latest,
},
),
dates_to_test[0],
dates_to_test[-1]
)
self.assertIsNotNone(result)
self.assertEqual(
{'sumdiff', 'open', 'close', 'volume'},
set(result.columns)
)
result_index = self.assets * len(dates_to_test)
result_shape = (len(result_index),)
check_arrays(
result['sumdiff'],
Series(index=result_index, data=full(result_shape, -3)),
)
for name, const in [('open', 1), ('close', 2), ('volume', 3)]:
check_arrays(
result[name],
Series(index=result_index, data=full(result_shape, const)),
)
def test_loader_given_multiple_columns(self):
class Loader1DataSet1(DataSet):
col1 = Column(float32)
col2 = Column(float32)
class Loader1DataSet2(DataSet):
col1 = Column(float32)
col2 = Column(float32)
class Loader2DataSet(DataSet):
col1 = Column(float32)
col2 = Column(float32)
constants1 = {Loader1DataSet1.col1: 1,
Loader1DataSet1.col2: 2,
Loader1DataSet2.col1: 3,
Loader1DataSet2.col2: 4}
loader1 = RecordingConstantLoader(constants=constants1,
dates=self.dates,
assets=self.assets)
constants2 = {Loader2DataSet.col1: 5,
Loader2DataSet.col2: 6}
loader2 = RecordingConstantLoader(constants=constants2,
dates=self.dates,
assets=self.assets)
engine = SimplePipelineEngine(
lambda column:
loader2 if column.dataset == Loader2DataSet else loader1,
self.dates, self.asset_finder,
)
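# Loader dispatch here keys off ``column.dataset``: columns from
# Loader2DataSet go to loader2, everything else to loader1. A dict-based
# ``get_loader`` (a sketch, not what this test does; compare the
# column-keyed dict used later in FrameInputTestCase) could look like:
#
#     dataset_loaders = {
#         Loader1DataSet1: loader1,
#         Loader1DataSet2: loader1,
#         Loader2DataSet: loader2,
#     }
#     engine = SimplePipelineEngine(
#         lambda column: dataset_loaders[column.dataset],
#         self.dates, self.asset_finder,
#     )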
pipe_col1 = RollingSumSum(inputs=[Loader1DataSet1.col1,
Loader1DataSet2.col1,
Loader2DataSet.col1],
window_length=2)
pipe_col2 = RollingSumSum(inputs=[Loader1DataSet1.col2,
Loader1DataSet2.col2,
Loader2DataSet.col2],
window_length=3)
pipe_col3 = RollingSumSum(inputs=[Loader2DataSet.col1],
window_length=3)
columns = OrderedDict([
('pipe_col1', pipe_col1),
('pipe_col2', pipe_col2),
('pipe_col3', pipe_col3),
])
result = engine.run_pipeline(
Pipeline(columns=columns),
self.dates[2], # index is >= the largest window length - 1
self.dates[-1]
)
min_window = min(pip_col.window_length
for pip_col in itervalues(columns))
col_to_val = ChainMap(constants1, constants2)
vals = {name: (sum(col_to_val[col] for col in pipe_col.inputs)
* pipe_col.window_length)
for name, pipe_col in iteritems(columns)}
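# Worked expectations from the constants above (window_length * sum of the
# per-input constants): pipe_col1 = 2 * (1 + 3 + 5) = 18,
# pipe_col2 = 3 * (2 + 4 + 6) = 36, pipe_col3 = 3 * 5 = 15. Columns whose
# window is longer than ``min_window`` additionally get leading NaN rows in
# the expected frame built below.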
index = MultiIndex.from_product([self.dates[2:], self.assets])
expected = DataFrame(
data={col:
concatenate((
full((columns[col].window_length - min_window)
* index.levshape[1],
nan),
full((index.levshape[0]
- (columns[col].window_length - min_window))
* index.levshape[1],
val)))
for col, val in iteritems(vals)},
index=index,
columns=columns)
assert_frame_equal(result, expected)
self.assertEqual(set(loader1.load_calls),
{ColumnArgs.sorted_by_ds(Loader1DataSet1.col1,
Loader1DataSet2.col1),
ColumnArgs.sorted_by_ds(Loader1DataSet1.col2,
Loader1DataSet2.col2)})
self.assertEqual(set(loader2.load_calls),
{ColumnArgs.sorted_by_ds(Loader2DataSet.col1,
Loader2DataSet.col2)})
class FrameInputTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
day = cls.env.trading_day
cls.assets = Int64Index([1, 2, 3])
cls.dates = date_range(
'2015-01-01',
'2015-01-31',
freq=day,
tz='UTC',
)
asset_info = make_simple_asset_info(
cls.assets,
start_date=cls.dates[0],
end_date=cls.dates[-1],
)
cls.env.write_data(equities_df=asset_info)
cls.asset_finder = cls.env.asset_finder
@classmethod
def tearDownClass(cls):
del cls.env
del cls.asset_finder
@lazyval
def base_mask(self):
return self.make_frame(True)
def make_frame(self, data):
return DataFrame(data, columns=self.assets, index=self.dates)
def test_compute_with_adjustments(self):
dates, assets = self.dates, self.assets
low, high = USEquityPricing.low, USEquityPricing.high
apply_idxs = [3, 10, 16]
def apply_date(idx, offset=0):
return dates[apply_idxs[idx] + offset]
adjustments = DataFrame.from_records(
[
dict(
kind=MULTIPLY,
sid=assets[1],
value=2.0,
start_date=None,
end_date=apply_date(0, offset=-1),
apply_date=apply_date(0),
),
dict(
kind=MULTIPLY,
sid=assets[1],
value=3.0,
start_date=None,
end_date=apply_date(1, offset=-1),
apply_date=apply_date(1),
),
dict(
kind=MULTIPLY,
sid=assets[1],
value=5.0,
start_date=None,
end_date=apply_date(2, offset=-1),
apply_date=apply_date(2),
),
]
)
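# Each record above is a MULTIPLY adjustment against sid ``assets[1]``:
# presumably the DataFrameLoader applies ``value`` to that asset's baseline
# rows from ``start_date`` through ``end_date`` (the day before the
# corresponding apply_date) for any query date on or after ``apply_date``,
# which is what the pre-divided ``high_base`` below is constructed to undo.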
low_base = self.make_frame(30.0)
low_loader = DataFrameLoader(low, low_base.copy(), adjustments=None)
# Pre-apply inverse of adjustments to the baseline.
high_base = self.make_frame(30.0)
high_base.iloc[:apply_idxs[0], 1] /= 2.0
high_base.iloc[:apply_idxs[1], 1] /= 3.0
high_base.iloc[:apply_idxs[2], 1] /= 5.0
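# Worked values for the pre-divided baseline: rows before apply_idxs[0]
# become 30 / (2 * 3 * 5) = 1.0, rows in [apply_idxs[0], apply_idxs[1])
# become 30 / (3 * 5) = 2.0, rows in [apply_idxs[1], apply_idxs[2]) become
# 30 / 5 = 6.0, and later rows stay 30.0. As of any query date, the
# adjustments already in effect scale earlier rows back up to that date's
# baseline value, so every moving average computed below is expected to
# equal ``high_base`` at the query date (hence the comparison against
# ``high_base.iloc[iloc_bounds]``).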
high_loader = DataFrameLoader(high, high_base, adjustments)
engine = SimplePipelineEngine(
{low: low_loader, high: high_loader}.__getitem__,
self.dates,
self.asset_finder,
)
for window_length in range(1, 4):
low_mavg = SimpleMovingAverage(
inputs=[USEquityPricing.low],
window_length=window_length,
)
high_mavg = SimpleMovingAverage(
inputs=[USEquityPricing.high],
window_length=window_length,
)
bounds = product_upper_triangle(range(window_length, len(dates)))
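# ``product_upper_triangle`` presumably yields every (start, stop) pair with
# start <= stop from the given range, so the assertions below are exercised
# over all query windows that leave room for the factor's lookback.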
for start, stop in bounds:
results = engine.run_pipeline(
Pipeline(
columns={'low': low_mavg, 'high': high_mavg}
),
dates[start],
dates[stop],
)
self.assertEqual(set(results.columns), {'low', 'high'})
iloc_bounds = slice(start, stop + 1) # +1 to include end date
low_results = results.unstack()['low']
assert_frame_equal(low_results, low_base.iloc[iloc_bounds])
high_results = results.unstack()['high']
assert_frame_equal(high_results, high_base.iloc[iloc_bounds])
class SyntheticBcolzTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.first_asset_start = Timestamp('2015-04-01', tz='UTC')
cls.env = TradingEnvironment()
cls.trading_day = day = cls.env.trading_day
cls.calendar = date_range('2015', '2015-08', tz='UTC', freq=day)
cls.asset_info = make_rotating_asset_info(
num_assets=6,
first_start=cls.first_asset_start,
frequency=day,
periods_between_starts=4,
asset_lifetime=8,
)
cls.last_asset_end = cls.asset_info['end_date'].max()
cls.all_assets = cls.asset_info.index
cls.env.write_data(equities_df=cls.asset_info)
cls.finder = cls.env.asset_finder
cls.temp_dir = TempDirectory()
cls.temp_dir.create()
try:
cls.writer = SyntheticDailyBarWriter(
asset_info=cls.asset_info[['start_date', 'end_date']],
calendar=cls.calendar,
)
table = cls.writer.write(
cls.temp_dir.getpath('testdata.bcolz'),
cls.calendar,
cls.all_assets,
)
cls.pipeline_loader = USEquityPricingLoader(
BcolzDailyBarReader(table),
NullAdjustmentReader(),
)
except:
cls.temp_dir.cleanup()
raise
@classmethod
def tearDownClass(cls):
del cls.env
cls.temp_dir.cleanup()
def write_nans(self, df):
"""
Write nans to the locations in data corresponding to the (date, asset)
pairs for which we wouldn't have data for `asset` on `date` in a
backtest.
Parameters
----------
df : pd.DataFrame
A DataFrame with a DatetimeIndex as index and an object index of
Assets as columns.
This means that we write nans for dates after an asset's end_date and
**on or before** an asset's start_date. The assymetry here is because
of the fact that, on the morning of an asset's first date, we haven't
yet seen any trades for that asset, so we wouldn't be able to show any
useful data to the user.
"""
# Mask out with nans all the dates on which each asset didn't exist
index = df.index
min_, max_ = index[[0, -1]]
for asset in df.columns:
col = df.columns.get_loc(asset)
if asset.start_date >= min_:
start = index.get_loc(asset.start_date, method='bfill')
df.iloc[:start + 1, col] = nan  # +1 to overwrite start_date
if asset.end_date <= max_:
end = index.get_loc(asset.end_date)
df.iloc[end + 1:, col] = nan  # +1 to *not* overwrite end_date
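# A hypothetical example of the masking above: for an asset with
# start_date == index[3] and end_date == index[10], positions 0..3
# (on or before the start) and 11 onward (after the end) are set to NaN,
# while positions 4..10 keep their computed values.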
def test_SMA(self):
engine = SimplePipelineEngine(
lambda column: self.pipeline_loader,
self.env.trading_days,
self.finder,
)
window_length = 5
assets = self.all_assets
dates = date_range(
self.first_asset_start + self.trading_day,
self.last_asset_end,
freq=self.trading_day,
)
dates_to_test = dates[window_length:]
SMA = SimpleMovingAverage(
inputs=(USEquityPricing.close,),
window_length=window_length,
)
results = engine.run_pipeline(
Pipeline(columns={'sma': SMA}),
dates_to_test[0],
dates_to_test[-1],
)
# Shift back the raw inputs by a trading day because we expect our
# computed results to be computed using values anchored on the
# **previous** day's data.
expected_raw = rolling_mean(
self.writer.expected_values_2d(
dates - self.trading_day, assets, 'close',
),
window_length,
min_periods=1,
)
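# The leading rows of ``expected_raw`` are partial-window means because of
# ``min_periods=1``; the ``[window_length:]`` truncation below drops them,
# so only full-window averages are compared against the pipeline output.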
expected = DataFrame(
# Truncate off the extra rows needed to compute the SMAs.
expected_raw[window_length:],
index=dates_to_test, # dates_to_test is dates[window_length:]
columns=self.finder.retrieve_all(assets),
)
self.write_nans(expected)
result = results['sma'].unstack()
assert_frame_equal(result, expected)
def test_drawdown(self):
# The monotonically-increasing data produced by SyntheticDailyBarWriter
# exercises two pathological cases for MaxDrawdown. The actual computed
# results are pretty much useless (everything is either NaN or zero), but
# verifying that we correctly handle those corner cases is valuable.
engine = SimplePipelineEngine(
lambda column: self.pipeline_loader,
self.env.trading_days,
self.finder,
)
window_length = 5
assets = self.all_assets
dates = date_range(
self.first_asset_start + self.trading_day,
self.last_asset_end,
freq=self.trading_day,
)
dates_to_test = dates[window_length:]
drawdown = MaxDrawdown(
inputs=(USEquityPricing.close,),
window_length=window_length,
)
results = engine.run_pipeline(
Pipeline(columns={'drawdown': drawdown}),
dates_to_test[0],
dates_to_test[-1],
)
# We expect NaNs when the asset was undefined, otherwise 0 everywhere,
# since the input is always increasing.
expected = DataFrame(
data=zeros((len(dates_to_test), len(assets)), dtype=float),
index=dates_to_test,
columns=self.finder.retrieve_all(assets),
)
self.write_nans(expected)
result = results['drawdown'].unstack()
assert_frame_equal(expected, result)
| apache-2.0 |