repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
mindriot101/srw | srw/plotting.py | 1 | 1154 | import matplotlib.pyplot as plt
from astropy import units as u
from .logs import get_logger
logger = get_logger(__name__)
try:
import ds9
except ImportError:
logger.warning('No ds9 package available. '
'Related functions are not available')
no_ds9 = True
else:
no_ds9 = False
def plot_transiting(lc, period, epoch, ax=None, unit='mjd', colour=None):
    """Plot a lightcurve folded on the given period and transit epoch.

    Parameters
    ----------
    lc : lightcurve object exposing ``compute_phase``, ``phase``, ``flux``
        and ``fluxerr``
    period, epoch : transit ephemeris; ``epoch`` is interpreted in ``unit``
    ax : matplotlib axes to draw on (current axes when None)
    unit : 'mjd' or 'jd'; JD epochs are converted to MJD before folding
    colour : matplotlib colour passed to both plot calls
    """
    if unit.lower() == 'jd':
        epoch -= 2400000.5

    lc.compute_phase(period, epoch)
    axes = plt.gca() if ax is None else ax

    # Wrap phases above 0.8 to negative values so the transit is centred
    folded = lc.phase.copy()
    folded[folded > 0.8] -= 1.0

    axes.errorbar(folded, lc.flux, lc.fluxerr, ls='None', marker='None',
                  capsize=0., alpha=0.3, color=colour)
    axes.plot(folded, lc.flux, '.', ms=2., color=colour)
def show_on_image(lc, filename, frame_index=0, radius=3 * u.pix):
    """Open ``filename`` in ds9 and circle the source position at
    ``frame_index``, taken from ``lc.ccdx``/``lc.ccdy``.

    Raises NotImplementedError when the optional ds9 package is missing.
    """
    if no_ds9:
        raise NotImplementedError("Cannot find module ds9")

    viewer = ds9.ds9()
    viewer.set('file {0}'.format(filename))

    xpos, ypos = lc.ccdx[frame_index], lc.ccdy[frame_index]
    viewer.set('region command {{circle {x} {y} {radius}}}'.format(
        x=xpos, y=ypos, radius=radius.to(u.pix).value))
    viewer.set('zoom to 8')
| mit |
FerranGarcia/shape_learning | scripts/visualize_dataset.py | 3 | 3246 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
from shape_learning.shape_modeler import ShapeModeler
import argparse
# Command-line interface: which shapes to display and where to find them.
parser = argparse.ArgumentParser(description='Displays all the characters in the given dataset')
parser.add_argument('shapes', action="store", nargs='?', default = "[a-z]",
                    help='A regexp the shapes name should match to be visualised')
parser.add_argument('--dataset_directory', action="store", nargs='?',
                    help='The directory of the dataset to find the shape dataset')
parser.add_argument('--parameters', action="store", nargs='?',
                    help='A predefined set of principle parameters to use')
# Module-level matplotlib state. NOTE(review): sliders/mainPlot/fig appear
# reserved for the Slider/Button widgets imported above but are unused in
# this view — confirm against the rest of the script.
axcolor = 'lightgoldenrodyellow'
sliders = None
mainPlot = None
fig = None
def prepareShapesModel(datasetDirectory, regexp=".*"):
    """Build a ShapeModeler for every ``.dat`` file in ``datasetDirectory``
    whose basename matches ``regexp``.

    Returns a dict mapping shape name (file basename without extension)
    to its ShapeModeler. Parameter files (path containing "params") are
    skipped.
    """
    import glob
    import os.path
    import re

    pattern = re.compile(regexp)
    models = {}
    for path in glob.glob(datasetDirectory + '/*.dat'):
        # parameter files live alongside the shape datasets; skip them
        if "params" in path:
            continue
        name = os.path.splitext(os.path.basename(path))[0]
        if pattern.match(name):
            models[name] = ShapeModeler(init_filename=path,
                                        num_principle_components=5)
    return models
def parse_parameters(filename):
    """Parse a parameter file into ``{section name: column vector}``.

    File format: lines starting with ``#`` and blank lines are ignored;
    a ``[name]`` line starts a new section; any other line is a
    comma-separated list of floats stored as an (n, 1) numpy array under
    the current section name.

    Returns
    -------
    dict mapping section name to numpy array of shape (n, 1)
    """
    params = {}
    with open(filename) as f:
        key = None
        for l in f.readlines():
            if l.startswith("#") or l.rstrip() == "":
                continue
            if l.startswith("["):
                # Strip the surrounding brackets. Using rstrip() instead of
                # slicing off a hard-coded trailing newline ("l[1:-2]") keeps
                # the key intact when the line has trailing whitespace or the
                # file does not end with a newline.
                key = l.rstrip()[1:-1]
            else:
                params[key] = np.array([[float(p)] for p in l.split(',')])
    return params
if __name__ == "__main__":
    # parse arguments
    args = parser.parse_args()
    datasetDirectory = args.dataset_directory
    regexp = args.shapes
    initial_params = {}
    if args.parameters:
        initial_params = parse_parameters(args.parameters)
        print("Got initial params for letters %s" % initial_params.keys())
    if(not datasetDirectory):
        import inspect
        # fall back to the dataset shipped alongside the installed package
        fileName = inspect.getsourcefile(ShapeModeler)
        installDirectory = fileName.split('/lib')[0]
        datasetDirectory = installDirectory + '/share/shape_learning/letter_model_datasets/uji_pen_chars2'
    shapes = prepareShapesModel(datasetDirectory, regexp)
    print("I will display the following shapes:\n%s" % " ".join(shapes.keys()))
    for n, k in enumerate(sorted(shapes.keys())):
        shape = shapes[k].meanShape
        # NOTE: use integer division -- plain '/' yields floats on Python 3,
        # which breaks plt.subplot() and the slice indices below.
        plt.subplot(5, len(shapes) // 5 + 1, n + 1)
        # the shape vector stores all x coordinates followed by all y
        numPointsInShape = len(shape) // 2
        x_shape = shape[0:numPointsInShape]
        y_shape = shape[numPointsInShape:]
        # y axis is negated so the letters appear upright
        plt.plot(x_shape, -y_shape, c='k')
        if k in initial_params:
            # overlay the shape generated from the provided parameters
            shape = shapes[k].makeShape(initial_params[k])
            x_shape = shape[0:numPointsInShape]
            y_shape = shape[numPointsInShape:]
            plt.plot(x_shape, -y_shape, c='r')
        plt.title("%s (%d samples)" % (k, shapes[k].numShapesInDataset))
        frame1 = plt.gca()
        frame1.axes.get_xaxis().set_visible(False)
        frame1.axes.get_yaxis().set_visible(False)
    plt.show()
| isc |
radjkarl/imgProcessor | imgProcessor/interpolate/interpolate2dUnstructuredIDW.py | 1 | 1926 | from __future__ import division
import numpy as np
from numba import njit
@njit
def interpolate2dUnstructuredIDW(x, y, v, grid, power=2):
    '''
    Inverse-distance-weighted interpolation of scattered samples onto a grid.

    x,y,v --> 1d numpy.array (sample coordinates, in grid-index units, and
              their values)
    grid --> 2d numpy.array, filled in place and returned

    fast if number of given values is small relative to grid resolution
    '''
    nSamples = len(v)
    nx = grid.shape[0]
    ny = grid.shape[1]
    for row in range(nx):
        for col in range(ny):
            exactHit = False  # pixel position coincides with a sample point
            weightSum = 0.0
            weightedVal = 0.0
            for k in range(nSamples):
                px = x[k]
                py = y[k]
                if px == row and py == col:
                    # sample sits exactly on this pixel: copy its value
                    grid[row, col] = v[k]
                    exactHit = True
                    break
                # inverse-distance weight
                w = 1 / ((px - row)**2 + (py - col)**2)**(0.5 * power)
                weightSum += w
                weightedVal += w * v[k]
            if not exactHit:
                grid[row, col] = weightedVal / weightSum
    return grid
if __name__ == '__main__':
    # Demo: interpolate a handful of random samples onto a large grid
    # and display the result (pass 'no_window' to skip plotting).
    import matplotlib.pyplot as plt
    import sys
    shape = (1000, 2000)
    nVals = 30
    # GIVING A SEED NUMBER FOR THE EXPERIENCE TO BE REPRODUCIBLE
    np.random.seed(123433789)
    grid = np.zeros(shape, dtype='float32')  # float32 halves memory vs float64
    # CREATE POINT SET
    x, y = np.random.randint(
        0, shape[0], nVals), np.random.randint(0, shape[1], nVals)
    v = np.random.randint(0, 10, nVals)  # THIS IS MY VARIABLE
    # CALCULATE
    grid = interpolate2dUnstructuredIDW(x, y, v, grid, 2)
    # PLOT
    if 'no_window' not in sys.argv:
        # transpose so axis 0 of the grid maps to the plot's x axis
        plt.imshow(grid.T, origin='lower', interpolation='nearest', cmap='jet')
        plt.scatter(x, y, c=v, cmap='jet', s=120)
        plt.xlim(0, grid.shape[0])
        plt.ylim(0, grid.shape[1])
        plt.grid()
        plt.show()
| gpl-3.0 |
jzt5132/scikit-learn | sklearn/metrics/ranking.py | 79 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule

    General-purpose: integrates any curve given as a set of points. To
    compute the area under a ROC curve directly from labels and scores,
    see :func:`roc_auc_score`.

    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.

    y : array, shape = [n]
        y coordinates.

    reorder : boolean, optional (default=False)
        If True, assume that the curve is ascending in the case of ties, as for
        an ROC curve. If the curve is non-ascending, the result will be wrong.

    Returns
    -------
    auc : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> pred = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
    >>> metrics.auc(fpr, tpr)
    0.75

    See also
    --------
    roc_auc_score : Computes the area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)

    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)

    if reorder:
        # sort by x, using y to break ties, so trapezoids are well ordered
        order = np.lexsort((y, x))
        x, y = x[order], y[order]
        sign = 1
    else:
        dx = np.diff(x)
        if not np.any(dx < 0):
            sign = 1           # x is already non-decreasing
        elif np.all(dx <= 0):
            sign = -1          # x is non-increasing: integrate backwards
        else:
            raise ValueError("Reordering is not turned on, and "
                             "the x array is not increasing: %s" % x)

    return sign * np.trapz(y, x)
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    """Compute average precision (AP) from prediction scores

    The score is the area under the precision-recall curve.

    Note: this implementation is restricted to the binary classification task
    or multilabel classification task.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    average_precision : float

    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
           <http://en.wikipedia.org/wiki/Average_precision>`_

    See also
    --------
    roc_auc_score : Area under the ROC curve

    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import average_precision_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> average_precision_score(y_true, y_scores)  # doctest: +ELLIPSIS
    0.79...
    """
    def _binary_ap(y_true, y_score, sample_weight=None):
        # AP for one binary problem: area under its precision-recall curve
        precision, recall, _ = precision_recall_curve(
            y_true, y_score, sample_weight=sample_weight)
        return auc(recall, precision)

    return _average_binary_score(_binary_ap, y_true, y_score, average,
                                 sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
    """Compute Area Under the Curve (AUC) from prediction scores

    Note: this implementation is restricted to the binary classification task
    or multilabel classification task in label indicator format.

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    auc : float

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
           <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    See also
    --------
    average_precision_score : Area under the precision-recall curve

    roc_curve : Compute Receiver operating characteristic (ROC)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import roc_auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc_score(y_true, y_scores)
    0.75
    """
    def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
        # ROC AUC is undefined when only one class is present
        if len(np.unique(y_true)) != 2:
            raise ValueError("Only one class present in y_true. ROC AUC score "
                             "is not defined in that case.")

        fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)
        return auc(fpr, tpr, reorder=True)

    return _average_binary_score(
        _binary_roc_auc_score, y_true, y_score, average,
        sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification

    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function

    pos_label : int, optional (default=None)
        The label of the positive class

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).

    tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).

    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    check_consistent_length(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)

    # ensure binary classification if pos_label is not specified
    # (only {0, 1}, {-1, 1} and their single-class subsets are accepted
    # without an explicit pos_label)
    classes = np.unique(y_true)
    if (pos_label is None and
        not (np.all(classes == [0, 1]) or
             np.all(classes == [-1, 1]) or
             np.all(classes == [0]) or
             np.all(classes == [-1]) or
             np.all(classes == [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.

    # make y_true a boolean vector
    y_true = (y_true == pos_label)

    # sort scores and corresponding truth values
    # (mergesort is stable, keeping tied scores in their original order)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        # scalar weight lets the weighted and unweighted paths share code
        weight = 1.

    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    # We need to use isclose to avoid spurious repeated thresholds
    # stemming from floating point roundoff errors.
    distinct_value_indices = np.where(np.logical_not(isclose(
        np.diff(y_score), 0)))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]

    # accumulate the true positives with decreasing threshold
    tps = (y_true * weight).cumsum()[threshold_idxs]
    if sample_weight is not None:
        fps = weight.cumsum()[threshold_idxs] - tps
    else:
        # unweighted: every sample counts once, so false positives follow
        # directly from the cumulative index
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Compute precision-recall pairs for different probability thresholds

    Note: this implementation is restricted to the binary classification task.

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.

    The last precision and recall values are 1. and 0. respectively and do not
    have a corresponding threshold. This ensures that the graph starts on the
    x axis.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification in range {-1, 1} or {0, 1}.

    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.

    pos_label : int, optional (default=None)
        The label of the positive class

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Precision values such that element i is the precision of
        predictions with score >= thresholds[i] and the last element is 1.

    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values such that element i is the recall of
        predictions with score >= thresholds[i] and the last element is 0.

    thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
        Increasing thresholds on the decision function used to compute
        precision and recall.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import precision_recall_curve
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> precision, recall, thresholds = precision_recall_curve(
    ...     y_true, y_scores)
    >>> precision  # doctest: +ELLIPSIS
    array([ 0.66...,  0.5       ,  1.        ,  1.        ])
    >>> recall
    array([ 1. ,  0.5,  0.5,  0. ])
    >>> thresholds
    array([ 0.35,  0.4 ,  0.8 ])

    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, probas_pred, pos_label=pos_label, sample_weight=sample_weight)

    precision = tps / (tps + fps)
    recall = tps / tps[-1]

    # Truncate the curve once full recall is first attained, reverse it so
    # recall is decreasing, then append the (precision=1, recall=0) endpoint.
    cut = slice(tps.searchsorted(tps[-1]), None, -1)
    return (np.r_[precision[cut], 1],
            np.r_[recall[cut], 0],
            thresholds[cut])
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Compute Receiver operating characteristic (ROC)

    Note: this implementation is restricted to the binary classification task.

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------

    y_true : array, shape = [n_samples]
        True binary labels in range {0, 1} or {-1, 1}.  If labels are not
        binary, pos_label should be explicitly given.

    y_score : array, shape = [n_samples]
        Target scores, can either be probability estimates of the positive
        class or confidence values.

    pos_label : int
        Label considered as positive and others are considered negative.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fpr : array, shape = [>2]
        Increasing false positive rates such that element i is the false
        positive rate of predictions with score >= thresholds[i].

    tpr : array, shape = [>2]
        Increasing true positive rates such that element i is the true
        positive rate of predictions with score >= thresholds[i].

    thresholds : array, shape = [n_thresholds]
        Decreasing thresholds on the decision function used to compute
        fpr and tpr. `thresholds[0]` represents no instances being predicted
        and is arbitrarily set to `max(y_score) + 1`.

    See also
    --------
    roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores

    Notes
    -----
    Since the thresholds are sorted from low to high values, they
    are reversed upon returning them to ensure they correspond to both ``fpr``
    and ``tpr``, which are sorted in reversed order during their calculation.

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
           <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
    >>> fpr
    array([ 0. ,  0.5,  0.5,  1. ])
    >>> tpr
    array([ 0.5,  0.5,  1. ,  1. ])
    >>> thresholds
    array([ 0.8 ,  0.4 ,  0.35,  0.1 ])

    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)

    # Prepend an "everything predicted negative" point when the curve does
    # not already start at the origin.
    if tps.size == 0 or fps[0] != 0:
        tps = np.r_[0, tps]
        fps = np.r_[0, fps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]

    def _rate(counts, message):
        # normalise cumulative counts to rates; warn and return NaNs when
        # the corresponding class is absent
        if counts[-1] <= 0:
            warnings.warn(message, UndefinedMetricWarning)
            return np.repeat(np.nan, counts.shape)
        return counts / counts[-1]

    fpr = _rate(fps, "No negative samples in y_true, "
                     "false positive value should be meaningless")
    tpr = _rate(tps, "No positive samples in y_true, "
                     "true positive value should be meaningless")

    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
    """Compute ranking-based average precision

    Label ranking average precision (LRAP) is the average over each ground
    truth label assigned to each sample, of the ratio of true vs. total
    labels with lower score.

    This metric is used in multilabel ranking problem, where the goal
    is to give better rank to the labels associated to each sample.

    The obtained score is always strictly greater than 0 and
    the best value is 1.

    Read more in the :ref:`User Guide <label_ranking_average_precision>`.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    Returns
    -------
    score : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score) \
        # doctest: +ELLIPSIS
    0.416...

    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Reject anything that is not a (possibly degenerate) label-indicator
    # matrix.
    y_type = type_of_target(y_true)
    if (y_type != "multilabel-indicator" and
            not (y_type == "binary" and y_true.ndim == 2)):
        raise ValueError("{0} format is not supported".format(y_type))

    y_true = csr_matrix(y_true)
    y_score = -y_score  # negate so higher scores receive smaller ranks

    n_samples, n_labels = y_true.shape

    total = 0.
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        relevant = y_true.indices[start:stop]

        if relevant.size == 0 or relevant.size == n_labels:
            # If all labels are relevant or unrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
            total += 1.
            continue

        row_scores = y_score[i]
        rank_among_all = rankdata(row_scores, 'max')[relevant]
        rank_among_relevant = rankdata(row_scores[relevant], 'max')
        total += (rank_among_relevant / rank_among_all).mean()

    return total / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
    """Coverage error measure

    Compute how far we need to go through the ranked scores to cover all
    true labels. The best value is equal to the average number
    of labels in ``y_true`` per sample.

    Ties in ``y_scores`` are broken by giving maximal rank that would have
    been assigned to all tied values.

    Read more in the :ref:`User Guide <coverage_error>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    coverage_error : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.

    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Mask out the irrelevant labels, find each sample's worst-scored
    # relevant label, and count how many labels score at least as high.
    relevant_scores = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
    lowest_relevant = relevant_scores.min(axis=1).reshape((-1, 1))
    depth = (y_score >= lowest_relevant).sum(axis=1)
    depth = depth.filled(0)  # samples with no relevant label cover nothing

    return np.average(depth, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
    """Compute Ranking loss measure

    Compute the average number of label pairs that are incorrectly ordered
    given y_score weighted by the size of the label set and the number of
    labels not in the label set.

    This is similar to the error set size, but weighted by the number of
    relevant and irrelevant labels. The best performance is achieved with
    a ranking loss of zero.

    Read more in the :ref:`User Guide <label_ranking_loss>`.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.

    """
    y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true)
    if y_type not in ("multilabel-indicator",):
        raise ValueError("{0} format is not supported".format(y_type))

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    n_samples, n_labels = y_true.shape

    y_true = csr_matrix(y_true)

    loss = np.zeros(n_samples)
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        # Sort and bin the label scores
        # (unique_inverse maps each label to its score's rank-bin)
        unique_scores, unique_inverse = np.unique(y_score[i],
                                                  return_inverse=True)
        # per-bin counts of relevant (true) and irrelevant (false) labels
        true_at_reversed_rank = bincount(
            unique_inverse[y_true.indices[start:stop]],
            minlength=len(unique_scores))
        all_at_reversed_rank = bincount(unique_inverse,
                                        minlength=len(unique_scores))
        false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank

        # if the scores are ordered, it's possible to count the number of
        # incorrectly ordered paires in linear time by cumulatively counting
        # how many false labels of a given score have a score higher than the
        # accumulated true labels with lower score.
        loss[i] = np.dot(true_at_reversed_rank.cumsum(),
                         false_at_reversed_rank)

    # number of relevant labels per sample
    n_positives = count_nonzero(y_true, axis=1)
    with np.errstate(divide="ignore", invalid="ignore"):
        # normalise by the number of (relevant, irrelevant) label pairs
        loss /= ((n_labels - n_positives) * n_positives)

    # When there is no positive or no negative labels, those values should
    # be consider as correct, i.e. the ranking doesn't matter.
    loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.

    return np.average(loss, weights=sample_weight)
| bsd-3-clause |
pylayers/pylayers | pylayers/simul/examples/ex_simulem_fur.py | 3 | 1157 | from pylayers.simul.simulem import *
from pylayers.signal.bsignal import *
from pylayers.measures.mesuwb import *
import matplotlib.pyplot as plt
from pylayers.gis.layout import *
# Compare a measured UWB channel impulse response against the simulated
# one for the same tx/rx pair, displayed above the building layout.
# The commented-out lines select alternative measurement points and the
# "with furniture" simulation variant.
#M=UWBMesure(173)
M=UWBMesure(13)
#M=UWBMesure(1)
cir=TUsignal()
cirf=TUsignal()
#cir.readcir("where2cir-tx001-rx145.mat","Tx001")
#cirf.readcir("where2-furcir-tx001-rx145.mat","Tx001")
cir.readcir("where2cir-tx002-rx012.mat","Tx002")
#cirf.readcir("where2-furcir-tx002-rx012.mat","Tx002")
#cir.readcir("where2cir-tx001-rx001.mat","Tx001")
#cirf.readcir("where2-furcir-tx001-rx001.mat","Tx001")
plt.ion()
fig = plt.figure()
fig.subplots_adjust(hspace=0.5)
# top panel: the layout with the tx (red) and rx (blue) positions
ax1 = fig.add_subplot(411,title="points and layout")
L=Layout()
L.load('siradel-cut-fur.ini')
#L.build()
L.showGs(fig=fig,ax=ax1)
ax1.plot(M.tx[0],M.tx[1],'or')
#ax1.plot(M.rx[1][0],M.rx[1][1],'ob')
ax1.plot(M.rx[2][0],M.rx[2][1],'ob')
# middle panel: measured impulse response
ax2 = fig.add_subplot(412,title="Measurement")
M.tdd.ch2.plot()
#ax3 = fig.add_subplot(413,title="Simulation with furniture",sharex=ax2,sharey=ax2)
#cirf.plot(col='red')
# bottom panel: simulated impulse response, sharing axes with the measurement
ax4 = fig.add_subplot(414,title="Simulation",sharex=ax2,sharey=ax2)
cir.plot(col='blue')
plt.show()
| mit |
rahul-c1/scikit-learn | sklearn/cross_decomposition/pls_.py | 6 | 28685 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of a X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights))
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
## 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights))
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / np.dot(y_weights.T, y_weights)
## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
           RegressorMixin):
    """Partial Least Squares (PLS)

    This class implements the generic PLS algorithm, constructors' parameters
    allow to obtain a specific implementation such as:

    - PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
      and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
      With univariate response it implements PLS1.

    - PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
      normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
      [Wegelin et al. 2000]. This parametrization implements the original Wold
      algorithm.

    We use the terminology defined by [Wegelin et al. 2000].
    This implementation uses the PLS Wold 2 blocks algorithm based on two
    nested loops:
        (i) The outer loop iterates over components.
        (ii) The inner loop estimates the weights vectors. This can be done
        with two algo. (a) the inner loop of the original NIPALS algo. or (b)
        a SVD on residuals cross-covariance matrices.

    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).

    scale : boolean, scale data? (default True)

    deflation_mode : str, "canonical" or "regression". See notes.

    mode : "A" classical PLS and "B" CCA. See notes.

    norm_y_weights : boolean, normalize Y weights to one? (default False)

    algorithm : string, "nipals" or "svd"
        The algorithm used to estimate the weights. It will be called
        n_components times, i.e. once for each iteration of the outer loop.

    max_iter : an integer, the maximum number of iterations (default 500)
        of the NIPALS inner loop (used only if algorithm="nipals")

    tol : non-negative real, default 1e-06
        The tolerance used in the iterative algorithm.

    copy : boolean
        Whether the deflation should be done on a copy. Let the default
        value to True unless you don't care about side effects.

    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.
    y_weights_ : array, [q, n_components]
        Y block weights vectors.
    x_loadings_ : array, [p, n_components]
        X block loadings vectors.
    y_loadings_ : array, [q, n_components]
        Y block loadings vectors.
    x_scores_ : array, [n_samples, n_components]
        X scores.
    y_scores_ : array, [n_samples, n_components]
        Y scores.
    x_rotations_ : array, [p, n_components]
        X block to latents rotations.
    y_rotations_ : array, [q, n_components]
        Y block to latents rotations.
    coefs : array, [p, q]
        The coefficients of the linear model: Y = X coefs + Err.
        Note: the 1 / x_std_ scaling of the centered data is folded into
        this matrix (see the end of ``fit``).
    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component. Not useful if the algorithm given is "svd".

    References
    ----------
    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.

    In French but still a reference:
    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.

    See also
    --------
    PLSCanonical
    PLSRegression
    CCA
    PLS_SVD
    """

    @abstractmethod
    def __init__(self, n_components=2, scale=True, deflation_mode="regression",
                 mode="A", algorithm="nipals", norm_y_weights=False,
                 max_iter=500, tol=1e-06, copy=True):
        self.n_components = n_components
        self.deflation_mode = deflation_mode
        self.mode = mode
        self.norm_y_weights = norm_y_weights
        self.scale = scale
        self.algorithm = algorithm
        self.max_iter = max_iter
        self.tol = tol
        self.copy = copy

    def fit(self, X, Y):
        """Fit model to data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples in the number of samples and
            n_features is the number of predictors.

        Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples in the number of samples and
            n_targets is the number of response variables.
        """
        # copy since this will contains the residuals (deflated) matrices
        check_consistent_length(X, Y)
        X = check_array(X, dtype=np.float, copy=self.copy)
        Y = check_array(Y, dtype=np.float, copy=self.copy)

        n = X.shape[0]
        p = X.shape[1]
        q = Y.shape[1]

        if n != Y.shape[0]:
            raise ValueError(
                'Incompatible shapes: X has %s samples, while Y '
                'has %s' % (X.shape[0], Y.shape[0]))
        if self.n_components < 1 or self.n_components > p:
            raise ValueError('invalid number of components')
        if self.algorithm not in ("svd", "nipals"):
            raise ValueError("Got algorithm %s when only 'svd' "
                             "and 'nipals' are known" % self.algorithm)
        if self.algorithm == "svd" and self.mode == "B":
            raise ValueError('Incompatible configuration: mode B is not '
                             'implemented with svd algorithm')
        if not self.deflation_mode in ["canonical", "regression"]:
            raise ValueError('The deflation mode is unknown')
        # Scale (in place)
        X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
            = _center_scale_xy(X, Y, self.scale)
        # Residuals (deflated) matrices
        Xk = X
        Yk = Y
        # Results matrices
        self.x_scores_ = np.zeros((n, self.n_components))
        self.y_scores_ = np.zeros((n, self.n_components))
        self.x_weights_ = np.zeros((p, self.n_components))
        self.y_weights_ = np.zeros((q, self.n_components))
        self.x_loadings_ = np.zeros((p, self.n_components))
        self.y_loadings_ = np.zeros((q, self.n_components))
        self.n_iter_ = []

        # NIPALS algo: outer loop, over components
        for k in range(self.n_components):
            # 1) weights estimation (inner loop)
            # -----------------------------------
            if self.algorithm == "nipals":
                x_weights, y_weights, n_iter_ = \
                    _nipals_twoblocks_inner_loop(
                        X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
                        tol=self.tol, norm_y_weights=self.norm_y_weights)
                self.n_iter_.append(n_iter_)
            elif self.algorithm == "svd":
                x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
            # compute scores
            x_scores = np.dot(Xk, x_weights)
            if self.norm_y_weights:
                y_ss = 1
            else:
                y_ss = np.dot(y_weights.T, y_weights)
            y_scores = np.dot(Yk, y_weights) / y_ss
            # test for null variance
            if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
                warnings.warn('X scores are null at iteration %s' % k)
            # 2) Deflation (in place)
            # ----------------------
            # Possible memory footprint reduction may done here: in order to
            # avoid the allocation of a data chunk for the rank-one
            # approximations matrix which is then subtracted to Xk, we suggest
            # to perform a column-wise deflation.
            #
            # - regress Xk's on x_score
            x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
            # - subtract rank-one approximations to obtain remainder matrix
            Xk -= np.dot(x_scores, x_loadings.T)
            if self.deflation_mode == "canonical":
                # - regress Yk's on y_score, then subtract rank-one approx.
                y_loadings = (np.dot(Yk.T, y_scores)
                              / np.dot(y_scores.T, y_scores))
                Yk -= np.dot(y_scores, y_loadings.T)
            if self.deflation_mode == "regression":
                # - regress Yk's on x_score, then subtract rank-one approx.
                y_loadings = (np.dot(Yk.T, x_scores)
                              / np.dot(x_scores.T, x_scores))
                Yk -= np.dot(x_scores, y_loadings.T)
            # 3) Store weights, scores and loadings # Notation:
            self.x_scores_[:, k] = x_scores.ravel()  # T
            self.y_scores_[:, k] = y_scores.ravel()  # U
            self.x_weights_[:, k] = x_weights.ravel()  # W
            self.y_weights_[:, k] = y_weights.ravel()  # C
            self.x_loadings_[:, k] = x_loadings.ravel()  # P
            self.y_loadings_[:, k] = y_loadings.ravel()  # Q
        # Such that: X = TP' + Err and Y = UQ' + Err

        # 4) rotations from input space to transformed space (scores)
        # T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
        self.x_rotations_ = np.dot(
            self.x_weights_,
            linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
        if Y.shape[1] > 1:
            self.y_rotations_ = np.dot(
                self.y_weights_,
                linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
        else:
            self.y_rotations_ = np.ones(1)

        # NOTE: the coefficients are computed unconditionally (the
        # "if True or ..." keeps the historical behaviour of exposing
        # ``coefs`` for every deflation mode, not just "regression").
        if True or self.deflation_mode == "regression":
            # Estimate regression coefficient
            # Regress Y on T
            # Y = TQ' + Err,
            # Then express in function of X
            # Y = X W(P'W)^-1Q' + Err = XB + Err
            # => B = W*Q' (p x q)
            self.coefs = np.dot(self.x_rotations_, self.y_loadings_.T)
            # Fold the standardization back into the coefficients so that
            # predict() only needs to center (not scale) its input.
            self.coefs = (1. / self.x_std_.reshape((p, 1)) * self.coefs *
                          self.y_std_)
        return self

    def transform(self, X, Y=None, copy=True):
        """Apply the dimension reduction learned on the train data.

        Parameters
        ----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples in the number of samples and
            p is the number of predictors.

        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples in the number of samples and
            q is the number of response variables.

        copy : boolean
            Whether to copy X and Y, or perform in-place normalization.

        Returns
        -------
        x_scores if Y is not given, (x_scores, y_scores) otherwise.
        """
        # Normalize
        if copy:
            Xc = (np.asarray(X) - self.x_mean_) / self.x_std_
            if Y is not None:
                Yc = (np.asarray(Y) - self.y_mean_) / self.y_std_
        else:
            # In-place normalization of the caller's arrays.
            # BUGFIX: the previous code bound the array to ``X``/``Y`` but
            # then updated the undefined names ``Xc``/``Yc`` (NameError).
            Xc = np.asarray(X)
            Xc -= self.x_mean_
            Xc /= self.x_std_
            if Y is not None:
                Yc = np.asarray(Y)
                Yc -= self.y_mean_
                Yc /= self.y_std_
        # Apply rotation
        x_scores = np.dot(Xc, self.x_rotations_)
        if Y is not None:
            y_scores = np.dot(Yc, self.y_rotations_)
            return x_scores, y_scores

        return x_scores

    def predict(self, X, copy=True):
        """Apply the dimension reduction learned on the train data.

        Parameters
        ----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples in the number of samples and
            p is the number of predictors.

        copy : boolean
            Whether to copy X, or perform in-place centering.

        Notes
        -----
        This call requires the estimation of a p x q matrix, which may
        be an issue in high dimensional space.
        """
        # Center only: the 1 / x_std_ scaling is already folded into
        # self.coefs at the end of fit(), so dividing by x_std_ here
        # would scale the data twice.
        if copy:
            Xc = (np.asarray(X) - self.x_mean_)
        else:
            # BUGFIX: the previous code referenced the undefined name
            # ``Xc`` (NameError) and additionally divided by x_std_,
            # which was inconsistent with the copy=True branch.
            Xc = np.asarray(X)
            Xc -= self.x_mean_
        Ypred = np.dot(Xc, self.coefs)
        return Ypred + self.y_mean_

    def fit_transform(self, X, y=None, **fit_params):
        """Learn and apply the dimension reduction on the train data.

        Parameters
        ----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples in the number of samples and
            p is the number of predictors.

        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples in the number of samples and
            q is the number of response variables.

        Returns
        -------
        x_scores if Y is not given, (x_scores, y_scores) otherwise.
        """
        return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
    """PLS regression (PLS2, or PLS1 for a one-dimensional response).

    A fixed configuration of :class:`_PLS`: ``mode="A"``,
    ``deflation_mode="regression"``, ``norm_y_weights=False`` and the
    "nipals" algorithm, as defined by [Tenenhaus 1998] p. 132.

    Parameters
    ----------
    n_components : int, (default 2)
        Number of components to keep.

    scale : boolean, (default True)
        Whether to scale the data.

    max_iter : an integer, (default 500)
        The maximum number of iterations of the NIPALS inner loop.

    tol : non-negative real, (default 1e-06)
        Tolerance used in the iterative algorithm.

    copy : boolean, (default True)
        Whether the deflation should be done on a copy. Leave it to True
        unless you don't care about side effects.

    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.
    y_weights_ : array, [q, n_components]
        Y block weights vectors.
    x_loadings_ : array, [p, n_components]
        X block loadings vectors.
    y_loadings_ : array, [q, n_components]
        Y block loadings vectors.
    x_scores_ : array, [n_samples, n_components]
        X scores.
    y_scores_ : array, [n_samples, n_components]
        Y scores.
    x_rotations_ : array, [p, n_components]
        X block to latents rotations.
    y_rotations_ : array, [q, n_components]
        Y block to latents rotations.
    coefs : array, [p, q]
        The coefficients of the linear model: Y = X coefs + Err.
    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each component.

    Notes
    -----
    For each component k, weights u, v are found that maximize
    ``corr(Xk u, Yk v) * var(Xk u) var(Yk u)`` subject to ``|u| = 1``,
    i.e. both the score correlation and the intra-block variances.

    Both the X and the Y residual matrices are deflated on the current
    X score, which is what makes this decomposition prediction oriented
    (PLS2).  The results match the R packages "mixOmics"
    (``pls(X, Y, mode = "regression")``), "plspm" (``plsreg2(X, Y)``)
    and "pls" (``oscorespls.fit(X, Y)``).

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSRegression
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> pls2 = PLSRegression(n_components=2)
    >>> pls2.fit(X, Y)
    ... # doctest: +NORMALIZE_WHITESPACE
    PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
            tol=1e-06)
    >>> Y_pred = pls2.predict(X)

    References
    ----------
    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.

    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.
    """

    def __init__(self, n_components=2, scale=True,
                 max_iter=500, tol=1e-06, copy=True):
        # Everything but the fixed PLS2 configuration is delegated to _PLS.
        super(PLSRegression, self).__init__(
            n_components=n_components, scale=scale,
            deflation_mode="regression", mode="A",
            norm_y_weights=False, max_iter=max_iter, tol=tol,
            copy=copy)
class PLSCanonical(_PLS):
    """Canonical two-block PLS of the original Wold algorithm
    ([Tenenhaus 1998] p. 204), referred to as PLS-C2A in [Wegelin 2000].

    A fixed configuration of :class:`_PLS`: ``mode="A"``,
    ``deflation_mode="canonical"`` and ``norm_y_weights=True``; the
    "nipals" and "svd" algorithms give similar results up to numerical
    errors.

    Parameters
    ----------
    n_components : int, (default 2)
        Number of components to keep.

    scale : boolean, (default True)
        Whether to scale the data.

    algorithm : string, "nipals" or "svd"
        The algorithm used to estimate the weights. It will be called
        n_components times, i.e. once for each iteration of the outer loop.

    max_iter : an integer, (default 500)
        The maximum number of iterations of the NIPALS inner loop (used
        only if algorithm="nipals").

    tol : non-negative real, (default 1e-06)
        Tolerance used in the iterative algorithm.

    copy : boolean, (default True)
        Whether the deflation should be done on a copy. Leave it to True
        unless you don't care about side effects.

    Attributes
    ----------
    x_weights_ : array, shape = [p, n_components]
        X block weights vectors.
    y_weights_ : array, shape = [q, n_components]
        Y block weights vectors.
    x_loadings_ : array, shape = [p, n_components]
        X block loadings vectors.
    y_loadings_ : array, shape = [q, n_components]
        Y block loadings vectors.
    x_scores_ : array, shape = [n_samples, n_components]
        X scores.
    y_scores_ : array, shape = [n_samples, n_components]
        Y scores.
    x_rotations_ : array, shape = [p, n_components]
        X block to latents rotations.
    y_rotations_ : array, shape = [q, n_components]
        Y block to latents rotations.
    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component. Not useful if the algorithm provided is "svd".

    Notes
    -----
    For each component k, weights u, v are found that maximize
    ``corr(Xk u, Yk v) * var(Xk u) var(Yk u)`` with ``|u| = |v| = 1``.

    The X residuals are deflated on the current X score and the Y
    residuals on the current Y score: a symmetric, canonical version of
    PLS regression (slightly different from CCA), mostly used for
    modeling.  Results equal the R "plspm" package (``plsca(X, Y)``) and
    are equal or collinear with ``pls(..., mode = "canonical")`` of
    "mixOmics" (which does not normalize y_weights to one, unlike the
    original Wold algorithm).

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSCanonical
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> plsca = PLSCanonical(n_components=2)
    >>> plsca.fit(X, Y)
    ... # doctest: +NORMALIZE_WHITESPACE
    PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
                 scale=True, tol=1e-06)
    >>> X_c, Y_c = plsca.transform(X, Y)

    References
    ----------
    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.

    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.

    See also
    --------
    CCA
    PLSSVD
    """

    def __init__(self, n_components=2, scale=True, algorithm="nipals",
                 max_iter=500, tol=1e-06, copy=True):
        # Everything but the fixed canonical configuration is delegated
        # to _PLS.
        super(PLSCanonical, self).__init__(
            n_components=n_components, scale=scale,
            deflation_mode="canonical", mode="A",
            norm_y_weights=True, algorithm=algorithm,
            max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
    """Partial Least Square SVD.

    Simply performs an SVD of the cross-covariance matrix X'Y; there is
    no iterative deflation.

    Parameters
    ----------
    n_components : int, (default 2)
        Number of components to keep.

    scale : boolean, (default True)
        Whether to scale X and Y.

    copy : boolean, (default True)
        Whether to center/scale on a copy of the data.

    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors (left singular vectors of X'Y).
    y_weights_ : array, [q, n_components]
        Y block weights vectors (right singular vectors of X'Y).
    x_scores_ : array, [n_samples, n_components]
        X scores.
    y_scores_ : array, [n_samples, n_components]
        Y scores.

    See also
    --------
    PLSCanonical
    CCA
    """

    def __init__(self, n_components=2, scale=True, copy=True):
        self.n_components = n_components
        self.scale = scale
        self.copy = copy

    def fit(self, X, Y):
        """Fit the model: center/scale the blocks and SVD their
        cross-covariance matrix."""
        # The centered/scaled data is kept, so work on a copy by default.
        check_consistent_length(X, Y)
        X = check_array(X, dtype=np.float, copy=self.copy)
        Y = check_array(Y, dtype=np.float, copy=self.copy)

        n_features = X.shape[1]
        if self.n_components < 1 or self.n_components > n_features:
            raise ValueError('invalid number of components')

        # Scale (in place)
        X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
            _center_scale_xy(X, Y, self.scale)

        # svd(X'Y)
        cross_cov = np.dot(X.T, Y)
        # The arpack svds solver only works if the number of extracted
        # components is smaller than rank(X) - 1, so fall back to a dense
        # SVD when every component (cross_cov.shape[1]) is requested.
        if self.n_components == cross_cov.shape[1]:
            U, s, V = linalg.svd(cross_cov, full_matrices=False)
        else:
            U, s, V = arpack.svds(cross_cov, k=self.n_components)
        V = V.T

        self.x_weights_ = U
        self.y_weights_ = V
        self.x_scores_ = np.dot(X, U)
        self.y_scores_ = np.dot(Y, V)
        return self

    def transform(self, X, Y=None):
        """Apply the dimension reduction learned on the train data.

        Returns x_scores if Y is not given, (x_scores, y_scores)
        otherwise.
        """
        x_standardized = (X - self.x_mean_) / self.x_std_
        x_scores = np.dot(x_standardized, self.x_weights_)
        if Y is None:
            return x_scores
        y_standardized = (Y - self.y_mean_) / self.y_std_
        y_scores = np.dot(y_standardized, self.y_weights_)
        return x_scores, y_scores

    def fit_transform(self, X, y=None, **fit_params):
        """Learn and apply the dimension reduction on the train data.

        Parameters
        ----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples in the number of samples and
            p is the number of predictors.

        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples in the number of samples and
            q is the number of response variables.

        Returns
        -------
        x_scores if Y is not given, (x_scores, y_scores) otherwise.
        """
        return self.fit(X, y, **fit_params).transform(X, y)
| bsd-3-clause |
treycausey/scikit-learn | sklearn/externals/joblib/__init__.py | 3 | 4468 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs. In addition, Joblib
can also be used to provide a light-weight make replacement or caching
solution.
* **Avoid computing the same thing twice**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.8.0a3'
from .memory import Memory
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
linebp/pandas | pandas/tests/util/test_hashing.py | 12 | 12908 | import pytest
import datetime
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Index, MultiIndex
from pandas.util import hash_array, hash_pandas_object
from pandas.core.util.hashing import hash_tuples, hash_tuple, _hash_scalar
import pandas.util.testing as tm
class TestHashing(object):
    def setup_method(self, method):
        # Fixture: one DataFrame column per dtype family exercised by the
        # hashing tests (int32, float32 with NaN, categorical, object,
        # bool, naive and tz-aware datetimes, timedeltas).
        self.df = DataFrame(
            {'i32': np.array([1, 2, 3] * 3, dtype='int32'),
             'f32': np.array([None, 2.5, 3.5] * 3, dtype='float32'),
             'cat': Series(['a', 'b', 'c'] * 3).astype('category'),
             'obj': Series(['d', 'e', 'f'] * 3),
             'bool': np.array([True, False, True] * 3),
             'dt': Series(pd.date_range('20130101', periods=9)),
             'dt_tz': Series(pd.date_range('20130101', periods=9,
                                           tz='US/Eastern')),
             'td': Series(pd.timedelta_range('2000', periods=9))})
    def test_consistency(self):
        """Pin exact hash values so any algorithm change is caught."""
        # check that our hash doesn't change because of a mistake
        # in the actual code; this is the ground truth
        result = hash_pandas_object(Index(['foo', 'bar', 'baz']))
        expected = Series(np.array([3600424527151052760, 1374399572096150070,
                                    477881037637427054], dtype='uint64'),
                          index=['foo', 'bar', 'baz'])
        tm.assert_series_equal(result, expected)
    def test_hash_array(self):
        """hash_array is deterministic for every fixture dtype."""
        for name, s in self.df.iteritems():
            a = s.values
            tm.assert_numpy_array_equal(hash_array(a), hash_array(a))
    def test_hash_array_mixed(self):
        """Mixed int/str object arrays hash by string representation."""
        result1 = hash_array(np.array([3, 4, 'All']))
        result2 = hash_array(np.array(['3', '4', 'All']))
        result3 = hash_array(np.array([3, 4, 'All'], dtype=object))
        tm.assert_numpy_array_equal(result1, result2)
        tm.assert_numpy_array_equal(result1, result3)
    def test_hash_array_errors(self):
        """Scalars are rejected: hash_array only accepts ndarrays."""
        for val in [5, 'foo', pd.Timestamp('20130101')]:
            pytest.raises(TypeError, hash_array, val)
    def check_equal(self, obj, **kwargs):
        # Helper: hashing the same object twice must be deterministic,
        # both with the given kwargs and with any 'index' kwarg removed.
        a = hash_pandas_object(obj, **kwargs)
        b = hash_pandas_object(obj, **kwargs)
        tm.assert_series_equal(a, b)

        kwargs.pop('index', None)
        a = hash_pandas_object(obj, **kwargs)
        b = hash_pandas_object(obj, **kwargs)
        tm.assert_series_equal(a, b)
    def check_not_equal_with_index(self, obj):
        # Helper: including the index must change the hash (except for
        # Index objects, where the values ARE the index).
        # check that we are not hashing the same if
        # we include the index
        if not isinstance(obj, Index):
            a = hash_pandas_object(obj, index=True)
            b = hash_pandas_object(obj, index=False)
            if len(obj):
                assert not (a == b).all()
    def test_hash_tuples(self):
        """hash_tuples matches hashing the equivalent MultiIndex."""
        tups = [(1, 'one'), (1, 'two'), (2, 'one')]
        result = hash_tuples(tups)
        expected = hash_pandas_object(MultiIndex.from_tuples(tups)).values
        tm.assert_numpy_array_equal(result, expected)

        # A single tuple is also accepted and hashes like its list form.
        result = hash_tuples(tups[0])
        assert result == expected[0]
    def test_hash_tuple(self):
        """hash_tuple on one tuple equals hash_tuples on a 1-list."""
        # test equivalence between hash_tuples and hash_tuple
        for tup in [(1, 'one'), (1, np.nan), (1.0, pd.NaT, 'A'),
                    ('A', pd.Timestamp("2012-01-01"))]:
            result = hash_tuple(tup)
            expected = hash_tuples([tup])[0]
            assert result == expected
    def test_hash_scalar(self):
        """_hash_scalar equals hash_array on a 1-element object array,
        across numeric, string, datetime-like, interval and NA scalars."""
        for val in [1, 1.4, 'A', b'A', u'A', pd.Timestamp("2012-01-01"),
                    pd.Timestamp("2012-01-01", tz='Europe/Brussels'),
                    datetime.datetime(2012, 1, 1),
                    pd.Timestamp("2012-01-01", tz='EST').to_pydatetime(),
                    pd.Timedelta('1 days'), datetime.timedelta(1),
                    pd.Period('2012-01-01', freq='D'), pd.Interval(0, 1),
                    np.nan, pd.NaT, None]:
            result = _hash_scalar(val)
            expected = hash_array(np.array([val], dtype=object),
                                  categorize=True)
            assert result[0] == expected[0]
    def test_hash_tuples_err(self):
        """Non-list/tuple inputs are rejected by hash_tuples."""
        for val in [5, 'foo', pd.Timestamp('20130101')]:
            pytest.raises(TypeError, hash_tuples, val)
    def test_multiindex_unique(self):
        """A unique MultiIndex must produce unique hashes."""
        mi = MultiIndex.from_tuples([(118, 472), (236, 118),
                                     (51, 204), (102, 51)])
        assert mi.is_unique
        result = hash_pandas_object(mi)
        assert result.is_unique
    def test_multiindex_objects(self):
        """Hashes of a MultiIndex survive level re-sorting (same values,
        possibly different order)."""
        mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],
                        labels=[[0, 1, 0, 2], [2, 0, 0, 1]],
                        names=['col1', 'col2'])
        recons = mi._sort_levels_monotonic()

        # these are equal
        assert mi.equals(recons)
        assert Index(mi.values).equals(Index(recons.values))

        # _hashed_values and hash_pandas_object(..., index=False)
        # equivalency
        expected = hash_pandas_object(
            mi, index=False).values
        result = mi._hashed_values
        tm.assert_numpy_array_equal(result, expected)
        expected = hash_pandas_object(
            recons, index=False).values
        result = recons._hashed_values
        tm.assert_numpy_array_equal(result, expected)
        expected = mi._hashed_values
        result = recons._hashed_values

        # values should match, but in different order
        tm.assert_numpy_array_equal(np.sort(result),
                                    np.sort(expected))
    def test_hash_pandas_object(self):
        """Determinism + index-sensitivity across a broad zoo of pandas
        objects (Series, Index, DataFrame, MultiIndex, period/tz data)."""
        for obj in [Series([1, 2, 3]),
                    Series([1.0, 1.5, 3.2]),
                    Series([1.0, 1.5, np.nan]),
                    Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
                    Series(['a', 'b', 'c']),
                    Series(['a', np.nan, 'c']),
                    Series(['a', None, 'c']),
                    Series([True, False, True]),
                    Series(),
                    Index([1, 2, 3]),
                    Index([True, False, True]),
                    DataFrame({'x': ['a', 'b', 'c'], 'y': [1, 2, 3]}),
                    DataFrame(),
                    tm.makeMissingDataframe(),
                    tm.makeMixedDataFrame(),
                    tm.makeTimeDataFrame(),
                    tm.makeTimeSeries(),
                    tm.makeTimedeltaIndex(),
                    tm.makePeriodIndex(),
                    Series(tm.makePeriodIndex()),
                    Series(pd.date_range('20130101',
                                         periods=3, tz='US/Eastern')),
                    MultiIndex.from_product(
                        [range(5),
                         ['foo', 'bar', 'baz'],
                         pd.date_range('20130101', periods=2)]),
                    MultiIndex.from_product(
                        [pd.CategoricalIndex(list('aabc')),
                         range(3)])]:
            self.check_equal(obj)
            self.check_not_equal_with_index(obj)
    def test_hash_pandas_object2(self):
        """Same determinism checks for every dtype column in the fixture."""
        for name, s in self.df.iteritems():
            self.check_equal(s)
            self.check_not_equal_with_index(s)
    def test_hash_pandas_empty_object(self):
        """Empty objects hash deterministically (index is vacuous here)."""
        for obj in [Series([], dtype='float64'),
                    Series([], dtype='object'),
                    Index([])]:
            self.check_equal(obj)

            # these are by-definition the same with
            # or w/o the index as the data is empty
    def test_categorical_consistency(self):
        """Categoricals hash by their values, not their codes (GH15143)."""
        # GH15143
        # Check that categoricals hash consistent with their values, not codes
        # This should work for categoricals of any dtype
        for s1 in [Series(['a', 'b', 'c', 'd']),
                   Series([1000, 2000, 3000, 4000]),
                   Series(pd.date_range(0, periods=4))]:
            s2 = s1.astype('category').cat.set_categories(s1)
            s3 = s2.cat.set_categories(list(reversed(s1)))
            for categorize in [True, False]:
                # These should all hash identically
                h1 = hash_pandas_object(s1, categorize=categorize)
                h2 = hash_pandas_object(s2, categorize=categorize)
                h3 = hash_pandas_object(s3, categorize=categorize)
                tm.assert_series_equal(h1, h2)
                tm.assert_series_equal(h1, h3)
    def test_categorical_with_nan_consistency(self):
        """-1 (NaN) codes hash the same regardless of the category set."""
        c = pd.Categorical.from_codes(
            [-1, 0, 1, 2, 3, 4],
            categories=pd.date_range('2012-01-01', periods=5, name='B'))
        expected = hash_array(c, categorize=False)
        c = pd.Categorical.from_codes(
            [-1, 0],
            categories=[pd.Timestamp('2012-01-01')])
        result = hash_array(c, categorize=False)
        assert result[0] in expected
        assert result[1] in expected
    def test_pandas_errors(self):
        """Unsupported objects (scalars, Panel) raise TypeError."""
        for obj in [pd.Timestamp('20130101')]:
            with pytest.raises(TypeError):
                hash_pandas_object(obj)

        with catch_warnings(record=True):
            obj = tm.makePanel()
        with pytest.raises(TypeError):
            hash_pandas_object(obj)
def test_hash_keys(self):
    """Different hash keys give different hashes for identical data."""
    # The key is only consulted for object dtypes.
    data = Series(list('abc'))
    first = hash_pandas_object(data, hash_key='9876543210123456')
    second = hash_pandas_object(data, hash_key='9876543210123465')
    assert (first != second).all()
def test_invalid_key(self):
    """A hash key of the wrong length raises ValueError."""
    # This only matters for object dtypes.  Use the context-manager
    # form of pytest.raises instead of wrapping the call in a
    # throwaway closure passed to the legacy callable form.
    with pytest.raises(ValueError):
        hash_pandas_object(Series(list('abc')), hash_key='foo')
def test_alread_encoded(self):
    """Pre-encoded byte strings hash without re-encoding."""
    # NOTE(review): the name has a typo ("alread") but is kept so test
    # discovery and CI history remain stable.
    encoded = Series(list('abc')).str.encode('utf8')
    self.check_equal(encoded)
def test_alternate_encoding(self):
    """Hashing honours a non-default text encoding."""
    data = Series(list('abc'))
    self.check_equal(data, encoding='ascii')
def test_same_len_hash_collisions(self):
    """Equal-length random strings must not collide.

    The original duplicated the whole loop body for lengths
    ``2**k + 1`` and ``2**k``; fold both into one inner loop so the
    check cannot drift between the two copies.  Exponents 8..15 cover
    power-of-two buffer sizes and their off-by-one neighbours.
    """
    for exponent in range(8, 16):
        for length in (2 ** exponent + 1, 2 ** exponent):
            strings = tm.rands_array(length, 2)
            hashed = hash_array(strings, 'utf8')
            assert not hashed[0] == hashed[1]
def test_hash_collisions(self):
    # hash collisions are bad
    # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726
    # Two long strings that collided under a flawed earlier scheme;
    # pin their exact (distinct) siphash values so a regression in the
    # hashing algorithm is caught immediately.
    L = ['Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9',  # noqa
         'Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe']  # noqa

    # these should be different!
    result1 = hash_array(np.asarray(L[0:1], dtype=object), 'utf8')
    expected1 = np.array([14963968704024874985], dtype=np.uint64)
    tm.assert_numpy_array_equal(result1, expected1)

    result2 = hash_array(np.asarray(L[1:2], dtype=object), 'utf8')
    expected2 = np.array([16428432627716348016], dtype=np.uint64)
    tm.assert_numpy_array_equal(result2, expected2)

    # Hashing both together must reproduce the individual hashes.
    result = hash_array(np.asarray(L, dtype=object), 'utf8')
    tm.assert_numpy_array_equal(
        result, np.concatenate([expected1, expected2], axis=0))
def test_deprecation():
    """Importing hashing helpers from pandas.tools warns DeprecationWarning."""
    with tm.assert_produces_warning(DeprecationWarning,
                                    check_stacklevel=False):
        from pandas.tools.hashing import hash_pandas_object
        hash_pandas_object(Series(list('abc')),
                           hash_key='9876543210123456')

    with tm.assert_produces_warning(DeprecationWarning,
                                    check_stacklevel=False):
        from pandas.tools.hashing import hash_array
        hash_array(np.array([1, 2, 3]), hash_key='9876543210123456')
| bsd-3-clause |
rahul-c1/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 19 | 22876 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_loss_grad_hess,
_multinomial_loss_grad_hess
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
# Tiny 3-sample, 2-feature dataset used throughout, plus a sparse copy.
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
# Binary labels and (reversed) 3-class labels for the toy data.
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
    """Fit ``clf`` on (X, y) and verify classes, predictions and
    probabilities are mutually consistent."""
    expected_classes = np.unique(y)
    sample_count = len(y)
    class_count = expected_classes.shape[0]

    hard_pred = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, expected_classes)
    assert_equal(hard_pred.shape, (sample_count,))
    assert_array_equal(hard_pred, y)

    proba = clf.predict_proba(X)
    assert_equal(proba.shape, (sample_count, class_count))
    # Rows of predict_proba are probability distributions...
    assert_array_almost_equal(proba.sum(axis=1), np.ones(sample_count))
    # ...whose argmax agrees with the labels recovered above.
    assert_array_equal(proba.argmax(axis=1), y)
def test_predict_2_classes():
    """Simple sanity check on a 2 classes dataset.

    Make sure it predicts the correct result on simple datasets, for
    default, strongly-regularized and intercept-free configurations,
    on both dense and sparse input.
    """
    for extra in ({}, {'C': 100}, {'fit_intercept': False}):
        # Fresh estimator per input keeps the checks independent.
        check_predictions(LogisticRegression(random_state=0, **extra), X, Y1)
        check_predictions(LogisticRegression(random_state=0, **extra),
                          X_sp, Y1)
def test_error():
    """A negative regularization strength C must raise ValueError."""
    invalid = LogisticRegression(C=-1)
    assert_raises(ValueError, invalid.fit, X, Y1)
def test_predict_3_classes():
    """The classifier handles a 3-class toy problem, dense and sparse."""
    for data in (X, X_sp):
        check_predictions(LogisticRegression(C=10), data, Y2)
def test_predict_iris():
    """Test logistic regression with the iris dataset"""
    n_samples, n_features = iris.data.shape

    target = iris.target_names[iris.target]

    # Test that both multinomial and OvR solvers handle
    # multiclass data correctly and give good accuracy
    # score (>0.95) for the training data.
    for clf in [LogisticRegression(C=len(iris.data)),
                LogisticRegression(C=len(iris.data), solver='lbfgs',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='newton-cg',
                                   multi_class='multinomial')]:
        clf.fit(iris.data, target)
        assert_array_equal(np.unique(target), clf.classes_)

        pred = clf.predict(iris.data)
        # Training accuracy must exceed 95%.
        assert_greater(np.mean(pred == target), .95)

        probabilities = clf.predict_proba(iris.data)
        # Each predict_proba row must be a normalized distribution.
        assert_array_almost_equal(probabilities.sum(axis=1),
                                  np.ones(n_samples))

        # The most probable class must reproduce the hard prediction.
        pred = iris.target_names[probabilities.argmax(axis=1)]
        assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
    """Multinomial solvers also reject a negative C."""
    for solver in ('lbfgs', 'newton-cg'):
        bad = LogisticRegression(C=-1, solver=solver,
                                 multi_class='multinomial')
        assert_raises(ValueError, bad.fit, [[0, 1], [1, 0]], [0, 1])
def test_multinomial_binary():
    """Test multinomial LR on a binary problem."""
    target = (iris.target > 0).astype(np.intp)
    target = np.array(["setosa", "not-setosa"])[target]

    for solver in ['lbfgs', 'newton-cg']:
        clf = LogisticRegression(solver=solver, multi_class='multinomial')
        clf.fit(iris.data, target)

        # On a binary task, multinomial LR keeps a single coefficient
        # row and a single intercept entry.
        assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
        assert_equal(clf.intercept_.shape, (1,))
        assert_array_equal(clf.predict(iris.data), target)

        mlr = LogisticRegression(solver=solver, multi_class='multinomial',
                                 fit_intercept=False)
        mlr.fit(iris.data, target)
        # BUG FIX: `mlr` was fitted but never asserted on -- the next
        # lines previously re-inspected `clf`.  Check the intercept-free
        # model's accuracy instead.
        pred = mlr.classes_[np.argmax(mlr.predict_log_proba(iris.data),
                                      axis=1)]
        assert_greater(np.mean(pred == target), .9)
def test_sparsify():
    """Test the sparsify and densify members."""
    target = iris.target_names[iris.target]
    clf = LogisticRegression(random_state=0).fit(iris.data, target)
    sparse_input = sp.coo_matrix(iris.data)

    reference_scores = clf.decision_function(iris.data)

    clf.sparsify()
    assert_true(sp.issparse(clf.coef_))
    scores_sparse_coef_dense_in = clf.decision_function(iris.data)
    scores_sparse_coef_sparse_in = clf.decision_function(sparse_input)

    clf.densify()
    scores_dense_coef_sparse_in = clf.decision_function(sparse_input)

    # Every coef-layout / input-layout combination must agree with the
    # all-dense reference.
    for scores in (scores_sparse_coef_dense_in,
                   scores_sparse_coef_sparse_in,
                   scores_dense_coef_sparse_in):
        assert_array_almost_equal(reference_scores, scores)
def test_inconsistent_input():
    """Test that an exception is raised on inconsistent input"""
    rng = np.random.RandomState(0)
    X_ = rng.random_sample((5, 10))
    y_ = np.ones(X_.shape[0])
    y_[0] = 0

    clf = LogisticRegression(random_state=0)

    # Wrong dimensions for training data
    y_wrong = y_[:-1]
    # BUG FIX: pair the truncated labels with the matching training
    # matrix X_ (5 samples vs 4 labels); the old code used the unrelated
    # module-level toy X, which only mismatched by accident.
    assert_raises(ValueError, clf.fit, X_, y_wrong)

    # Wrong dimensions for test data
    assert_raises(ValueError, clf.fit(X_, y_).predict,
                  rng.random_sample((3, 12)))
def test_write_parameters():
    """coef_ and intercept_ are plain writable arrays."""
    clf = LogisticRegression(random_state=0)
    clf.fit(X, Y1)
    # Zeroing both parameter blocks must zero the decision function.
    clf.coef_[:] = 0
    clf.intercept_[:] = 0
    assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
    """Test proper NaN handling.

    Regression test for Issue #252: fit used to go into an infinite loop.
    """
    corrupted = np.array(X, dtype=np.float64)
    corrupted[0, 1] = np.nan
    LogisticRegression(random_state=0).fit(corrupted, Y1)
def test_consistency_path():
    """Test that the path algorithm is consistent"""
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = np.logspace(0, 4, 10)

    f = ignore_warnings
    # can't test with fit_intercept=True since LIBLINEAR
    # penalizes the intercept
    for method in ('lbfgs', 'newton-cg', 'liblinear'):
        coefs, Cs = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
        for i, C in enumerate(Cs):
            # The path solution at each C must match a direct fit at
            # that same C (tight tol so both converge fully).
            lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
            lr.fit(X, y)
            lr_coef = lr.coef_.ravel()
            assert_array_almost_equal(lr_coef, coefs[i], decimal=4)

    # test for fit_intercept=True
    for method in ('lbfgs', 'newton-cg', 'liblinear'):
        Cs = [1e3]
        coefs, Cs = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
        # A huge intercept_scaling makes liblinear's intercept penalty
        # negligible so the comparison is fair across solvers.
        lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
                                intercept_scaling=10000)
        lr.fit(X, y)
        lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
        assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_random_state():
    """Two fits with the same random_state yield identical coefficients."""
    X, y = make_classification(n_samples=20)
    coefs = []
    for _ in range(2):
        clf = LogisticRegression(random_state=0)
        clf.fit(X, y)
        coefs.append(clf.coef_)
    assert_array_almost_equal(coefs[0], coefs[1])
def test_logistic_loss_and_grad():
    """_logistic_loss_and_grad matches finite-difference gradients,
    with and without an intercept, on dense and sparse X."""
    X_ref, y = make_classification(n_samples=20)
    n_features = X_ref.shape[1]

    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)

        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)

        # Second check that our intercept implementation is good
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        # At w == 0 the intercept contributes nothing, so the losses agree.
        assert_array_almost_equal(loss, loss_interp)

        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_loss_grad_hess():
    """The Hessian-vector product of _logistic_loss_grad_hess agrees
    with a finite-difference slope estimate of the gradient."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 50, 5
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = .1 * np.ones(n_features)

        # First check that _logistic_loss_grad_hess is consistent
        # with _logistic_loss_and_grad
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        loss_2, grad_2, hess = _logistic_loss_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(grad, grad_2)

        # Now check our hessian along the second direction of the grad
        vector = np.zeros_like(grad)
        vector[1] = 1
        hess_col = hess(vector)

        # Computation of the Hessian is particularly fragile to numerical
        # errors when doing simple finite differences. Here we compute the
        # grad along a path in the direction of the vector and then use a
        # least-square regression to estimate the slope
        e = 1e-3
        d_x = np.linspace(-e, e, 30)
        d_grad = np.array([
            _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
            for t in d_x
        ])

        d_grad -= d_grad.mean(axis=0)
        approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()

        assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)

        # Second check that our intercept implementation is good
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        loss_interp_2, grad_interp_2, hess = \
            _logistic_loss_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(loss_interp, loss_interp_2)
        assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
    """test for LogisticRegressionCV object"""
    n_samples, n_features = 50, 5
    rng = np.random.RandomState(0)
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
                                 solver='liblinear')
    lr_cv.fit(X_ref, y)
    # With a single candidate C, the CV estimator must reduce to a
    # plain fit at that C.
    lr = LogisticRegression(C=1., fit_intercept=False)
    lr.fit(X_ref, y)
    assert_array_almost_equal(lr.coef_, lr_cv.coef_)

    assert_array_equal(lr_cv.coef_.shape, (1, n_features))
    assert_array_equal(lr_cv.classes_, [-1, 1])
    assert_equal(len(lr_cv.classes_), 2)

    # Attribute shapes: coefs_paths_ is (classes, folds, Cs, features)
    # and scores_ is (classes, folds, Cs); the default cv is 3-fold.
    coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
    assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
    assert_array_equal(lr_cv.Cs_.shape, (1, ))
    scores = np.asarray(list(lr_cv.scores_.values()))
    assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
    """LogisticRegressionCV gives the same model on dense and sparse input."""
    X, y = make_classification(n_samples=50, n_features=5,
                               random_state=0)
    # Zero out small entries so the sparse representation is non-trivial.
    X[X < 1.0] = 0.0

    dense_clf = LogisticRegressionCV(fit_intercept=True)
    dense_clf.fit(X, y)

    sparse_clf = LogisticRegressionCV(fit_intercept=True)
    sparse_clf.fit(sp.csr_matrix(X), y)

    assert_array_almost_equal(sparse_clf.coef_, dense_clf.coef_)
    assert_array_almost_equal(sparse_clf.intercept_, dense_clf.intercept_)
    assert_equal(sparse_clf.C_, dense_clf.C_)
def test_intercept_logistic_helper():
    """Fitting an explicit intercept and appending a constant-ones
    column must agree, up to the penalty applied to that extra column."""
    n_samples, n_features = 10, 5
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)

    # Fit intercept case.
    alpha = 1.
    w = np.ones(n_features + 1)
    loss_interp, grad_interp, hess_interp = _logistic_loss_grad_hess(
        w, X, y, alpha)

    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e column of one vectors.
    # FIX: derive the length from n_samples instead of hard-coding 10.
    X_ = np.hstack((X, np.ones(n_samples)[:, np.newaxis]))
    loss, grad, hess = _logistic_loss_grad_hess(w, X_, y, alpha)

    # In the fit_intercept=False case, the feature vector of ones is
    # penalized. This should be taken care of.
    assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)

    # Check gradient.
    assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
    assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])

    rng = np.random.RandomState(0)
    grad = rng.rand(n_features + 1)
    hess_interp = hess_interp(grad)
    hess = hess(grad)
    assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
    assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
    """Test that OvR and multinomial are correct using the iris dataset."""
    train, target = iris.data, iris.target
    n_samples, n_features = train.shape

    # Use pre-defined fold as folds generated for different y
    cv = StratifiedKFold(target, 3)
    clf = LogisticRegressionCV(cv=cv)
    clf.fit(train, target)

    # Merging classes 0 and 1 must leave class 2's per-fold scores and
    # its OvR model untouched, because the folds are fixed.
    clf1 = LogisticRegressionCV(cv=cv)
    target_copy = target.copy()
    target_copy[target_copy == 0] = 1
    clf1.fit(train, target_copy)

    assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
    assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
    assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)

    # Test the shape of various attributes.
    assert_equal(clf.coef_.shape, (3, n_features))
    assert_array_equal(clf.classes_, [0, 1, 2])
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
    assert_equal(clf.Cs_.shape, (10, ))
    scores = np.asarray(list(clf.scores_.values()))
    assert_equal(scores.shape, (3, 3, 10))

    # Test that for the iris data multinomial gives a better accuracy than OvR
    for solver in ['lbfgs', 'newton-cg']:
        clf_multi = LogisticRegressionCV(
            solver=solver, multi_class='multinomial', max_iter=15
        )
        clf_multi.fit(train, target)
        multi_score = clf_multi.score(train, target)
        ovr_score = clf.score(train, target)
        assert_greater(multi_score, ovr_score)

        # Test attributes of LogisticRegressionCV
        assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
        assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
                                                      n_features + 1))
        assert_equal(clf_multi.Cs_.shape, (10, ))
        scores = np.asarray(list(clf_multi.scores_.values()))
        assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
    """All three binary solvers converge to (almost) the same coefficients."""
    X, y = make_classification(n_features=10, n_informative=5, random_state=0)

    coef_of = {}
    for solver in ('newton-cg', 'lbfgs', 'liblinear'):
        clf = LogisticRegression(solver=solver, fit_intercept=False)
        clf.fit(X, y)
        coef_of[solver] = clf.coef_

    # Pairwise agreement to 3 decimals.
    assert_array_almost_equal(coef_of['newton-cg'], coef_of['liblinear'],
                              decimal=3)
    assert_array_almost_equal(coef_of['liblinear'], coef_of['lbfgs'],
                              decimal=3)
    assert_array_almost_equal(coef_of['newton-cg'], coef_of['lbfgs'],
                              decimal=3)
def test_logistic_regression_solvers_multiclass():
    """The solvers also agree on a 3-class problem (OvR)."""
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)

    coef_of = {}
    for solver in ('newton-cg', 'lbfgs', 'liblinear'):
        clf = LogisticRegression(solver=solver, fit_intercept=False)
        clf.fit(X, y)
        coef_of[solver] = clf.coef_

    # Pairwise agreement to 4 decimals.
    assert_array_almost_equal(coef_of['newton-cg'], coef_of['liblinear'],
                              decimal=4)
    assert_array_almost_equal(coef_of['liblinear'], coef_of['lbfgs'],
                              decimal=4)
    assert_array_almost_equal(coef_of['newton-cg'], coef_of['lbfgs'],
                              decimal=4)
def test_logistic_regressioncv_class_weights():
    """Class-weight handling of LogisticRegressionCV across solvers."""
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)

    # Test the liblinear fails when class_weight of type dict is
    # provided, when it is multiclass. However it can handle
    # binary problems.
    clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
                                   solver='liblinear')
    assert_raises(ValueError, clf_lib.fit, X, y)
    # Collapse class 2 into class 1 to obtain a binary problem.
    y_ = y.copy()
    y_[y == 2] = 1
    clf_lib.fit(X, y_)
    assert_array_equal(clf_lib.classes_, [0, 1])

    # Test for class_weight=auto
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               random_state=0)
    clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
                                   class_weight='auto')
    clf_lbf.fit(X, y)
    clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
                                   class_weight='auto')
    clf_lib.fit(X, y)
    # Both solvers must converge to the same auto-weighted solution.
    assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
    """Test that warnings are raised if model does not converge"""
    X, y = make_classification(n_samples=20, n_features=20)
    starved = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
    # Two iterations are never enough: fitting must warn...
    assert_warns(ConvergenceWarning, starved.fit, X, y)
    # ...and the iteration counter must report the hard cap.
    assert_equal(starved.n_iter_, 2)
def test_logistic_regression_multinomial():
    """Tests for the multinomial option in logistic regression"""
    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    # CONSISTENCY FIX: reuse the constants declared above instead of
    # repeating the literals 50/20/3 in the call.
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               n_informative=10, n_classes=n_classes,
                               random_state=0)
    clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    clf_int.fit(X, y)
    assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))

    clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
                                  fit_intercept=False)
    clf_wint.fit(X, y)
    assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))

    # Similar tests for newton-cg solver option
    clf_ncg_int = LogisticRegression(solver='newton-cg',
                                     multi_class='multinomial')
    clf_ncg_int.fit(X, y)
    assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))

    clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
                                      multi_class='multinomial')
    clf_ncg_wint.fit(X, y)
    assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))

    # Compare solutions between lbfgs and newton-cg
    assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
    assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
    assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)

    # Test that the path give almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg']:
        clf_path = LogisticRegressionCV(solver=solver,
                                        multi_class='multinomial', Cs=[1.])
        clf_path.fit(X, y)
        assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
        assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_loss_grad_hess():
    """The multinomial Hessian-vector product agrees with a
    finite-difference slope estimate of the gradient."""
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    Y = np.zeros((n_samples, n_classes))
    ind = np.argmax(np.dot(X, w.T), axis=1)
    # One-hot encode the argmax class; np.arange is the idiomatic row
    # indexer (the old `range(0, n_samples)` built a Python sequence).
    Y[np.arange(n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    _, grad, hessp = _multinomial_loss_grad_hess(w, X, Y, alpha=1.,
                                                 sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)

    # Estimate hessian using least squares as done in
    # test_logistic_loss_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_loss_grad_hess(w + t * vec, X, Y, alpha=1.,
                                    sample_weight=sample_weights)[1]
        for t in d_x
    ])
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
    """Test negative prediction when decision_function values are zero.

    Liblinear predicts the positive class when decision_function values
    are zero. This is a test to verify that we do not do the same.

    See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
    and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
    """
    X, y = make_classification(n_samples=5, n_features=5)
    clf = LogisticRegression(fit_intercept=False)
    clf.fit(X, y)

    # All-zero samples force the (intercept-free) decision function to
    # exactly zero; the predicted class must then be the negative one.
    zero_samples = np.zeros((5, 5))
    assert_array_equal(clf.predict(zero_samples), np.zeros(5))
def test_liblinear_logregcv_sparse():
    """Test LogRegCV with solver='liblinear' works for sparse matrices"""
    X, y = make_classification(n_samples=10, n_features=5)
    model = LogisticRegressionCV(solver='liblinear')
    # Fitting on a CSR matrix must simply succeed.
    model.fit(sparse.csr_matrix(X), y)
| bsd-3-clause |
snnn/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 46 | 6682 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
  """Tests feature_engineering_fn."""

  def testFeatureEngineeringFn(self):
    """A feature_engineering_fn may rename features/labels; the model and
    metrics must see only the transformed tensors."""

    def input_fn():
      return {
          "x": constant_op.constant([1.])
      }, {
          "y": constant_op.constant([11.])
      }

    def feature_engineering_fn(features, labels):
      _, _ = features, labels
      # Replace the raw tensors entirely with renamed, re-valued ones.
      return {
          "transformed_x": constant_op.constant([9.])
      }, {
          "transformed_y": constant_op.constant([99.])
      }

    def model_fn(features, labels):
      # dummy variable:
      _ = variables_lib.Variable([0.])
      _ = labels
      predictions = features["transformed_x"]
      loss = constant_op.constant([2.])
      update_global_step = training_util.get_global_step().assign_add(1)
      return predictions, loss, update_global_step

    estimator = estimator_lib.Estimator(
        model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
    estimator.fit(input_fn=input_fn, steps=1)
    prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
    # predictions = transformed_x (9)
    self.assertEqual(9., prediction)
    metrics = estimator.evaluate(
        input_fn=input_fn,
        steps=1,
        metrics={
            "label": metric_spec.MetricSpec(lambda predictions, labels: labels)
        })
    # labels = transformed_y (99)
    self.assertEqual(99., metrics["label"])

  def testFeatureEngineeringFnWithSameName(self):
    """The transform may overwrite keys in place without being invoked
    twice on the same graph (regression test for GH #12205)."""

    def input_fn():
      return {
          "x": constant_op.constant(["9."])
      }, {
          "y": constant_op.constant(["99."])
      }

    def feature_engineering_fn(features, labels):
      # Github #12205: raise a TypeError if called twice.
      _ = string_ops.string_split(features["x"])
      features["x"] = constant_op.constant([9.])
      labels["y"] = constant_op.constant([99.])
      return features, labels

    def model_fn(features, labels):
      # dummy variable:
      _ = variables_lib.Variable([0.])
      _ = labels
      predictions = features["x"]
      loss = constant_op.constant([2.])
      update_global_step = training_util.get_global_step().assign_add(1)
      return predictions, loss, update_global_step

    estimator = estimator_lib.Estimator(
        model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
    estimator.fit(input_fn=input_fn, steps=1)
    prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
    # predictions = transformed_x (9)
    self.assertEqual(9., prediction)
    metrics = estimator.evaluate(
        input_fn=input_fn,
        steps=1,
        metrics={
            "label": metric_spec.MetricSpec(lambda predictions, labels: labels)
        })
    # labels = transformed_y (99)
    self.assertEqual(99., metrics["label"])

  def testNoneFeatureEngineeringFn(self):
    """With no feature_engineering_fn the raw input is used; the two
    estimators must therefore predict different values."""

    def input_fn():
      return {
          "x": constant_op.constant([1.])
      }, {
          "y": constant_op.constant([11.])
      }

    def feature_engineering_fn(features, labels):
      _, _ = features, labels
      return {
          "x": constant_op.constant([9.])
      }, {
          "y": constant_op.constant([99.])
      }

    def model_fn(features, labels):
      # dummy variable:
      _ = variables_lib.Variable([0.])
      _ = labels
      predictions = features["x"]
      loss = constant_op.constant([2.])
      update_global_step = training_util.get_global_step().assign_add(1)
      return predictions, loss, update_global_step

    estimator_with_fe_fn = estimator_lib.Estimator(
        model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
    estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
    estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
    estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)

    # predictions = x
    prediction_with_fe_fn = next(
        estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
    self.assertEqual(9., prediction_with_fe_fn)
    prediction_without_fe_fn = next(
        estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
    self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
  """Custom optimizer tests."""

  def testIrisMomentum(self):
    """DNNClassifier accepts a user-supplied optimizer factory and still
    reaches reasonable accuracy (> 0.65) on the iris hold-out split."""
    random.seed(42)

    iris = datasets.load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=42)

    def custom_optimizer():
      # Momentum SGD instead of the estimator's default optimizer.
      return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)

    classifier = learn.DNNClassifier(
        hidden_units=[10, 20, 10],
        feature_columns=learn.infer_real_valued_columns_from_input(x_train),
        n_classes=3,
        optimizer=custom_optimizer,
        config=learn.RunConfig(tf_random_seed=1))
    classifier.fit(x_train, y_train, steps=400)
    predictions = np.array(list(classifier.predict_classes(x_test)))
    score = accuracy_score(y_test, predictions)
    self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
# Run the full test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
ericl1u/eece7398 | controllers/pydmps/cs.py | 1 | 4378 | '''
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
class CanonicalSystem():
    """Canonical dynamical system after Schaal (2002).

    Supplies the phase variable ``x`` that substitutes for explicit
    time when rolling out a dynamic movement primitive.
    """

    def __init__(self, dt, pattern='discrete'):
        """Configure the system.

        dt float: integration timestep
        pattern string: either 'discrete' or 'rhythmic'
        """
        self.ax = 1.0
        self.pattern = pattern

        # Discrete movements decay over one second; rhythmic movements
        # cycle over a full period of 2*pi.
        if pattern == 'discrete':
            self.step = self.step_discrete
            self.run_time = 1.0
        elif pattern == 'rhythmic':
            self.step = self.step_rhythmic
            self.run_time = 2 * np.pi
        else:
            raise Exception('Invalid pattern type specified: \
                Please specify rhythmic or discrete.')

        self.dt = dt
        self.timesteps = int(self.run_time / self.dt)
        self.reset_state()

    def rollout(self, **kwargs):
        """Generate x for open loop movements.
        """
        n_steps = self.timesteps
        if 'tau' in kwargs:
            # A faster execution (tau > 1) finishes in fewer steps.
            n_steps = int(n_steps / kwargs['tau'])

        self.x_track = np.zeros(n_steps)
        self.reset_state()
        for t in range(n_steps):
            self.x_track[t] = self.x  # record, then advance
            self.step(**kwargs)

        return self.x_track

    def reset_state(self):
        """Reset the system state"""
        self.x = 1.0

    def step_discrete(self, tau=1.0, error_coupling=1.0):
        """Advance x one Euler step for a discrete movement.

        x decays exponentially from 1 toward 0 via dx = -ax * x.

        tau float: gain on execution time;
            increase tau to make the system execute faster
        error_coupling float: slow down if the error is > 1
        """
        self.x += -self.ax * self.x * error_coupling * tau * self.dt
        return self.x

    def step_rhythmic(self, tau=1.0, error_coupling=1.0):
        """Advance x one step for a rhythmic movement.

        The phase grows linearly (dx = 1), one run_time per cycle.

        tau float: gain on execution time;
            increase tau to make the system execute faster
        error_coupling float: slow down if the error is > 1
        """
        self.x += error_coupling * tau * self.dt
        return self.x
#==============================
# Test code
#==============================
if __name__ == "__main__":

    # --- Discrete canonical system demo ---
    cs = CanonicalSystem(dt=.001, pattern='discrete')
    # test normal rollout
    x_track1 = cs.rollout()
    cs.reset_state()

    # test error coupling
    timesteps = int(1.0/.001)
    x_track2 = np.zeros(timesteps)
    err = np.zeros(timesteps)
    err[200:400] = 2  # inject a burst of tracking error mid-trajectory
    err_coup = 1.0 / (1 + err)  # coupling < 1 slows the phase while err > 0
    for i in range(timesteps):
        x_track2[i] = cs.step(error_coupling=err_coup[i])

    import matplotlib.pyplot as plt
    fig, ax1 = plt.subplots(figsize=(6,3))
    ax1.plot(x_track1, lw=2)
    ax1.plot(x_track2, lw=2)
    plt.grid()
    plt.legend(['normal rollout', 'error coupling'])
    # Overlay the injected error on a secondary axis.
    ax2 = ax1.twinx()
    ax2.plot(err, 'r-', lw=2)
    plt.legend(['error'], loc='lower right')
    plt.ylim(0, 3.5)
    plt.xlabel('time (s)')
    plt.ylabel('x')
    plt.title('Canonical system - discrete')

    for t1 in ax2.get_yticklabels():
        t1.set_color('r')

    plt.tight_layout()

    # --- Rhythmic canonical system demo ---
    cs = CanonicalSystem(dt=.001, pattern='rhythmic')
    # test normal rollout
    x_track1 = cs.rollout()

    import matplotlib.pyplot as plt
    fig, ax1 = plt.subplots(figsize=(6,3))
    ax1.plot(x_track1, lw=2)
    plt.grid()
    plt.legend(['normal rollout'], loc='lower right')
    plt.xlabel('time (s)')
    plt.ylabel('x')
    plt.title('Canonical system - rhythmic')
    plt.show()
| gpl-3.0 |
lucidfrontier45/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 5 | 2555 | import unittest
import nose
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less
from .test_gmm import GMMTester
np.seterr(all='warn')
def test_class_weights():
    """Check that DPGMM/VBGMM update class weights so that the used
    components dominate and the unused ones stay near zero."""
    # check that the class weights are updated
    # simple 3 cluster dataset
    X, y = make_blobs(random_state=1)
    for Model in [DPGMM, VBGMM]:
        dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
        dpgmm.fit(X)
        # get indices of components that are used:
        indices = np.unique(dpgmm.predict(X))
        # BUG FIX: was ``dtype=np.bool``; the np.bool alias was removed in
        # NumPy 1.24 -- the builtin ``bool`` is equivalent on all versions.
        active = np.zeros(10, dtype=bool)
        active[indices] = True
        # used components are important
        assert_array_less(.1, dpgmm.weights_[active])
        # others are not
        assert_array_less(dpgmm.weights_[~active], .05)
def test_log_normalize():
    """log_normalize of log(2*v) should recover v (v already sums to 1)."""
    v = np.array([0.1, 0.8, 0.01, 0.09])
    a = np.log(2 * v)
    assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
    # Assigned as ``VBGMMTester.model`` below, so it is called like a
    # method (hence the ``self`` parameter) and acts as a VBGMM factory.
    return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
    """Mixin that runs the shared GMMTester battery against DPGMM."""
    model = DPGMM
    do_test_eval = False

    def score(self, g, train_obs):
        # Score a fit via its variational lower bound on the data.
        _, z = g.eval(train_obs)
        return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
    """Run the DPGMM test battery for each covariance structure."""
    covariance_type = 'spherical'
    setUp = GMMTester._setUp


class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
    covariance_type = 'diag'
    setUp = GMMTester._setUp


class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
    covariance_type = 'tied'
    setUp = GMMTester._setUp


class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
    covariance_type = 'full'
    setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
    """Mixin that runs the shared GMMTester battery against VBGMM."""
    model = do_model
    do_test_eval = False

    def score(self, g, train_obs):
        # Score a fit via its variational lower bound on the data.
        _, z = g.eval(train_obs)
        return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
    """Run the VBGMM test battery for each covariance structure."""
    covariance_type = 'spherical'
    setUp = GMMTester._setUp


class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
    covariance_type = 'diag'
    setUp = GMMTester._setUp


class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
    covariance_type = 'tied'
    setUp = GMMTester._setUp


class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
    covariance_type = 'full'
    setUp = GMMTester._setUp
if __name__ == '__main__':
    # Allow running this test module directly through nose.
    nose.runmodule()
| bsd-3-clause |
JudoWill/glue | glue/core/roi.py | 1 | 34433 | from __future__ import absolute_import, division, print_function
from functools import wraps
import numpy as np
from matplotlib.patches import Polygon, Rectangle, Ellipse, PathPatch
from matplotlib.patches import Path as mplPath
from matplotlib.transforms import IdentityTransform, blended_transform_factory
import copy
np.seterr(all='ignore')
from .exceptions import UndefinedROI
__all__ = ['Roi', 'RectangularROI', 'CircularROI', 'PolygonalROI',
'AbstractMplRoi', 'MplRectangularROI', 'MplCircularROI',
'MplPolygonalROI', 'MplXRangeROI', 'MplYRangeROI',
'XRangeROI', 'RangeROI', 'YRangeROI','VertexROIBase']
PATCH_COLOR = '#FFFF00'
SCRUBBING_KEY = 'control'
try:
    from matplotlib.nxutils import points_inside_poly
except ImportError:  # nxutils removed in MPL v1.3
    from matplotlib.path import Path as mplPath

    def points_inside_poly(xypts, xyvts):
        # Fallback using matplotlib.path: return a boolean per point of
        # xypts, True where it lies inside the polygon with vertices xyvts.
        p = mplPath(xyvts)
        return p.contains_points(xypts)
def aspect_ratio(axes):
    """Return the pixel height / width of a box spanning one data unit
    in x and one data unit in y on *axes*."""
    pos = axes.get_position()
    fig = axes.figure
    width_in = pos.width * fig.get_figwidth()
    height_in = pos.height * fig.get_figheight()
    xmin, xmax = axes.get_xlim()
    ymin, ymax = axes.get_ylim()
    # screen aspect of the axes box, scaled by the data-range aspect
    return height_in / width_in / (ymax - ymin) * (xmax - xmin)
def data_to_norm(axes, x, y):
    """Convert data coordinates to normalized (0-1) axes coordinates.

    Returns an (N, 2) array; x and y are flattened and paired column-wise.
    """
    xy = np.column_stack((np.asarray(x).ravel(), np.asarray(y).ravel()))
    pixel = axes.transData.transform(xy)
    norm = axes.transAxes.inverted().transform(pixel)
    return norm
def data_to_pixel(axes, x, y):
    """Convert data coordinates to display (pixel) coordinates.

    Returns an (N, 2) array; x and y are flattened and paired column-wise.
    """
    xy = np.column_stack((np.asarray(x).ravel(), np.asarray(y).ravel()))
    return axes.transData.transform(xy)
def pixel_to_data(axes, x, y):
    """Convert display (pixel) coordinates back to data coordinates.

    Inverse of :func:`data_to_pixel`; returns an (N, 2) array.
    """
    xy = np.column_stack((np.asarray(x).ravel(), np.asarray(y).ravel()))
    return axes.transData.inverted().transform(xy)
class Roi(object):  # pragma: no cover

    """
    A geometrical 2D region of interest.

    Glue uses Roi's to represent user-drawn regions on plots. There
    are many specific subtypes of Roi, but they all have a ``contains``
    method to test whether a collection of 2D points lies inside the region.
    """

    def contains(self, x, y):
        """Return true/false for each x/y pair.

        :param x: Array of X locations
        :param y: Array of Y locations

        :returns: A Boolean array, where each element is True
                  if the corresponding (x,y) tuple is inside the Roi.

        :raises: UndefinedROI exception if not defined
        """
        raise NotImplementedError()

    def center(self):
        """Return the (x,y) coordinates of the ROI center"""
        raise NotImplementedError()

    def move_to(self, x, y):
        """Translate the ROI to a center of (x, y)"""
        raise NotImplementedError()

    def defined(self):
        """ Returns whether or not the subset is properly defined """
        raise NotImplementedError()

    def to_polygon(self):
        """ Returns a tuple of x and y points, approximating the ROI
        as a polygon."""
        raise NotImplementedError

    def copy(self):
        """
        Return a clone of the ROI
        """
        # shallow copy is sufficient for scalar-attribute ROIs; subclasses
        # holding mutable state (e.g. vertex lists) inherit this as-is
        return copy.copy(self)
class PointROI(Roi):

    """A degenerate ROI representing a single (x, y) point."""

    def __init__(self, x=None, y=None):
        self.x = x
        self.y = y

    def contains(self, x, y):
        # A point has zero area, so nothing is ever inside it.
        return False

    def move_to(self, x, y):
        self.x, self.y = x, y

    def defined(self):
        try:
            return np.isfinite([self.x, self.y]).all()
        except TypeError:
            # x or y is None (or otherwise non-numeric)
            return False

    def center(self):
        return self.x, self.y

    def reset(self):
        self.x = None
        self.y = None
class RectangularROI(Roi):

    """
    A 2D rectangular region of interest.
    """

    def __init__(self, xmin=None, xmax=None, ymin=None, ymax=None):
        super(RectangularROI, self).__init__()
        self.xmin = xmin
        self.xmax = xmax
        self.ymin = ymin
        self.ymax = ymax

    def __str__(self):
        if not self.defined():
            return "Undefined Rectangular ROI"
        return "x=[%0.3f, %0.3f], y=[%0.3f, %0.3f]" % (self.xmin, self.xmax,
                                                       self.ymin, self.ymax)

    def center(self):
        """(x, y) coordinates of the rectangle's midpoint."""
        return self.xmin + self.width() / 2, self.ymin + self.height() / 2

    def move_to(self, x, y):
        """Translate the rectangle so its center lands on (x, y)."""
        cx, cy = self.center()
        dx, dy = x - cx, y - cy
        self.xmin += dx
        self.xmax += dx
        self.ymin += dy
        self.ymax += dy

    def corner(self):
        """Lower-left (xmin, ymin) corner."""
        return (self.xmin, self.ymin)

    def width(self):
        return self.xmax - self.xmin

    def height(self):
        return self.ymax - self.ymin

    def contains(self, x, y):
        """
        Test whether a set of (x,y) points falls strictly inside
        the region of interest.

        :param x: A scalar or numpy array of x points
        :param y: A scalar or numpy array of y points

        :returns: Boolean scalar/array, True where (x, y) is inside
        :raises: UndefinedROI if the rectangle is not yet defined
        """
        if not self.defined():
            raise UndefinedROI
        inside_x = (x > self.xmin) & (x < self.xmax)
        inside_y = (y > self.ymin) & (y < self.ymax)
        return inside_x & inside_y

    def update_limits(self, xmin, ymin, xmax, ymax):
        """
        Set the bounds, normalizing so that min <= max on each axis.
        """
        self.xmin, self.xmax = min(xmin, xmax), max(xmin, xmax)
        self.ymin, self.ymax = min(ymin, ymax), max(ymin, ymax)

    def reset(self):
        """
        Clear the rectangle back to the undefined state.
        """
        self.xmin = None
        self.xmax = None
        self.ymin = None
        self.ymax = None

    def defined(self):
        return self.xmin is not None

    def to_polygon(self):
        if not self.defined():
            return [], []
        xs = [self.xmin, self.xmax, self.xmax, self.xmin, self.xmin]
        ys = [self.ymin, self.ymin, self.ymax, self.ymax, self.ymin]
        return xs, ys

    def __gluestate__(self, context):
        return dict(xmin=self.xmin, xmax=self.xmax,
                    ymin=self.ymin, ymax=self.ymax)

    @classmethod
    def __setgluestate__(cls, rec, context):
        return cls(xmin=rec['xmin'], xmax=rec['xmax'],
                   ymin=rec['ymin'], ymax=rec['ymax'])
class RangeROI(Roi):

    """A 1D interval along either the x or y axis."""

    def __init__(self, orientation, min=None, max=None):
        """:param orientation: 'x' or 'y'. Sets which axis to range"""
        super(RangeROI, self).__init__()
        if orientation not in ['x', 'y']:
            raise TypeError("Orientation must be one of 'x', 'y'")
        self.min = min
        self.max = max
        self.ori = orientation

    def __str__(self):
        if not self.defined():
            return "Undefined %s" % type(self).__name__
        return "%0.3f < %s < %0.3f" % (self.min, self.ori, self.max)

    def range(self):
        return self.min, self.max

    def center(self):
        return (self.min + self.max) / 2

    def set_range(self, lo, hi):
        self.min, self.max = lo, hi

    def move_to(self, center):
        # Translate the interval so its midpoint sits at ``center``
        shift = center - self.center()
        self.min += shift
        self.max += shift

    def contains(self, x, y):
        if not self.defined():
            raise UndefinedROI()
        # test along the oriented axis only; the other coord is ignored
        coord = x if self.ori == 'x' else y
        return (coord > self.min) & (coord < self.max)

    def reset(self):
        self.min = None
        self.max = None

    def defined(self):
        return self.min is not None and self.max is not None

    def to_polygon(self):
        if not self.defined():
            return [], []
        # an effectively infinite strip across the other axis
        along = [self.min, self.max, self.max, self.min, self.min]
        across = [-1e100, -1e100, 1e100, 1e100, -1e100]
        if self.ori == 'x':
            return along, across
        return across, along

    def __gluestate__(self, context):
        return dict(ori=self.ori, min=self.min, max=self.max)

    @classmethod
    def __setgluestate__(cls, rec, context):
        return cls(rec['ori'], min=rec['min'], max=rec['max'])
class XRangeROI(RangeROI):

    """A RangeROI restricted to the x axis."""

    def __init__(self, min=None, max=None):
        super(XRangeROI, self).__init__('x', min=min, max=max)


class YRangeROI(RangeROI):

    """A RangeROI restricted to the y axis."""

    def __init__(self, min=None, max=None):
        super(YRangeROI, self).__init__('y', min=min, max=max)
class CircularROI(Roi):

    """
    A 2D circular region of interest.
    """

    def __init__(self, xc=None, yc=None, radius=None):
        super(CircularROI, self).__init__()
        self.xc = xc
        self.yc = yc
        self.radius = radius

    def contains(self, x, y):
        """
        Test whether a set of (x,y) points falls inside the circle.

        :param x: A list of x points
        :param y: A list of y points

        :returns: Boolean array, True where the point is inside
        :raises: UndefinedROI if center or radius are unset
        """
        if not self.defined():
            raise UndefinedROI
        x = np.asarray(x)
        y = np.asarray(y)
        # strict inequality: boundary points are outside
        return (x - self.xc) ** 2 + (y - self.yc) ** 2 < self.radius ** 2

    def set_center(self, x, y):
        """
        Move the circle's center to (x, y).
        """
        self.xc = x
        self.yc = y

    def set_radius(self, radius):
        """
        Set the circle's radius.
        """
        self.radius = radius

    def get_center(self):
        return self.xc, self.yc

    def get_radius(self):
        return self.radius

    def reset(self):
        """
        Clear the circle back to the undefined state.
        """
        self.xc = None
        self.yc = None
        self.radius = 0.

    def defined(self):
        """ Returns True if center and radius have all been set """
        return (self.xc is not None and
                self.yc is not None and
                self.radius is not None)

    def to_polygon(self):
        """ Approximate the circle as a 20-point polygon; returns (x, y) """
        if not self.defined():
            return [], []
        theta = np.linspace(0, 2 * np.pi, num=20)
        return (self.xc + self.radius * np.cos(theta),
                self.yc + self.radius * np.sin(theta))

    def __gluestate__(self, context):
        return dict(xc=self.xc, yc=self.yc, radius=self.radius)

    @classmethod
    def __setgluestate__(cls, rec, context):
        return cls(xc=rec['xc'], yc=rec['yc'], radius=rec['radius'])
class VertexROIBase(Roi):

    """Common vertex-list machinery shared by polygon and path ROIs."""

    def __init__(self, vx=None, vy=None):
        """
        :param vx: initial x vertices
        :type vx: list
        :param vy: initial y vertices
        :type vy: list
        """
        super(VertexROIBase, self).__init__()
        self.vx = [] if vx is None else vx
        self.vy = [] if vy is None else vy

    def add_point(self, x, y):
        """
        Append a vertex (x, y) to the ROI.
        """
        self.vx.append(x)
        self.vy.append(y)

    def reset(self):
        """
        Discard all vertices.
        """
        self.vx = []
        self.vy = []

    def replace_last_point(self, x, y):
        """Overwrite the most recently added vertex with (x, y)."""
        if len(self.vx) > 0:
            self.vx[-1] = x
            self.vy[-1] = y

    def remove_point(self, x, y, thresh=None):
        """Remove the vertex closest to a reference (xy) point.

        :param x: The x coordinate of the reference point
        :param y: The y coordinate of the reference point
        :param thresh: An optional threshold. If present, the nearest
            vertex is only removed when its distance is below thresh
        """
        if len(self.vx) == 0:
            return
        # squared distance from each vertex to the reference point
        dist = [(x - a) ** 2 + (y - b) ** 2
                for a, b in zip(self.vx, self.vy)]
        nearest = min(range(len(dist)), key=dist.__getitem__)
        if thresh is not None and dist[nearest] > thresh ** 2:
            return
        del self.vx[nearest]
        del self.vy[nearest]

    def defined(self):
        return len(self.vx) > 0

    def to_polygon(self):
        return self.vx, self.vy

    def __gluestate__(self, context):
        return dict(vx=np.asarray(self.vx).tolist(),
                    vy=np.asarray(self.vy).tolist())

    @classmethod
    def __setgluestate__(cls, rec, context):
        return cls(vx=rec['vx'], vy=rec['vy'])
class PolygonalROI(VertexROIBase):

    """
    A class to define 2D polygonal regions-of-interest
    """

    def __str__(self):
        result = 'Polygonal ROI ('
        result += ','.join(['(%s, %s)' % (x, y)
                            for x, y in zip(self.vx, self.vy)])
        result += ')'
        return result

    def contains(self, x, y):
        """
        Test whether a set of (x,y) points falls within
        the region of interest

        :param x: A list of x points
        :param y: A list of y points

        *Returns*

           A list of True/False values, for whether each (x,y)
           point falls within the ROI
        """
        if not self.defined():
            raise UndefinedROI
        if not isinstance(x, np.ndarray):
            x = np.asarray(x)
        if not isinstance(y, np.ndarray):
            y = np.asarray(y)
        xypts = np.column_stack((x.flat, y.flat))
        xyvts = np.column_stack((self.vx, self.vy))
        result = points_inside_poly(xypts, xyvts)
        # points with non-finite coordinates can never be inside
        good = np.isfinite(xypts).all(axis=1)
        result[~good] = False
        result.shape = x.shape
        return result

    def move_to(self, xdelta, ydelta):
        # NOTE(review): unlike Roi.move_to(x, y), which recenters at an
        # absolute position, this translates by a relative offset --
        # confirm callers expect delta semantics here.
        self.vx = list(map(lambda x: x + xdelta, self.vx))
        self.vy = list(map(lambda y: y + ydelta, self.vy))
class Path(VertexROIBase):

    """Vertex-list ROI used for path selections (see MplPathROI)."""

    def __str__(self):
        result = 'Path ('
        result += ','.join(['(%s, %s)' % (x, y)
                            for x, y in zip(self.vx, self.vy)])
        result += ')'
        return result
class AbstractMplRoi(object):  # pragma: no cover

    """ Base class for objects which use
    Matplotlib user events to edit/display ROIs
    """

    def __init__(self, axes):
        """
        :param axes: The Matplotlib Axes object to draw to
        """
        self._axes = axes
        self._roi = self._roi_factory()
        self._previous_roi = None
        self._mid_selection = False
        self._scrubbing = False

    def _draw(self):
        # trigger a full canvas redraw
        self._axes.figure.canvas.draw()

    def _roi_factory(self):
        # subclasses return the Roi subtype they edit
        raise NotImplementedError()

    def roi(self):
        """Return a copy of the ROI being edited."""
        return self._roi.copy()

    def reset(self, include_roi=True):
        """Abort any selection in progress; optionally clear the ROI too."""
        self._mid_selection = False
        self._scrubbing = False
        if include_roi:
            self._roi.reset()
        self._sync_patch()

    def active(self):
        """True while a mouse selection is in progress."""
        return self._mid_selection

    def start_selection(self, event):
        raise NotImplementedError()

    def update_selection(self, event):
        raise NotImplementedError()

    def finalize_selection(self, event):
        raise NotImplementedError()

    def abort_selection(self, event):
        # roll back to the ROI that existed before the selection began
        if self._mid_selection:
            self._roi_restore()
        self.reset(include_roi=False)

    def _sync_patch(self):
        raise NotImplementedError()

    def _roi_store(self):
        # snapshot the ROI so an aborted selection can roll back
        self._previous_roi = self._roi.copy()

    def _roi_restore(self):
        self._roi = self._previous_roi
class MplPickROI(AbstractMplRoi):

    """Point-picking ROI: tracks the cursor position and draws nothing."""

    def _draw(self):
        pass

    def _roi_factory(self):
        return PointROI()

    def start_selection(self, event):
        self._roi.x = event.xdata
        self._roi.y = event.ydata

    def update_selection(self, event):
        self._roi.x = event.xdata
        self._roi.y = event.ydata

    def finalize_selection(self, event):
        self._roi.x = event.xdata
        self._roi.y = event.ydata

    def _sync_patch(self):
        # nothing is rendered for a pick ROI
        pass
class MplRectangularROI(AbstractMplRoi):

    """
    A subclass of RectangularROI that also renders the ROI to a plot

    *Attributes*:

        plot_opts: Dictionary instance
                   A dictionary of plot keywords that are passed to
                   the patch representing the ROI. These control
                   the visual properties of the ROI
    """

    def __init__(self, axes):
        """
        :param axes: A matplotlib Axes object to attach the graphical ROI to
        """
        AbstractMplRoi.__init__(self, axes)
        self._xi = None
        self._yi = None
        self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
                          'alpha': 0.3}
        self._patch = Rectangle((0., 0.), 1., 1.)
        self._patch.set_zorder(100)
        self._setup_patch()

    def _setup_patch(self):
        self._axes.add_patch(self._patch)
        self._patch.set_visible(False)
        self._sync_patch()

    def _roi_factory(self):
        return RectangularROI()

    def start_selection(self, event):
        # ignore clicks outside our axes
        if event.inaxes != self._axes:
            return False
        # with the scrubbing key held, a drag moves an existing ROI
        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False
            elif not self._roi.contains(event.xdata, event.ydata):
                return False
        self._roi_store()
        self._xi = event.xdata
        self._yi = event.ydata
        if event.key == SCRUBBING_KEY:
            self._scrubbing = True
            self._cx, self._cy = self._roi.center()
        else:
            self.reset()
            self._roi.update_limits(event.xdata, event.ydata,
                                    event.xdata, event.ydata)
        self._mid_selection = True
        self._sync_patch()

    def update_selection(self, event):
        if not self._mid_selection or event.inaxes != self._axes:
            return False
        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False
        if self._scrubbing:
            # drag: translate by the cursor offset since start_selection
            self._roi.move_to(self._cx + event.xdata - self._xi,
                              self._cy + event.ydata - self._yi)
        else:
            # rubber-band: grow the rectangle between anchor and cursor
            self._roi.update_limits(min(event.xdata, self._xi),
                                    min(event.ydata, self._yi),
                                    max(event.xdata, self._xi),
                                    max(event.ydata, self._yi))
        self._sync_patch()

    def finalize_selection(self, event):
        self._scrubbing = False
        self._mid_selection = False
        self._patch.set_visible(False)
        self._draw()

    def _sync_patch(self):
        # mirror the ROI geometry/appearance onto the matplotlib patch
        if self._roi.defined():
            corner = self._roi.corner()
            width = self._roi.width()
            height = self._roi.height()
            self._patch.set_xy(corner)
            self._patch.set_width(width)
            self._patch.set_height(height)
            self._patch.set(**self.plot_opts)
            self._patch.set_visible(True)
        else:
            self._patch.set_visible(False)
        self._draw()

    def __str__(self):
        return "MPL Rectangle: %s" % self._patch
class MplXRangeROI(AbstractMplRoi):

    """Interactive x-range selection rendered as a vertical band."""

    def __init__(self, axes):
        """
        :param axes: A matplotlib Axes object to attach the graphical ROI to
        """
        AbstractMplRoi.__init__(self, axes)
        self._xi = None
        self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
                          'alpha': 0.3}
        # x in data coords, y in axes (0-1) coords, so the band always
        # spans the full height of the axes
        trans = blended_transform_factory(self._axes.transData,
                                          self._axes.transAxes)
        self._patch = Rectangle((0., 0.), 1., 1., transform=trans)
        self._patch.set_zorder(100)
        self._setup_patch()

    def _setup_patch(self):
        self._axes.add_patch(self._patch)
        self._patch.set_visible(False)
        self._sync_patch()

    def _roi_factory(self):
        return XRangeROI()

    def start_selection(self, event):
        if event.inaxes != self._axes:
            return False
        # with the scrubbing key held, a drag moves an existing range
        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False
            elif not self._roi.contains(event.xdata, event.ydata):
                return False
        self._roi_store()
        if event.key == SCRUBBING_KEY:
            self._scrubbing = True
            self._dx = event.xdata - self._roi.center()
        else:
            self.reset()
            self._roi.set_range(event.xdata, event.xdata)
            self._xi = event.xdata
        self._mid_selection = True
        self._sync_patch()

    def update_selection(self, event):
        if not self._mid_selection or event.inaxes != self._axes:
            return False
        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False
        if self._scrubbing:
            self._roi.move_to(event.xdata + self._dx)
        else:
            self._roi.set_range(min(event.xdata, self._xi),
                                max(event.xdata, self._xi))
        self._sync_patch()

    def finalize_selection(self, event):
        self._scrubbing = False
        self._mid_selection = False
        self._patch.set_visible(False)
        self._draw()

    def _sync_patch(self):
        if self._roi.defined():
            rng = self._roi.range()
            self._patch.set_xy((rng[0], 0))
            self._patch.set_width(rng[1] - rng[0])
            self._patch.set_height(1)
            self._patch.set(**self.plot_opts)
            self._patch.set_visible(True)
        else:
            self._patch.set_visible(False)
        self._draw()
class MplYRangeROI(AbstractMplRoi):

    """Interactive y-range selection rendered as a horizontal band."""

    def __init__(self, axes):
        """
        :param axes: A matplotlib Axes object to attach the graphical ROI to
        """
        AbstractMplRoi.__init__(self, axes)
        self._xi = None
        self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
                          'alpha': 0.3}
        # x in axes (0-1) coords, y in data coords, so the band always
        # spans the full width of the axes
        trans = blended_transform_factory(self._axes.transAxes,
                                          self._axes.transData)
        self._patch = Rectangle((0., 0.), 1., 1., transform=trans)
        self._patch.set_zorder(100)
        self._setup_patch()

    def _setup_patch(self):
        self._axes.add_patch(self._patch)
        self._patch.set_visible(False)
        self._sync_patch()

    def _roi_factory(self):
        return YRangeROI()

    def start_selection(self, event):
        if event.inaxes != self._axes:
            return False
        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False
            elif not self._roi.contains(event.xdata, event.ydata):
                return False
        self._roi_store()
        if event.key == SCRUBBING_KEY:
            self._scrubbing = True
            self._dy = event.ydata - self._roi.center()
        else:
            self.reset()
            self._roi.set_range(event.ydata, event.ydata)
            # NOTE: despite the name, _xi holds the anchor *y* value here
            self._xi = event.ydata
        self._mid_selection = True
        self._sync_patch()

    def update_selection(self, event):
        if not self._mid_selection or event.inaxes != self._axes:
            return False
        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False
        if self._scrubbing:
            self._roi.move_to(event.ydata + self._dy)
        else:
            self._roi.set_range(min(event.ydata, self._xi),
                                max(event.ydata, self._xi))
        self._sync_patch()

    def finalize_selection(self, event):
        self._scrubbing = False
        self._mid_selection = False
        self._patch.set_visible(False)
        self._draw()

    def _sync_patch(self):
        if self._roi.defined():
            rng = self._roi.range()
            self._patch.set_xy((0, rng[0]))
            self._patch.set_height(rng[1] - rng[0])
            self._patch.set_width(1)
            self._patch.set(**self.plot_opts)
            self._patch.set_visible(True)
        else:
            self._patch.set_visible(False)
        self._draw()
class MplCircularROI(AbstractMplRoi):

    """
    Class to display / edit circular ROIs using matplotlib

    Since circles on the screen may not be circles in the data
    (due, e.g., to logarithmic scalings on the axes), the
    ultimate ROI that is created is a polygonal ROI

    :param plot_opts:

        A dictionary of plot keywords that are passed to
        the patch representing the ROI. These control
        the visual properties of the ROI
    """

    def __init__(self, axes):
        """
        :param axes: A matplotlib Axes object to attach the graphical ROI to
        """
        AbstractMplRoi.__init__(self, axes)
        self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
                          'alpha': 0.3}
        self._xi = None
        self._yi = None
        self._setup_patch()

    def _setup_patch(self):
        # the patch lives in *pixel* coordinates (IdentityTransform),
        # so the underlying CircularROI is defined in pixel space too
        self._patch = Ellipse((0., 0.), transform=IdentityTransform(),
                              width=0., height=0.,)
        self._patch.set_zorder(100)
        self._patch.set(**self.plot_opts)
        self._axes.add_patch(self._patch)
        self._patch.set_visible(False)
        self._sync_patch()

    def _roi_factory(self):
        return CircularROI()

    def _sync_patch(self):
        # Update geometry
        if not self._roi.defined():
            self._patch.set_visible(False)
        else:
            xy = self._roi.get_center()
            r = self._roi.get_radius()
            self._patch.center = xy
            self._patch.width = 2. * r
            self._patch.height = 2. * r
            self._patch.set_visible(True)
        # Update appearance
        self._patch.set(**self.plot_opts)
        # Refresh
        self._axes.figure.canvas.draw()

    def start_selection(self, event):
        if event.inaxes != self._axes:
            return False
        # work in pixel coordinates throughout
        xy = data_to_pixel(self._axes, [event.xdata], [event.ydata])
        xi = xy[0, 0]
        yi = xy[0, 1]
        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False
            elif not self._roi.contains(xi, yi):
                return False
        self._roi_store()
        if event.key == SCRUBBING_KEY:
            self._scrubbing = True
            (xc, yc) = self._roi.get_center()
            self._dx = xc - xi
            self._dy = yc - yi
        else:
            self.reset()
            self._roi.set_center(xi, yi)
            self._roi.set_radius(0.)
            self._xi = xi
            self._yi = yi
        self._mid_selection = True
        self._sync_patch()

    def update_selection(self, event):
        if not self._mid_selection or event.inaxes != self._axes:
            return False
        xy = data_to_pixel(self._axes, [event.xdata], [event.ydata])
        xi = xy[0, 0]
        yi = xy[0, 1]
        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False
        if self._scrubbing:
            self._roi.set_center(xi + self._dx, yi + self._dy)
        else:
            # radius = pixel distance between anchor and cursor
            dx = xy[0, 0] - self._xi
            dy = xy[0, 1] - self._yi
            self._roi.set_radius(np.hypot(dx, dy))
        self._sync_patch()

    def roi(self):
        # convert the pixel-space circle into a 200-vertex polygon in
        # data coordinates (the circle may not be circular in data space)
        if not self._roi.defined():
            return PolygonalROI()
        theta = np.linspace(0, 2 * np.pi, num=200)
        xy_center = self._roi.get_center()
        rad = self._roi.get_radius()
        x = xy_center[0] + rad * np.cos(theta)
        y = xy_center[1] + rad * np.sin(theta)
        xy_data = pixel_to_data(self._axes, x, y)
        vx = xy_data[:, 0].ravel().tolist()
        vy = xy_data[:, 1].ravel().tolist()
        result = PolygonalROI(vx, vy)
        return result

    def finalize_selection(self, event):
        self._scrubbing = False
        self._mid_selection = False
        self._patch.set_visible(False)
        self._axes.figure.canvas.draw()
class MplPolygonalROI(AbstractMplRoi):

    """
    Defines and displays polygonal ROIs on matplotlib plots

    Attributes:

        plot_opts: Dictionary instance
                   A dictionary of plot keywords that are passed to
                   the patch representing the ROI. These control
                   the visual properties of the ROI
    """

    def __init__(self, axes):
        """
        :param axes: A matplotlib Axes object to attach the graphical ROI to
        """
        AbstractMplRoi.__init__(self, axes)
        self.plot_opts = {'edgecolor': PATCH_COLOR, 'facecolor': PATCH_COLOR,
                          'alpha': 0.3}
        self._setup_patch()

    def _setup_patch(self):
        # placeholder vertices; real geometry is set in _sync_patch
        self._patch = Polygon(np.array(list(zip([0, 1], [0, 1]))))
        self._patch.set_zorder(100)
        self._patch.set(**self.plot_opts)
        self._axes.add_patch(self._patch)
        self._patch.set_visible(False)
        self._sync_patch()

    def _roi_factory(self):
        return PolygonalROI()

    def _sync_patch(self):
        # Update geometry
        if not self._roi.defined():
            self._patch.set_visible(False)
        else:
            # close the polygon by repeating the first vertex
            x, y = self._roi.to_polygon()
            self._patch.set_xy(list(zip(x + [x[0]],
                                        y + [y[0]])))
            self._patch.set_visible(True)
        # Update appearance
        self._patch.set(**self.plot_opts)
        # Refresh
        self._axes.figure.canvas.draw()

    def start_selection(self, event):
        if event.inaxes != self._axes:
            return False
        # with the scrubbing key held, a drag moves an existing polygon
        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False
            elif not self._roi.contains(event.xdata, event.ydata):
                return False
        self._roi_store()
        if event.key == SCRUBBING_KEY:
            self._scrubbing = True
            self._cx = event.xdata
            self._cy = event.ydata
        else:
            self.reset()
            self._roi.add_point(event.xdata, event.ydata)
        self._mid_selection = True
        self._sync_patch()

    def update_selection(self, event):
        if not self._mid_selection or event.inaxes != self._axes:
            return False
        if event.key == SCRUBBING_KEY:
            if not self._roi.defined():
                return False
        if self._scrubbing:
            # drag mode: translate by the cursor delta since last event
            self._roi.move_to(event.xdata - self._cx,
                              event.ydata - self._cy)
            self._cx = event.xdata
            self._cy = event.ydata
        else:
            # draw mode: append the cursor position as a new vertex
            self._roi.add_point(event.xdata, event.ydata)
        self._sync_patch()

    def finalize_selection(self, event):
        self._scrubbing = False
        self._mid_selection = False
        self._patch.set_visible(False)
        self._axes.figure.canvas.draw()
class MplPathROI(MplPolygonalROI):

    """Defines and displays path (polyline) selections on matplotlib plots."""

    def _roi_factory(self):
        # BUG FIX: this was named ``roi_factory`` (no leading underscore),
        # so it never overrode AbstractMplRoi._roi_factory and a
        # PolygonalROI was created instead of a Path.
        return Path()

    def _setup_patch(self):
        # The patch is created lazily in _sync_patch, once vertices exist.
        self._patch = None

    def _sync_patch(self):
        # drop the previous patch before drawing the new geometry
        if self._patch is not None:
            self._patch.remove()
            self._patch = None
        # Update geometry
        if not self._roi.defined():
            return
        else:
            x, y = self._roi.to_polygon()
            p = mplPath(np.column_stack((x, y)))
            # BUG FIX: was ``PatchPath(p)`` -- a NameError; the class
            # imported from matplotlib.patches is ``PathPatch``.
            self._patch = PathPatch(p)
            # the patch must be attached to the axes to render (and so
            # that the ``remove()`` above succeeds on the next sync)
            self._axes.add_patch(self._patch)
            self._patch.set_visible(True)
        # Update appearance
        self._patch.set(**self.plot_opts)
        # Refresh
        self._axes.figure.canvas.draw()

    def finalize_selection(self, event):
        self._mid_selection = False
        if self._patch is not None:
            self._patch.set_visible(False)
        self._axes.figure.canvas.draw()
class CategoricalRoi(Roi):

    """
    A ROI abstraction to represent selections of categorical data.
    """

    def __init__(self, categories=None):
        if categories is None:
            self.categories = None
        else:
            self.update_categories(categories)

    def _categorical_helper(self, indata):
        """
        A helper function to do the rigamaroll of getting categorical data.

        :param indata: Any type of input data
        :return: The best guess at the categorical data associated with indata
        """
        try:
            if indata.categorical:
                return indata._categorical_data
            else:
                return indata[:]
        except AttributeError:
            # plain array-like input
            return np.asarray(indata)

    def contains(self, x, y):
        """
        Test whether a set of categorical elements fall within
        the region of interest

        :param x: Any array-like object of categories
            (includes CategoricalComponents)
        :param y: Unused but required for compatibility

        *Returns*

            A list of True/False values, for whether each x value falls
            within the ROI
        """
        check = self._categorical_helper(x)
        # searchsorted finds the insertion point; clip to the last index
        # so the equality test below decides membership
        index = np.minimum(np.searchsorted(self.categories, check),
                           len(self.categories) - 1)
        return self.categories[index] == check

    def update_categories(self, categories):
        self.categories = np.unique(self._categorical_helper(categories))

    def defined(self):
        """ Returns True if the ROI is defined """
        return self.categories is not None

    def reset(self):
        self.categories = None

    @staticmethod
    def from_range(cat_comp, lo, hi):
        """
        Utility function to help construct the Roi from a range.

        :param cat_comp: Anything understood by ._categorical_helper ... array, list or component
        :param lo: lower bound of the range
        :param hi: upper bound of the range
        :return: CategoricalRoi object
        """
        roi = CategoricalRoi()
        cat_data = cat_comp._categories
        # BUG FIX: slice indices must be integers; np.floor/np.ceil return
        # floats, which raise TypeError when slicing under modern numpy.
        roi.update_categories(cat_data[int(np.floor(lo)):int(np.ceil(hi))])
        return roi
| bsd-3-clause |
SunilProgramer/stimfit | src/pystfio/stfio_plot.py | 1 | 21243 | """
Some plotting utilities to use scale bars rather than coordinate axes.
04 Feb 2011, C. Schmidt-Hieber, University College London
From the stfio module:
http://code.google.com/p/stimfit
"""
# TODO: Pin scale bars to their position
# TODO: Implement 2-channel plots
import os
import sys
import numpy as np
import numpy.ma as ma
has_mpl = True
try:
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.axislines import Subplot
except ImportError:
has_mpl = False
scale_dist_x = 0.04
scale_dist_y = 0.04
graph_width = 6.0
graph_height = 4.0
key_dist = 0.04
def save_ma(ftrunk, marr):
    """Save a (masked) array to <ftrunk>.data.npy and <ftrunk>.mask.npy."""
    if not isinstance(marr, ma.core.MaskedArray):
        # promote plain arrays so a mask array always exists
        marr = ma.array(marr, mask=False)
    np.save(ftrunk + ".data.npy", np.array(marr))
    np.save(ftrunk + ".mask.npy", np.array(marr.mask))
def load_ma(ftrunk):
    """Load a masked array previously written by :func:`save_ma`."""
    return ma.array(np.load(ftrunk + ".data.npy"),
                    mask=np.load(ftrunk + ".mask.npy"))
class Timeseries(object):
# if this is 2d, the second axis (shape[1]) is time
def __init__(self, *args, **kwargs):  # (data, dt) or (file_trunk,); xunits/yunits/... as kwargs
    """Build a Timeseries either from (data, dt) or from files on disk.

    Positional forms:
        Timeseries(data, dt)      -- in-memory construction
        Timeseries(file_trunk)    -- load <trunk>_data.npy, <trunk>_dt.npy,
                                     <trunk>_xunits and <trunk>_yunits
    Keyword arguments (in-memory form only): xunits, yunits, linestyle,
    linewidth, color/colour.
    """
    if len(args) > 2:
        raise RuntimeError("Timeseries accepts at most two non-keyworded " +
                           "arguments")
    fromFile = False
    # First argument has to be either data or file_trunk
    if isinstance(args[0], str):
        if len(args) > 1:
            raise RuntimeError("Timeseries accepts only one non-keyworded " +
                               "argument if instantiated from file")
        if os.path.exists("%s_data.npy" % args[0]):
            self.data = np.load("%s_data.npy" % args[0])
        else:
            # NOTE(review): load_ma appends ".data.npy"/".mask.npy" itself,
            # so this looks like it reads "X_data.npy.data.npy" -- confirm
            # the intended on-disk layout.
            self.data = load_ma("%s_data.npy" % args[0])
        self.dt = np.load("%s_dt.npy" % args[0])
        fxu = open("%s_xunits" % args[0], 'r')
        self.xunits = fxu.read()
        fxu.close()
        fyu = open("%s_yunits" % args[0], 'r')
        self.yunits = fyu.read()
        fyu.close()
        fromFile = True
    else:
        self.data = args[0]
        self.dt = args[1]
    if len(kwargs) > 0 and fromFile:
        raise RuntimeError("Can't set keyword arguments if Timeseries was " +
                           "instantiated from file")
    # apply display/unit keyword arguments, rejecting unknown keys
    for key in kwargs:
        if key == "xunits":
            self.xunits = kwargs["xunits"]
        elif key == "yunits":
            self.yunits = kwargs["yunits"]
        elif key == "linestyle":
            self.linestyle = kwargs["linestyle"]
        elif key == "linewidth":
            self.linewidth = kwargs["linewidth"]
        elif key == "color":
            self.color = kwargs["color"]
        elif key == "colour":
            self.color = kwargs["colour"]
        else:
            raise RuntimeError("Unknown keyword argument: " + key)
    # defaults for anything not supplied (file loads carry their own units)
    if "xunits" not in kwargs and not fromFile:
        self.xunits = "ms"
    if "yunits" not in kwargs and not fromFile:
        self.yunits = "mV"
    if "linestyle" not in kwargs:
        self.linestyle = "-"
    if "linewidth" not in kwargs:
        self.linewidth = 1.0
    if "color" not in kwargs and "colour" not in kwargs:
        self.color = 'k'
def __getitem__(self, idx):
return self.data[idx]
def __setitem__(self, idx, value):
self.data[idx] = value
def x_trange(self, tstart, tend):
return np.arange(int(tstart/self.dt), int(tend/self.dt), 1.0,
dtype=np.float) * self.dt
def y_trange(self, tstart, tend):
return self.data[int(tstart/self.dt):int(tend/self.dt)]
def timearray(self):
return np.arange(0.0, self.data.shape[-1]) * self.dt
def duration(self):
return self.data.shape[-1] * self.dt
def interpolate(self, newtime, newdt):
if len(self.data.shape) == 1:
return Timeseries(np.interp(newtime, self.timearray(), self.data,
left=np.nan, right=np.nan), newdt)
else:
# interpolate each row individually:
# iparray = ma.zeros((self.data.shape[0], len(newtime)))
# for nrow, row in enumerate(self.data):
# flin = \
# interpolate.interp1d(self.timearray(), row,
# bounds_error=False, fill_value=np.nan, kind=kind)
# iparray[nrow,:]=flin(newtime)
iparray = ma.array([
np.interp(newtime, self.timearray(), row, left=np.nan, right=np.nan)
for nrow, row in enumerate(self.data)])
return Timeseries(iparray, newdt)
def maskedarray(self, center, left, right):
# check whether we have enough data left and right:
if len(self.data.shape) > 1:
mask = \
np.zeros((self.data.shape[0], int((right+left)/self.dt)))
maskedarray = \
ma.zeros((self.data.shape[0], int((right+left)/self.dt)))
else:
mask = np.zeros((int((right+left)/self.dt)))
maskedarray = ma.zeros((int((right+left)/self.dt)))
offset = 0
if center - left < 0:
if len(self.data.shape) > 1:
mask[:,:int((left-center)/self.dt)] = 1
else:
mask[:int((left-center)/self.dt)] = 1
leftindex = 0
offset = int((left-center)/self.dt)
else:
leftindex = int((center-left)/self.dt)
if center + right >= len(self.data) * self.dt:
endtime = len(self.data) * self.dt
if len(self.data.shape) > 1:
mask[:,-int((center+right-endtime)/self.dt):] = 1
else:
mask[-int((center+right-endtime)/self.dt):] = 1
rightindex = int(endtime/self.dt)
else:
rightindex = int((center+right)/self.dt)
for timest in range(leftindex, rightindex):
if len(self.data.shape) > 1:
if timest-leftindex+offset < maskedarray.shape[1] and \
timest<self.data.shape[1]:
maskedarray[:,timest-leftindex+offset]=self.data[:,timest]
else:
if timest-leftindex+offset < len(maskedarray):
maskedarray[timest-leftindex+offset]=self.data[timest]
maskedarray.mask = ma.make_mask(mask)
return Timeseries(maskedarray, self.dt)
def save(self, file_trunk):
if isinstance(self.data, ma.MaskedArray):
save_ma("%s_data.npy" % file_trunk, self.data)
else:
np.save("%s_data.npy" % file_trunk, self.data)
np.save("%s_dt.npy" % file_trunk, self.dt)
fxu = open("%s_xunits" % file_trunk, 'w')
fxu.write(self.xunits)
fxu.close()
fyu = open("%s_yunits" % file_trunk, 'w')
fyu.write(self.yunits)
fyu.close()
def plot(self):
fig = plt.figure(figsize=(8,6))
ax = StandardAxis(fig, 111, hasx=True)
ax.plot(self.timearray(), self.data, '-k')
class timeseries(Timeseries):
    """Deprecated lowercase alias of :class:`Timeseries`.

    Kept for backwards compatibility; prints a deprecation notice to
    stderr each time it is instantiated.
    """

    def __init__(self, *args, **kwargs):
        Timeseries.__init__(self, *args, **kwargs)
        sys.stderr.write("stfio_plot.timeseries is deprecated. "
                         "Use stfio_plot.Timeseries instead.\n")
class StandardAxis(Subplot):
    """A frameless Subplot with the top and right spines hidden.

    Keyword-only extras (consumed before the Subplot constructor sees
    them): ``hasx`` (default False) and ``hasy`` (default True) control
    whether the bottom and left spines remain visible.  The axis adds
    itself to the figure passed as the first positional argument.
    """

    def __init__(self, *args, **kwargs):
        show_x = kwargs.pop('hasx', False)
        show_y = kwargs.pop('hasy', True)
        kwargs['frameon'] = False
        super(StandardAxis, self).__init__(*args, **kwargs)
        # First positional argument is the figure; register ourselves on it.
        args[0].add_axes(self)
        for side in ("right", "top"):
            self.axis[side].set_visible(False)
        if not show_x:
            self.axis["bottom"].set_visible(False)
        if not show_y:
            self.axis["left"].set_visible(False)
def average(tsl):
    """Average several Timeseries objects.

    All series are first resampled onto the time base of the series with
    the smallest (fastest) sampling interval, then averaged pointwise
    (masked/NaN-aware via numpy.ma).  Works for 1D traces and for 2D
    trace sets (averaged row by row).  Returns a new Timeseries.
    """
    # find fastest dt:
    best_dt = 1e12
    common_time = None
    for series in tsl:
        if series.dt < best_dt:
            common_time = series.timearray()
            best_dt = series.dt
    # interpolate all series to new dt:
    resampled = [series.interpolate(common_time, best_dt) for series in tsl]
    first = resampled[0].data
    if first.ndim == 1:
        stacked = np.empty((len(resampled), len(first)))
    else:
        stacked = np.empty((len(resampled), first.shape[0], first.shape[1]))
    for idx, series in enumerate(resampled):
        stacked[idx] = series.data
    if first.ndim == 1:
        return Timeseries(ma.mean(stacked, axis=0), best_dt)
    # 2D: average each row across the stacked series.
    mean2d = ma.zeros((first.shape[0], first.shape[1]))
    for nrow in range(first.shape[0]):
        mean2d[nrow, :] = ma.mean(stacked[:, nrow, :], axis=0)
    return Timeseries(mean2d, best_dt)
def prettyNumber(f):
    """Return a "pretty" value close to *f*, suitable as a scale bar length.

    The result is a small multiple of a power of ten: values close to the
    next decade snap up to it, values above half a decade snap to 5x, and
    everything else rounds to the nearest integer multiple of the decade.
    Inputs below 1 get one extra decade of resolution.
    """
    divisor = 10.0 if f < 1 else 1.0
    # Determine the decade of f.
    try:
        exponent = int(np.log10(f))
    except OverflowError:
        # np.log10(0) yields -inf and int(-inf) raises OverflowError.
        exponent = 0
    decade = 10.0 ** exponent / divisor
    ratio = f / decade
    if ratio > 7.5:
        return decade * 10
    if ratio > 5.0:
        return 5 * decade
    return round(ratio) * decade
def plot_scalebars(ax, div=3.0, labels=True,
                   xunits="", yunits="", nox=False,
                   sb_xoff=0, sb_yoff=0,
                   sb_ylabel_xoff_comp=False, sb_ylabel_yoff=0, rotate_yslabel=False,
                   linestyle="-k", linewidth=4.0,
                   textcolor='k', textweight='normal',
                   xmin=None, xmax=None, ymin=None, ymax=None):
    """Draw x and y scale bars (in data coordinates) into *ax*.

    Bar lengths are chosen by prettyNumber() as a "nice" value close to
    1/div of the plotted range in each direction.  If *labels* is True
    the bars are annotated with their length plus *xunits*/*yunits*;
    *nox* suppresses the horizontal bar.  The sb_* parameters shift bars
    and labels, expressed as fractions of the data range.  Explicit
    xmin/xmax/ymin/ymax override the limits taken from ax.dataLim.

    NOTE(review): positioning depends on the module-level constants
    scale_dist_x, scale_dist_y and key_dist defined elsewhere in this
    module -- confirm their values when adjusting offsets.
    """
    # print dir(ax.dataLim)
    # Fall back to the axes' data limits for any limit not given explicitly.
    if xmin is None:
        xmin = ax.dataLim.xmin
    if xmax is None:
        xmax = ax.dataLim.xmax
    if ymin is None:
        ymin = ax.dataLim.ymin
    if ymax is None:
        ymax = ax.dataLim.ymax
    xscale = xmax-xmin
    yscale = ymax-ymin
    # Offsets below are fractions of the data range converted to data units.
    xoff = (scale_dist_x + sb_xoff) * xscale
    if sb_ylabel_xoff_comp:
        # Keep the y-bar label at the default offset even when the bar
        # itself has been shifted (used when a second channel is drawn).
        xoff_ylabel = scale_dist_x * xscale
    else:
        xoff_ylabel = xoff
    yoff = (scale_dist_y - sb_yoff) * yscale
    # plot scale bars:
    xlength = prettyNumber((xmax-xmin)/div)
    xend_x, xend_y = xmax, ymin
    if not nox:
        # Horizontal bar runs leftwards from the lower-right corner.
        xstart_x, xstart_y = xmax-xlength, ymin
        scalebarsx = [xstart_x+xoff, xend_x+xoff]
        scalebarsy = [xstart_y-yoff, xend_y-yoff]
    else:
        scalebarsx=[xend_x+xoff,]
        scalebarsy=[xend_y-yoff]
    # Vertical bar continues upwards from the same corner point.
    ylength = prettyNumber((ymax-ymin)/div)
    yend_x, yend_y = xmax, ymin+ylength
    scalebarsx.append(yend_x+xoff)
    scalebarsy.append(yend_y-yoff)
    ax.plot(scalebarsx, scalebarsy, linestyle, linewidth=linewidth, solid_joinstyle='miter')
    if labels:
        # if textcolor is not None:
        #     color = "\color{%s}" % textcolor
        # else:
        #     color = ""
        if not nox:
            # xlabel
            # Integer lengths are printed as "%d", fractional ones as "%g".
            if xlength >=1:
                xlabel = r"%d$\,$%s" % (xlength, xunits)
            else:
                xlabel = r"%g$\,$%s" % (xlength, xunits)
            xlabel_x, xlabel_y = xmax-xlength/2.0, ymin
            xlabel_y -= key_dist*yscale
            ax.text(xlabel_x+xoff, xlabel_y-yoff, xlabel, ha='center', va='top',
                    weight=textweight, color=textcolor) #, [pyx.text.halign.center,pyx.text.valign.top])
        # ylabel
        if ylength >=1:
            ylabel = r"%d$\,$%s" % (ylength,yunits)
        else:
            ylabel = r"%g$\,$%s" % (ylength,yunits)
        if not rotate_yslabel:
            ylabel_x, ylabel_y = xmax, ymin + ylength/2.0 + sb_ylabel_yoff*yscale
            ylabel_x += key_dist*xscale
            ax.text(ylabel_x+xoff_ylabel, ylabel_y-yoff, ylabel, ha='left', va='center',
                    weight=textweight, color=textcolor)
        else:
            ylabel_x, ylabel_y = xmax, ymin + ylength/2.0 + sb_ylabel_yoff
            ylabel_x += key_dist*xscale
            ax.text(ylabel_x+xoff_ylabel, ylabel_y-yoff, ylabel, ha='left', va='center', rotation=90,
                    weight=textweight, color=textcolor)
def xFormat(x, res, data_len, width):
    """Map sample index *x* (out of *data_len* samples) onto an integer
    pixel column for a plot of *width* inches rendered at *res* dpi.
    The horizontal extent in pixels is width/2.5 * res."""
    total_px = int(width / 2.5 * res)
    fraction = float(x) / data_len
    return int(fraction * total_px)
def yFormat(y):
    """Identity transform for y values (placeholder mirroring xFormat)."""
    return y
def reduce(ydata, dy, maxres, xoffset=0, width=graph_width):
    """Min/max-downsample *ydata* for plotting at *maxres* dpi.

    All samples that map onto the same horizontal pixel column (as
    computed by xFormat) are collapsed to at most their minimum and
    maximum, so the rendered trace looks identical while holding far
    fewer points.  *dy* is the sampling interval; the returned x values
    are converted back into time units and shifted by *xoffset*.

    Returns a tuple ``(times, values)``.

    NOTE(review): this function shadows the builtin ``reduce`` and the
    locals ``xrange``/``yrange`` shadow Python 2 builtins -- kept as-is
    for compatibility.  The default ``width=graph_width`` relies on a
    module-level constant defined elsewhere in this file.
    """
    # Pixel column and value of the first sample seed the extrema tracking.
    x_last = xFormat(0, maxres, len(ydata), width)
    y_last = yFormat(ydata[0])
    y_max = y_last
    y_min = y_last
    x_next = 0
    y_next = 0
    xrange = list()
    yrange = list()
    xrange.append(x_last)
    yrange.append(y_last)
    for (n,pt) in enumerate(ydata[:-1]):
        x_next = xFormat(n+1, maxres, len(ydata), width)
        y_next = yFormat(ydata[n+1])
        # if we are still at the same pixel column, only draw if this is an extremum:
        if (x_next == x_last):
            if (y_next < y_min):
                y_min = y_next
            if (y_next > y_max):
                y_max = y_next
        else:
            # else, always draw and reset extrema:
            if (y_min != y_next):
                xrange.append(x_last)
                yrange.append(y_min)
                y_last = y_min
            if (y_max != y_next):
                xrange.append(x_last)
                yrange.append(y_max)
                y_last = y_max
            xrange.append(x_next)
            yrange.append(y_next)
            y_min = y_next
            y_max = y_next
            x_last = x_next
            y_last = y_next
    # Convert pixel columns back into time units.
    trace_len_pts = width/2.5 * maxres
    trace_len_time = len(ydata) * dy
    dt_per_pt = trace_len_time / trace_len_pts
    xrange = np.array(xrange)*dt_per_pt + xoffset
    return xrange, yrange
def plot_traces(traces, traces2=None, ax=None, Fig=None, pulses=None,
                xmin=None, xmax=None, ymin=None, ymax=None,
                y2min=None, y2max=None, xoffset=0,
                maxres = None,
                plot_sb=True, sb_yoff=0, sb_xoff=0, linestyle_sb = "-k",
                dashedline=None, sagline=None, rotate_yslabel=False,
                textcolor='k', textweight='normal',
                textcolor2='r',
                figsize=None,
                pulseprop=0.05, border=0.2):
    """Plot a list of Timeseries *traces* without axes, using scale bars.

    *traces2*, if given, is drawn on a twin y-axis (second channel) with
    its labels in *textcolor2*; *pulses* are drawn in a small panel below
    the main axes.  If *ax* is None a new figure/axes pair is created
    (size *figsize*, resolution *maxres* dpi).  *maxres* also triggers
    min/max downsampling of the traces via reduce().  *dashedline* and
    *sagline* draw horizontal dashed reference lines at the given y
    values.  The remaining keyword arguments tune the scale bars (see
    plot_scalebars).  Returns the axes the traces were drawn into.
    """
    if ax is None:
        if figsize is None:
            Fig = plt.figure(dpi=maxres)
        else:
            Fig = plt.figure(dpi=maxres, figsize=figsize)
        Fig.patch.set_alpha(0.0)
        # Reserve room below the traces for the pulse panel if needed.
        if pulses is not None and len(pulses) > 0:
            prop = 1.0-pulseprop-border
        else:
            prop = 1.0-border
        ax = Fig.add_axes([0.0,(1.0-prop),1.0-border,prop], alpha=0.0)
    if y2min is not None and y2max is not None:
        ymin, ymax = np.inf, -np.inf # This is a hack to find the y-limits of the
        # traces. It should be supplied by stf.plot_ymin() & stf.plot_ymax(). But
        # this doesn't work for a 2nd channel, and if the active channel is 2 (IN
        # 2). It seems like stf.plot_ymin/max always gets data from channel 0 (IN
        # 0), irrespective of which is choosen as the active channel.
    for trace in traces:
        if maxres is None:
            xrange = trace.timearray()+xoffset
            yrange = trace.data
        else:
            # Downsample to the target resolution before plotting.
            xrange, yrange = reduce(trace.data, trace.dt, maxres=maxres)
            xrange += xoffset
        if y2min is not None and y2max is not None:
            ymin, ymax = min(ymin,yrange.min()), max(ymax,yrange.max()) # Hack to
            # get y-limits of data. See comment above.
        ax.plot(xrange, yrange, trace.linestyle, lw=trace.linewidth, color=trace.color)
    # NOTE(review): this unconditionally resets y2min/y2max, discarding any
    # values passed in by the caller -- confirm that is intended.
    y2min, y2max = np.inf, -np.inf # Hack to get y-limit of data, see comment above.
    if traces2 is not None:
        copy_ax = ax.twinx()
        for trace in traces2:
            if maxres is None:
                xrange = trace.timearray()+xoffset
                yrange = trace.data
            else:
                xrange, yrange = reduce(trace.data, trace.dt, maxres=maxres)
                xrange += xoffset
            y2min, y2max = min(y2min,yrange.min()), max(y2max,yrange.max()) #
            # Hack to get y-limit of data, see comment above.
            copy_ax.plot(xrange, yrange, trace.linestyle, lw=trace.linewidth, color=trace.color)
    else:
        copy_ax = None
    # Invisible diagonal line forcing ax.dataLim to span the requested limits.
    if xmin is not None:
        phantomrect_x0 = xmin
    else:
        phantomrect_x0 = ax.dataLim.xmin
    if xmax is not None:
        phantomrect_x1 = xmax
    else:
        phantomrect_x1 = ax.dataLim.xmax
    if ymin is not None:
        phantomrect_y0 = ymin
    else:
        phantomrect_y0 = ax.dataLim.ymin
    if ymax is not None:
        phantomrect_y1 = ymax
    else:
        phantomrect_y1 = ax.dataLim.ymax
    pr = ax.plot([phantomrect_x0, phantomrect_x1], [phantomrect_y0, phantomrect_y1], alpha=0.0)
    if traces2 is not None:
        # Same phantom-limit trick for the second channel's axes.
        if y2min is not None:
            phantomrect_y20 = y2min
        else:
            phantomrect_y20 = copy_ax.dataLim.ymin
        if y2max is not None:
            phantomrect_y21 = y2max
        else:
            phantomrect_y21 = copy_ax.dataLim.ymax
        pr = copy_ax.plot([phantomrect_x0, phantomrect_x1], [phantomrect_y20, phantomrect_y21], alpha=0.0)
    xscale = ax.dataLim.xmax-ax.dataLim.xmin
    yscale = ax.dataLim.ymax-ax.dataLim.ymin
    if dashedline is not None:
        # Horizontal dashed reference line across the full x range.
        ax.plot([ax.dataLim.xmin, ax.dataLim.xmax],[dashedline, dashedline],
                "--k", linewidth=traces[0].linewidth*2.0)
        gridline_x, gridline_y = ax.dataLim.xmax, dashedline
        gridline_x += key_dist*xscale
        xoff = scale_dist_x * xscale
    if sagline is not None:
        ax.plot([ax.dataLim.xmin, ax.dataLim.xmax],[sagline, sagline],
                "--k", linewidth=traces[0].linewidth*2.0)
        gridline_x, gridline_y = ax.dataLim.xmax, sagline
        gridline_x += key_dist*xscale
        xoff = scale_dist_x * xscale
    if xmin is None:
        xmin = ax.dataLim.xmin
    if xmax is None:
        xmax = ax.dataLim.xmax
    if ymin is None:
        ymin = ax.dataLim.ymin
    if ymax is None:
        ymax = ax.dataLim.ymax
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    if traces2 is not None:
        if y2min is None:
            y2min = copy_ax.dataLim.ymin
        if y2max is None:
            y2max = copy_ax.dataLim.ymax
        copy_ax.set_ylim(y2min, y2max)
        # With a second channel, nudge the scale bars and labels apart.
        sb_xoff_total = -0.03+sb_xoff
        sb_yl_yoff = 0.025
    else:
        sb_xoff_total = sb_xoff
        sb_yl_yoff = 0
    if plot_sb:
        plot_scalebars(ax, linestyle=linestyle_sb, xunits=traces[0].xunits, yunits=traces[0].yunits,
                       textweight=textweight, textcolor=textcolor, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax,
                       rotate_yslabel = rotate_yslabel, sb_xoff=sb_xoff_total,
                       sb_ylabel_xoff_comp=(traces2 is not None), sb_ylabel_yoff=sb_yl_yoff)
        if traces2 is not None:
            plot_scalebars(copy_ax, linestyle=traces2[0].linestyle, xunits=traces2[0].xunits, yunits=traces2[0].yunits,
                           textweight=textweight, textcolor=textcolor2, xmin=xmin, xmax=xmax, ymin=y2min, ymax=y2max,
                           rotate_yslabel = rotate_yslabel, nox=True,
                           sb_xoff=-0.01+sb_xoff, sb_ylabel_xoff_comp=True, sb_ylabel_yoff=-sb_yl_yoff)
    if pulses is not None and len(pulses) > 0:
        # Small pulse panel below the main axes, sharing the x axis.
        axp = Fig.add_axes([0.0,0.0,1.0-border,pulseprop+border/4.0], sharex=ax)
        for pulse in pulses:
            xrange = pulse.timearray()
            yrange = pulse.data
            axp.plot(xrange, yrange, pulse.linestyle, linewidth=pulse.linewidth, color=pulse.color)
        plot_scalebars(axp, linestyle=linestyle_sb, nox=True, yunits=pulses[0].yunits,
                       textweight=textweight, textcolor=textcolor)
        for o in axp.findobj():
            o.set_clip_on(False)
        axp.axis('off')
    # Hide all axes decorations; the scale bars replace them.
    for o in ax.findobj():
        o.set_clip_on(False)
    ax.axis('off')
    if traces2 is not None:
        copy_ax.axis('off')
    if ax is None:
        # NOTE(review): unreachable -- ax is always assigned above when it
        # starts as None, so the function always returns ax, never Fig.
        return Fig
    return ax # NOTE copy_ax HKT mod
def standard_axis(fig, subplot, sharex=None, sharey=None, hasx=False, hasy=True):
    """Deprecated: create a frameless Subplot on *fig* with top/right
    spines hidden; use stfio_plot.StandardAxis instead.

    *subplot* may be an integer spec (e.g. 111), a (rows, cols, index)
    tuple, or a gridspec element.  *hasx*/*hasy* control visibility of
    the bottom/left spines.  Returns the created axes.
    """
    sys.stderr.write("This method is deprecated. Use stfio_plot.StandardAxis instead.\n")
    try:
        it = iter(subplot)
        # BUG FIX: this previously tested the undefined name ``gs1``,
        # which always raised NameError and sent every iterable subplot
        # spec through the except branch below. Test the actual argument.
        if isinstance(subplot, matplotlib.gridspec.GridSpec):
            ax1 = Subplot(fig, subplot, frameon=False, sharex=sharex, sharey=sharey)
        else:
            ax1 = Subplot(fig, subplot[0], subplot[1], subplot[2], frameon=False, sharex=sharex, sharey=sharey)
    except Exception:
        # Not iterable/indexable (e.g. the integer 111 shorthand): pass
        # the spec through unchanged. (Was a bare except; narrowed so
        # KeyboardInterrupt/SystemExit are no longer swallowed.)
        ax1 = Subplot(fig, subplot, frameon=False, sharex=sharex, sharey=sharey)
    fig.add_axes(ax1)
    ax1.axis["right"].set_visible(False)
    ax1.axis["top"].set_visible(False)
    if not hasx:
        ax1.axis["bottom"].set_visible(False)
    if not hasy:
        ax1.axis["left"].set_visible(False)
    return ax1
| gpl-2.0 |
lpsinger/astropy | astropy/utils/compat/optional_deps.py | 5 | 1183 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
# First, the top-level packages:
# TODO: This list is a duplicate of the dependencies in setup.cfg "all", but
# some of the package names are different from the pip-install name (e.g.,
# beautifulsoup4 -> bs4).
_optional_deps = ['bleach', 'bottleneck', 'bs4', 'bz2', 'h5py', 'html5lib',
                  'IPython', 'jplephem', 'lxml', 'matplotlib', 'mpmath',
                  'pandas', 'PIL', 'pytz', 'scipy', 'skyfield',
                  'sortedcontainers', 'lzma', 'yaml']
# Map the upper-case suffix of "HAS_<PKG>" to the actual importable name.
_deps = {k.upper(): k for k in _optional_deps}

# Any subpackages that have different import behavior:
_deps['PLT'] = 'matplotlib.pyplot'

__all__ = [f"HAS_{pkg}" for pkg in _deps]


def __getattr__(name):
    """Lazily resolve ``HAS_<PKG>`` module attributes (PEP 562).

    Returns True if the corresponding optional dependency can be
    imported, False otherwise.  Any other attribute name raises
    AttributeError as usual.
    """
    if name in __all__:
        module_name = name[4:]

        try:
            importlib.import_module(_deps[module_name])
        except ImportError:
            # ModuleNotFoundError is a subclass of ImportError, so one
            # clause covers both a missing module and a broken import.
            return False
        return True

    raise AttributeError(f"Module {__name__!r} has no attribute {name!r}.")
| bsd-3-clause |
plorch/FocusOnWildlifeCMPScripts | sessions_inproj_byuser.py | 1 | 28167 | #Python 2.7.9 (default, Apr 5 2015, 22:21:35)
# the full environment I've tested this in is in basic_project_stats.yml
# note: this code replicates a lot of what's in basic_project_stats.py
# and then builds on it.
'''
sessions_inproj_byuser:
Computes session statistics for each user who has classified on your project.
Useful for understanding how people are using your project.
Only uses classification statistics (does not consider Talk participation).
The program considers all classifications by a user to be in the same session if no more than 60 minutes elapses between the classifications in the session. (You can specify a different time gap threshold.) After assigning each classification to a session for that user, it computes statistics on the sessions such as the typical length of a session and number of classifications per session.
Run the code without any inputs to get back information on the input and output formats.
The output csv file will have the following columns:
n_class: total number of classifications by the classifier
n_sessions: total number of sessions by the classifier
n_days: number of unique days on which the classifier has classified
first_day: date of first classification (YYYY-MM-DD)
last_day: date of last classification (YYYY-MM-DD)
tdiff_firstlast_hours: time elapsed between first and last classification (hours)
time_spent_classifying_total_minutes: total time spent actually classifying, i.e. work effort (minutes)
class_per_session_min: minimum number of classifications per session
class_per_session_max: maximum number of classifications per session
class_per_session_med: median number of classifications per session
class_per_session_mean: mean number of classifications per session
class_length_mean_overall: mean length of a single classification (minutes), over all sessions
class_length_median_overall: median length of a single classification (minutes), over all sessions
session_length_mean: mean length of a session (minutes)
session_length_median: median length of a session (minutes)
session_length_min: length of shortest session (minutes)
session_length_max: length of longest session (minutes)
which_session_longest: session number of classifier's longest session (by time spent)
mean_session_length_first2: mean session length in the classifier's first 2 sessions (minutes)
mean_session_length_last2: mean session length in the classifier's last 2 sessions (minutes)
mean_class_length_first2: mean classification length in the classifier's first 2 sessions (minutes)
mean_class_length_last2: mean classification length in the classifier's last 2 sessions (minutes)
class_count_session_list: classification counts in each session, formatted as: [n_class_1; n_class_2; ...]
The mean session and classification lengths in the first 2 and last 2 sessions are only calculated if the user has classified in at least 4 sessions; otherwise the values are 0.
'''
import sys
# file with raw classifications (csv)
# put this way up here so if there are no inputs we exit quickly before even trying to load everything else
# Base name for the stats outfile when none is given on the command line.
default_statstart = "session_stats"
# The classifications CSV is the one required argument; if it is missing,
# print usage information and quit before loading pandas etc.
# NOTE(review): the bare except also hides non-IndexError failures.
try:
    classfile_in = sys.argv[1]
except:
    #classfile_in = 'galaxy_zoo_bar_lengths/galaxy-zoo-bar-lengths-classifications.csv'
    print "\nUsage: "+sys.argv[0]+" classifications_infile [stats_outfile add_dates_to_file session_break_length]"
    print "      classifications_infile is a Zooniverse (Panoptes) classifications data export CSV."
    print "      stats_outfile is the name of an outfile you'd like to write."
    print "         if you don't specify one it will be "+default_statstart+"_[date]_to_[date].csv"
    print "         where the dates show the first & last classification date."
    print "      add_dates_to_file is 1 if you want to add \"_[date]_to_[date]\" to the output filename, as"
    print "         described above, even if you did specify a stats_outfile name."
    print "      A new session is defined to start when 2 classifications by the same classifier are"
    print "         separated by at least session_break_length minutes (default value: 60)"
    print "\nOnly the classifications_infile is a required input.\n"
    sys.exit(0)
import numpy as np # works in 1.10.1
import pandas as pd # works in 0.13.1
import datetime
import dateutil.parser
import json
# timestamps & timediffs are in nanoseconds below but we want outputs in hours or minutes, depending
# Note: I'd like to keep units in days but then a session length etc in seconds is ~1e-5 and that's too
# close to floating-point errors for my liking (because this might be read into Excel)
# we will use either this below, or
# /datetime.timedelta(hours=1)
# depending on whether the output is in a timedelta (use above) or in float (use below).
ns2hours = 1.0 / (1.0e9*60.*60.)  # multiply a nanosecond count by this to get hours
ns2mins = 1.0 / (1.0e9*60.)  # multiply a nanosecond count by this to get minutes
# columns currently in an exported Panoptes classification file:
# user_name,user_id,user_ip,workflow_id,workflow_name,workflow_version,created_at,gold_standard,expert,metadata,annotations,subject_data
# user_name is either their registered name or "not-logged-in"+their hashed IP
# user_id is their numeric Zooniverse ID or blank if they're unregistered
# user_ip is a hashed version of their IP
# workflow_id is the numeric ID of this workflow, which you can find in the project builder URL for managing the workflow:
# https://www.zooniverse.org/lab/[project_id]/workflow/[workflow_id]/
# workflow_name is the name you gave your workflow (for sanity checks)
# workflow_version is [bigchangecount].[smallchangecount] and is probably pretty big
# created_at is the date the entry for the classification was recorded
# gold_standard is 1 if this classification was done in gold standard mode
# expert is 1 if this classification was done in expert mode... I think
# metadata (json) is the data the browser sent along with the classification.
# Includes browser information, language, started_at and finished_at
# note started_at and finished_at are perhaps the easiest way to calculate the length of a classification
# (the duration elapsed between consecutive created_at by the same user is another way)
# the difference here is back-end vs front-end
# annotations (json) contains the actual classification information
# which for this analysis we will ignore completely, for now
# subject_data is cross-matched from the subjects table and is for convenience in data reduction
# here we will ignore this too, except to count subjects once.
# we'll also ignore user_ip, workflow information, gold_standard, and expert.
#
# some of these will be defined further down, but before we actually use this list.
cols_used = ["created_at_ts", "user_name", "user_id", "created_at", "started_at", "finished_at"]
# Check for the other inputs on the command line
# Output file
# If no outfile is given we fall back to the default name and mark it for
# date decoration later (modstatsfile).
try:
    statsfile_out = sys.argv[2]
    # If it's given on the command line, don't add the dates to the filename later
    modstatsfile = False
except:
    statsfile_out = default_statstart+".csv"
    modstatsfile = True

# Optional third argument: force date decoration of the outfile name.
try:
    add_date_temp = int(sys.argv[3])
    if add_date_temp == 1:
        modstatsfile = True
    # else nothing, just keep whatever modstatsfile is already defined as
except:
    # ignore this as you'll have already defined modstatsfile above
    pass

# The separation between 2 classifications, in minutes, that defines the start of a new session for a classifier
try:
    session_break = float(sys.argv[4])
except:
    session_break = 60.

# Print out the input parameters just as a sanity check
print "Computing session stats using:"
print "   infile:",classfile_in
# If we're adding the dates to the output file, we can't print it out here because we don't yet know the dates
if not modstatsfile:
    print "   outfile:",statsfile_out
print "   new session starts after classifier break of",session_break,"minutes\n"
#################################################################################
#################################################################################
#################################################################################
# This is the function that will compute the stats for each user
#
def sessionstats(grp):
# groups and dataframes behave a bit differently; life is a bit easier if we DF the group
# also sort each individual group rather than sort the whole classification dataframe; should be much faster
user_class = pd.DataFrame(grp).sort('created_at_ts', ascending=True)
# If the user id is a number, great; if it's blank, keep it blank and don't force it to NaN
try:
theuserid = int(user_class.user_id.iloc[0])
except:
theuserid = user_class.user_id.iloc[0]
# the next 2 lines are why we converted into datetime
user_class['duration'] = user_class.created_at_ts.diff()
user_class['class_length'] = user_class.finished_at - user_class.started_at
# set up the session count
user_class['session'] = [0 for q in user_class.duration]
# because aggregate('count') has a weird bug (sometimes returns n-1 instead), just make a "count" column
# and then aggregate('sum')
user_class['count'] = [1 for q in user_class.duration]
# YYYY-MM-DD only
user_class['created_day'] = [q[:10] for q in user_class.created_at]
n_class = len(user_class)
n_days = len(user_class.created_day.unique())
first_day = user_class.created_day.iloc[0]
last_day = user_class.created_day.iloc[-1]
#front-end version; back-end version uses 'created_at'
tdiff_firstlast_hours = (user_class.finished_at[user_class.index[-1]] - user_class.started_at[user_class.index[0]]).total_seconds() / 3600.
i_firstclass = user_class.index[0]
i_lastclass = user_class.index[-1]
# Figure out where new sessions start, manually dealing with the first classification of the session
thefirst = (user_class.duration >= np.timedelta64(int(session_break), 'm')) | (user_class.index == i_firstclass)
# insession is more useful if for some reason you don't trust started_at and finished_at
# and instead you need to do calculations using 'duration'
insession = np.invert(thefirst)
# start times for each session
starttimes = user_class.created_at_ts[thefirst]
# start dates for each session
startdays = user_class.created_day[thefirst]
# session count; could also do sum(thefirst) but len takes less time than sum
n_sessions = len(starttimes.unique())
# timedeltas are just ints, but interpreted a certain way. So force them to int as needed.
# By default they're in nanoseconds
# Note: there are a multitude of ways these lines could screw up.
# Firstly, numpy has a timedelta data type, and so does pandas, and they don't
# quite behave the same way.
# Secondly, different versions treat them differently, which means that e.g.
# the first version of pandas I used (0.13.1) likes the lines immediately below
# this (now commented out), but upgrading (0.17.1) meant that broke and
# now there's a new format. Frustrating.
# Note: this will filter through to other places where we output times... and
# as this is a script to calculate session length information, there are a
# lot of those.
# class_length_mean_overall = np.mean(user_class.class_length).astype(int) * ns2mins
# class_length_median_overall = np.median(user_class.class_length).astype(int) * ns2mins
class_length_mean_overall = np.mean(user_class.class_length) / np.timedelta64(1, 'm')
class_length_median_overall = np.median(user_class.class_length) / np.timedelta64(1, 'm')
# index this into a timeseries
# this means the index might no longer be unique, but it has many advantages
user_class.set_index('created_at_ts', inplace=True, drop=False)
# now, keep the session count by adding 1 to each element of the timeseries with t > each start time
# not sure how to do this without a loop
for the_start in starttimes.unique():
user_class.session[the_start:] += 1
# Now that we've defined the sessions let's do some calculations
bysession = user_class.groupby('session')
# get classification counts, total session durations, median classification length for each session
# time units in minutes here
# this may give a warning for 1-entry sessions but whatevs
class_length_median = bysession.class_length.apply(lambda x: np.median(x))/datetime.timedelta(minutes=1)
#class_length_total = bysession.class_length.aggregate('sum') * ns2mins
class_length_total = bysession.class_length.aggregate('sum')/ np.timedelta64(1, 'm')
class_count_session = bysession['count'].aggregate('sum')
# make commas into semicolons because we don't want to break the eventual CSV output
class_count_session_list = str(class_count_session.tolist()).replace(',',';')
# below is the back-end version; use if you don't have or don't trust started_at and finished_at
# # ignore the first duration, which isn't a real classification duration but a time between sessions
# dur_median = bysession.duration.apply(lambda x: np.median(x[1:])) /datetime.timedelta(hours=1)
# dur_total = bysession.duration.apply(lambda x: np.sum(x[1:])) # in nanoseconds
# ses_count = bysession.duration.aggregate('count')
# # ses_nproj = bysession.project_name.aggregate(lambda x:x.nunique())
# basic classification count stats per session
count_mean = np.nanmean(class_count_session.astype(float))
count_med = np.median(class_count_session)
count_min = np.min(class_count_session)
count_max = np.max(class_count_session)
session_length_mean = np.nanmean(class_length_total).astype(float)
session_length_median = np.median(class_length_total).astype(float)
session_length_min = np.min(class_length_total)
session_length_max = np.max(class_length_total)
session_length_total = np.sum(class_length_total)
class_length_mean = class_length_total / class_count_session.astype(float)
# nproj_session_med = np.median(ses_nproj)
# nproj_session_mean = np.nanmean(ses_nproj.astype(float))
# nproj_session_min = np.min(ses_nproj)
# nproj_session_max = np.max(ses_nproj)
which_session_longest = class_length_total[class_length_total == np.max(class_length_total)].index[0]
if n_sessions >= 4:
# get durations of first 2 and last 2 sessions
# Note: this idea comes from Sauermann & Franzoni (2015) and their related work
# http://www.pnas.org/content/112/3/679.full
# You can use it to examine whether on average your classifiers are doing
# more or less work per session at the start vs end of their time spent on your project,
# as well as examine the classification duration to see if they are more efficient at
# classifying. Keep in mind the various assumptions you need to make about how the
# intrinsic difficulty of classifying a subject varies (or doesn't) over the length of your
# project in order to do this analysis, etc.
mean_duration_first2 = (class_length_total[1]+class_length_total[2])/2.0
mean_duration_last2 = (class_length_total[n_sessions]+class_length_total[n_sessions-1])/2.0
mean_class_duration_first2 = (class_length_total[1]+class_length_total[2])/(class_count_session[1]+class_count_session[2]).astype(float)
mean_class_duration_last2 = (class_length_total[n_sessions]+class_length_total[n_sessions-1])/(class_count_session[n_sessions]+class_count_session[n_sessions-1]).astype(float)
else:
mean_duration_first2 = 0.0
mean_duration_last2 = 0.0
mean_class_duration_first2 = 0.0
mean_class_duration_last2 = 0.0
# now set up the DF to return
# but keep it as a list until later, which is about 30s shorter when running this function over ~4500 users
# versus setting the Series earlier, so for large classification exports with many thousands of users this will
# make a significant difference.
session_stats = {}
session_stats["user_id"] = theuserid # note: username will be in the index, this is zooid
#session_stats = pd.Series(session_stats) # so the subsequent column ordering is preserved, make it a series now
session_stats["n_class"] = n_class
session_stats["n_sessions"] = n_sessions
session_stats["n_days"] = n_days
session_stats["first_day"] = first_day[:10]
session_stats["last_day"] = last_day[:10]
session_stats["tdiff_firstlast_hours"] = tdiff_firstlast_hours # hours
session_stats["time_spent_classifying_total_minutes"] = session_length_total # minutes
session_stats["class_per_session_min"] = count_min
session_stats["class_per_session_max"] = count_max
session_stats["class_per_session_med"] = count_med
session_stats["class_per_session_mean"] = count_mean
session_stats["class_length_mean_overall"] = float(class_length_mean_overall) # minutes
session_stats["class_length_median_overall"] = class_length_median_overall # minutes
session_stats["session_length_mean"] = session_length_mean # minutes
session_stats["session_length_median"] = session_length_median # minutes
session_stats["session_length_min"] = session_length_min # minutes
session_stats["session_length_max"] = session_length_max # minutes
session_stats["which_session_longest"] = which_session_longest
session_stats["mean_session_length_first2"] = mean_duration_first2 # minutes
session_stats["mean_session_length_last2"] = mean_duration_last2 # minutes
session_stats["mean_class_length_first2"] = mean_class_duration_first2 # minutes
session_stats["mean_class_length_last2"] = mean_class_duration_last2 # minutes
session_stats["class_count_session_list"] = class_count_session_list # semicolon-separated
# lists don't preserve column order so let's manually order
col_order = ['user_id',
'n_class',
'n_sessions',
'n_days',
'first_day',
'last_day',
'tdiff_firstlast_hours',
'time_spent_classifying_total_minutes',
'class_per_session_min',
'class_per_session_max',
'class_per_session_med',
'class_per_session_mean',
'class_length_mean_overall',
'class_length_median_overall',
'session_length_mean',
'session_length_median',
'session_length_min',
'session_length_max',
'which_session_longest',
'mean_session_length_first2',
'mean_session_length_last2',
'mean_class_length_first2',
'mean_class_length_last2',
'class_count_session_list']
return pd.Series(session_stats)[col_order]
#return session_stats
#################################################################################
#################################################################################
#################################################################################
# Get the Gini coefficient - https://en.wikipedia.org/wiki/Gini_coefficient
#
# The Gini coefficient measures inequality in distributions of things.
# It was originally conceived for economics (e.g. where is the wealth in a country?
# in the hands of many citizens or a few?), but it's just as applicable to many
# other fields. In this case we'll use it to see how classifications are
# distributed among classifiers.
# G = 0 is a completely even distribution (everyone does the same number of
# classifications), and ~1 is uneven (~all the classifications are done
# by one classifier).
# Typical values of the Gini for healthy Zooniverse projects (Cox et al. 2015) are
# in the range of 0.7-0.9.
# That range is generally indicative of a project with a loyal core group of
# volunteers who contribute the bulk of the classification effort, but balanced
# out by a regular influx of new classifiers trying out the project, from which
# you continue to draw to maintain a core group of prolific classifiers.
# Once your project is fairly well established, you can compare it to past Zooniverse
# projects to see how you're doing.
# If your G is << 0.7, you may be having trouble recruiting classifiers into a loyal
# group of volunteers. People are trying it, but not many are staying.
# If your G is > 0.9, it's a little more complicated. If your total classification
# count is lower than you'd like it to be, you may be having trouble recruiting
# classifiers to the project, such that your classification counts are
# dominated by a few people.
# But if you have G > 0.9 and plenty of classifications, this may be a sign that your
# loyal users are -really- committed, so a very high G is not necessarily a bad thing.
#
# Of course the Gini coefficient is a simplified measure that doesn't always capture
# subtle nuances and so forth, but it's still a useful broad metric.
def gini(list_of_values):
    """Return the Gini coefficient of the values (0 = perfectly even,
    ~1 = maximally uneven).

    `list_of_values` is an iterable of non-negative numbers, e.g.
    classification counts per user. Returns 0.0 for an empty input or
    when every value is zero (no inequality to measure, and it avoids a
    ZeroDivisionError).
    """
    sorted_list = sorted(list_of_values)
    height, area = 0, 0
    for value in sorted_list:
        height += value
        # running sum of the Lorenz-curve trapezoid areas
        area += height - value / 2.
    # float literal so this stays correct under Python 2's integer
    # division (the original '/ 2' truncated for integer inputs)
    fair_area = height * len(sorted_list) / 2.
    if fair_area == 0:
        return 0.0
    return (fair_area - area) / fair_area
#################################################################################
#################################################################################
#################################################################################
# Begin the main stuff
print "Reading classifications from "+classfile_in
classifications = pd.read_csv(classfile_in)
# first, extract the started_at and finished_at from the annotations column
# (each metadata cell is a JSON string; parse it once and reuse it)
classifications['meta_json'] = [json.loads(q) for q in classifications.metadata]
classifications['started_at_str'] = [q['started_at'] for q in classifications.meta_json]
classifications['finished_at_str'] = [q['finished_at'] for q in classifications.meta_json]
# the first 10 characters of an ISO timestamp are the YYYY-MM-DD date
classifications['created_day'] = [q[:10] for q in classifications.created_at]
first_class_day = min(classifications.created_day).replace(' ', '')
last_class_day = max(classifications.created_day).replace(' ', '')
# The next thing we need to do is parse the dates into actual datetimes
# I don't remember why this is needed but I think it's faster to use this below than a for loop on the actual column
ca_temp = classifications['created_at'].copy()
# strip the ISO 'T' separator and trailing 'Z' so the format strings below match
sa_temp = classifications['started_at_str'].copy().str.replace('T',' ').str.replace('Z', '')
fa_temp = classifications['finished_at_str'].copy().str.replace('T',' ').str.replace('Z', '')
print "Creating timeseries..."#,datetime.datetime.now().strftime('%H:%M:%S.%f')
# Do these separately so you can track errors to a specific line
# Try the format-specified ones first (because it's faster, if it works)
# Each column falls back through: explicit format with timezone ->
# explicit format without one -> pandas' slower format inference.
try:
    classifications['created_at_ts'] = pd.to_datetime(ca_temp, format='%Y-%m-%d %H:%M:%S %Z')
except Exception as the_error:
    print "Oops:\n", the_error
    try:
        classifications['created_at_ts'] = pd.to_datetime(ca_temp, format='%Y-%m-%d %H:%M:%S')
    except Exception as the_error:
        print "Oops:\n", the_error
        classifications['created_at_ts'] = pd.to_datetime(ca_temp)
# started_at / finished_at carry fractional seconds, so try '.%f' first
try:
    classifications['started_at'] = pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S.%f')
except Exception as the_error:
    print "Oops:\n", the_error
    try:
        classifications['started_at'] = pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S %Z')
    except Exception as the_error:
        print "Oops:\n", the_error
        classifications['started_at'] = pd.to_datetime(sa_temp)
try:
    classifications['finished_at'] = pd.to_datetime(fa_temp, format='%Y-%m-%d %H:%M:%S.%f')
except Exception as the_error:
    print "Oops:\n", the_error
    try:
        classifications['finished_at'] = pd.to_datetime(fa_temp, format='%Y-%m-%d %H:%M:%S %Z')
    except Exception as the_error:
        print "Oops:\n", the_error
        classifications['finished_at'] = pd.to_datetime(fa_temp)
# grab the subject counts
n_subj_tot = len(classifications.subject_data.unique())
by_subject = classifications.groupby('subject_data')
subj_class = by_subject.created_at.aggregate('count')
# basic stats on how classified the subjects are
subj_class_mean = np.mean(subj_class)
subj_class_med = np.median(subj_class)
subj_class_min = np.min(subj_class)
subj_class_max = np.max(subj_class)
# save processing time and memory in the groupby.apply(); only keep the columns we're going to use
# (cols_used is defined earlier in the file, outside this excerpt)
classifications = classifications[cols_used]
# index by created_at as a timeseries
# note: this means things might not be uniquely indexed
# but it makes a lot of things easier and faster.
# update: it's not really needed in the main bit, but will do it on each group later.
#classifications.set_index('created_at_ts', inplace=True)
all_users = classifications.user_name.unique()
by_user = classifications.groupby('user_name')
# get total classification and user counts
n_class_tot = len(classifications)
n_users_tot = len(all_users)
# anonymous classifiers get a "not-logged-in" prefix on their user_name
unregistered = [q.startswith("not-logged-in") for q in all_users]
n_unreg = sum(unregistered)
n_reg = n_users_tot - n_unreg
# for the leaderboard, which I recommend project builders never make public because
# Just Say No to gamification
# But it's still interesting to see who your most prolific classifiers are, and
# e.g. whether they're also your most prolific Talk users
nclass_byuser = by_user.created_at.aggregate('count')
nclass_byuser_ranked = nclass_byuser.copy()
# NOTE(review): Series.sort() is the old in-place API (removed in modern
# pandas); sort_values(ascending=False, inplace=True) is the newer spelling
nclass_byuser_ranked.sort(ascending=False)
# very basic stats
nclass_med = np.median(nclass_byuser)
nclass_mean = np.mean(nclass_byuser)
# Gini coefficient - see the comments above the gini() function for more notes
nclass_gini = gini(nclass_byuser)
print "\nOverall:\n\n",n_class_tot,"classifications of",n_subj_tot,"subjects by",n_users_tot,"classifiers,"
print n_reg,"registered and",n_unreg,"unregistered.\n"
print "That's %.2f classifications per subject on average (median = %.1f)." % (subj_class_mean, subj_class_med)
print "The most classified subject has ",subj_class_max,"classifications; the least-classified subject has",subj_class_min,".\n"
print "Median number of classifications per user:",nclass_med
print "Mean number of classifications per user: %.2f" % nclass_mean
print "\nTop 10 most prolific classifiers:\n",nclass_byuser_ranked.head(10)
print "\n\nGini coefficient for classifications by user: %.2f\n" % nclass_gini
# compute the per-user stats
# alas I don't know of a way to print a progress bar or similar for group.apply() functions
# addition: apparently there's "pip install progressbar", but I haven't tried it yet, feel free to hack
# For a small classification file this is fast, but if you have > 1,000,000 this may be slow
# (albeit still much faster than a loop or similar)
# For a test file with 175,000 classifications and ~4,500 users it takes just under 90 seconds.
print "\nComputing session stats for each user...",datetime.datetime.now().strftime('%H:%M:%S.%f')
# sessionstats (defined earlier in the file) returns one Series of summary
# stats per user; apply() stacks them into a DataFrame indexed by user_name
session_stats = by_user.apply(sessionstats)
# If no stats file was supplied, add the start and end dates in the classification file to the output filename
if modstatsfile:
    statsfile_out = statsfile_out.replace('.csv', '_'+first_class_day+'_to_'+last_class_day+'.csv')
print "Writing to file", statsfile_out,"...",datetime.datetime.now().strftime('%H:%M:%S.%f')
session_stats.to_csv(statsfile_out)
| gpl-2.0 |
manderelee/csc2521_final | scripts/svm.py | 1 | 1045 | import sys
from utils import *
from sklearn import decomposition
from sklearn import svm
from sklearn.model_selection import train_test_split
if __name__ == "__main__":
    # Train a linear SVM to separate positive from negative examples,
    # after projecting both onto 5 principal components.
    if len(sys.argv) != 3:
        print ("python svm.py /path/to/pos/ex /path/to/neg/ex")
    else:
        # Truncate data to equalize number of examples from each set
        # NOTE(review): the labels below assume each file yields at least
        # num_data rows; shorter files would mislabel the tail -- confirm.
        num_data = 1000
        pos_data = read_csv(sys.argv[1])[:num_data]
        neg_data = read_csv(sys.argv[2])[:num_data]
        data = np.concatenate((pos_data, neg_data))
        labels = np.array([1]*num_data + [0]*num_data)
        # 70/30 train/test split with a fixed seed for reproducibility
        data_train, data_test, labels_train, labels_test = train_test_split(data, labels, test_size = 0.3, random_state=0)
        # fit PCA on the training set only, then project both sets
        pca = decomposition.PCA(n_components=5)
        pca.fit(data_train)
        data_train = pca.transform(data_train)
        data_test = pca.transform(data_test)
        clf = svm.SVC(kernel='linear')
        clf.fit(data_train, labels_train)
        # mean accuracy on the held-out test set
        score = clf.score(data_test, labels_test)
        print score
| mpl-2.0 |
dalejung/trtools | trtools/core/timeseries.py | 1 | 12879 | from datetime import datetime, time, date
from functools import partial
from dateutil import relativedelta
import calendar
from pandas import DateOffset, datetools, DataFrame, Series, Panel
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.resample import _get_range_edges
from pandas.core.groupby import DataFrameGroupBy, PanelGroupBy, BinGrouper
from pandas.tseries.resample import TimeGrouper
from pandas.tseries.offsets import Tick
from pandas.tseries.frequencies import _offset_map, to_offset
import pandas.lib as lib
import numpy as np
from trtools.monkey import patch, patch_prop
def _is_tick(offset):
    # True when `offset` is a fixed-frequency (Tick-based) pandas offset
    # such as seconds/minutes, as opposed to a calendar-based offset.
    return isinstance(offset, Tick)
## TODO See if I still need this. All this stuff was pre resample
def first_day(year, month, bday=True):
    """
    Return the day-of-month of the first (business) day of the month.

    With bday=True (the default), a month that starts on a weekend rolls
    forward to the following Monday; with bday=False the answer is
    always 1.
    """
    start_weekday = calendar.monthrange(year, month)[0]
    # Monday..Friday are weekdays 0..4; anything larger is a weekend day
    if not bday or start_weekday <= 4:
        return 1
    # Saturday (5) -> the 3rd, Sunday (6) -> the 2nd
    return 8 - start_weekday
class MonthStart(DateOffset):
    """
    DateOffset anchored on the first (business) day of the month.

    Really the point of this is for DateRange, creating
    a range where the month is anchored on day=1 and not the end
    """
    def apply(self, other):
        # If `other` already sits on this month's first (business) day,
        # advance to the next month's; otherwise snap to this month's.
        first = first_day(other.year, other.month)
        if other.day == first:
            result = other + relativedelta.relativedelta(months=1)
            result = result.replace(day=first_day(result.year, result.month))
        else:
            result = other.replace(day=first)
        # rebuild as a plain datetime, dropping any time-of-day component
        return datetime(result.year, result.month, result.day)

    def onOffset(self, someDate):
        # True when the date is exactly the month's first (business) day
        return someDate.day == first_day(someDate.year, someDate.month)
def daily_group(df):
    # Group `df` into calendar-day bins (see down_sample for mechanics).
    daterange_func = partial(DatetimeIndex, freq=datetools.day)
    return down_sample(df, daterange_func)
def weekly_group(df):
    # Group `df` into weekly bins anchored on Mondays
    # ("W@MON" is the old-style pandas frequency spelling).
    daterange_func = partial(DatetimeIndex, freq="W@MON")
    return down_sample(df, daterange_func)
def monthly_group(df):
    # Group `df` into monthly bins anchored on the month's first
    # (business) day via the MonthStart offset above.
    daterange_func = partial(DatetimeIndex, freq=MonthStart())
    return down_sample(df, daterange_func)
def down_sample(obj, daterange_func):
    """
    Group `obj` (Series/DataFrame/Panel) into the bins produced by
    `daterange_func`, using DatetimeIndex.asof so each timestamp maps to
    the most recent bin edge at or before it. The bin index is stashed
    on the returned groupby as `_range`.
    """
    if isinstance(obj, Panel):
        # Panels keep their datetime axis on major_axis
        index = obj.major_axis
    else:
        index = obj.index
    # normalize the endpoints to midnight so bins start on day boundaries
    start = datetime.combine(index[0].date(), time(0))
    end = datetime.combine(index[-1].date(), time(0))
    # NOTE(review): `range` shadows the builtin of the same name
    range = daterange_func(start=start, end=end)
    grouped = obj.groupby(range.asof)
    grouped._range = range
    return grouped
# END TODO
def cols(self, *args):
    # Return just the named columns (a cross-section along axis=1).
    return self.xs(list(args), axis=1)
def dropna_get(x, pos):
    """
    Return the `pos`-th non-NaN value of `x`, or None when there is no
    such value (everything NaN, or `pos` out of range).
    """
    try:
        return x.dropna().iget(pos)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any data-level failure still yields None
        return None
def aggregate_picker(grouped, grouped_indices, col=None):
    """
    Turn per-group positions (as produced by e.g. grouped.agg(np.argmax))
    back into the index labels and values they point at.

    In [276]: g.agg(np.argmax).high
    Out[276]:
    key_0
    2007-04-27    281
    2007-04-30    0
    2007-05-01    5
    2007-05-02    294
    2007-05-03    3
    2007-05-04    53

    Should take something in that form and return a DataFrame with the proper date indexes and values...
    """
    index = []
    values = []
    for key, group in grouped:
        if col:
            # only look within the requested column
            group = group[col]
        # position within this group chosen by the earlier agg step
        sub_index = grouped_indices[key]
        index.append(group.index[sub_index])
        values.append(group.iget_value(sub_index))
    return {'index':index, 'values':values}
# old version
def _kv_agg(grouped, func, col=None):
    """
    Works like agg but returns index label and value for each hit
    """
    if col:
        sub_indices = grouped.agg({col: func})[col]
    else:
        sub_indices = grouped.agg(func)
    data = aggregate_picker(grouped, sub_indices, col=col)
    # bugfix: `TimeSeries` was referenced here but never imported
    # (NameError at runtime); in old pandas it was just an alias of
    # Series, which *is* imported at the top of the file.
    return Series(data['values'], index=data['index'])
def kv_agg(grouped, func, col=None):
    """
    Simpler version that is a bit faster. Really, I don't use aggregate_picker,
    which makes it slightly faster.

    `func` receives each (optionally column-sliced) group and must return
    an integer position; the value and index label at that position are
    collected into the result.
    """
    index = []
    values = []
    for key, group in grouped:
        if col:
            group = group[col]
        sub_index = func(group)
        val = group.iget_value(sub_index)
        values.append(val)
        index.append(group.index[sub_index])
    # bugfix: `TimeSeries` was referenced here but never imported
    # (NameError at runtime); it was just an alias of Series in old pandas.
    return Series(values, index=index)
def set_time(arr, hour, minute):
    """
    Return a new list in which every datetime from `arr` is moved to the
    same wall-clock time (hour:minute); the dates are preserved and the
    inputs are not mutated.
    """
    stamp = time(hour, minute)
    return [datetime.combine(dt.date(), stamp) for dt in arr]
def reset_time(df, hour, minute):
    # Set every timestamp on the object's datetime axis to hour:minute.
    # Mutates `df` in place and also returns it; Panels use major_axis.
    if isinstance(df, (DataFrame, Series)):
        df.index = set_time(df.index, hour, minute)
    if isinstance(df, Panel):
        df.major_axis = set_time(df.major_axis, hour, minute)
    return df
def max_groupby(grouped, col=None):
    # For each group, pick the index label and value of the row holding
    # the maximum (of `col` when given) via np.argmax.
    df = kv_agg(grouped, np.argmax, col)
    return df
def trading_hours(df):
    # assuming timestamp marks end of bar
    # keep rows strictly after 09:30 and up to/including 16:00
    inds = df.index.indexer_between_time(time(9,30),
                                         time(16), include_start=False)
    return df.take(inds)
# np.vectorize'd extractors for arrays of datetime-like objects
times = np.vectorize(lambda x: x.time())
hours = np.vectorize(lambda x: x.time().hour)
minutes = np.vectorize(lambda x: x.time().minute)
def time_slice(series, hour=None, minute=None):
    """
    Return a boolean array, aligned with series.index, that is True
    where the timestamp matches the given hour and/or minute.

    Raises ValueError when neither hour nor minute is supplied (the
    previous version fell through to a NameError on `vec` in that case).
    """
    bh = hour is not None
    bm = minute is not None
    if not (bh or bm):
        raise ValueError("time_slice requires at least one of hour/minute")
    if bh and bm:
        t = time(hour, minute)
        vec = np.vectorize(lambda x: x.time() == t)
    elif bh:
        vec = np.vectorize(lambda x: x.time().hour == hour)
    else:
        vec = np.vectorize(lambda x: x.time().minute == minute)
    return vec(series.index)
def end_asof(index, label):
    """
    Like index.asof but snaps `label` forward to the next index entry
    (i.e. the end of the bar containing it) instead of backward.

    Returns np.nan when the label falls before the first bar or past the
    last one (the latter used to raise IndexError because index[loc] was
    taken with loc == len(index)).
    """
    if label not in index:
        loc = index.searchsorted(label, side='left')
        # loc == 0 -> before the first bar; loc == len(index) -> past the
        # last bar: in both cases there is no bar end to snap to
        if 0 < loc < len(index):
            return index[loc]
        return np.nan
    return label
# TODO Forget where I was using this. I think pandas does this now.
class TimeIndex(object):
    """
    Index over plain time-of-day values.

    Kind of like a DatetimeIndex, except it only cares about the time
    component of a datetime object. `times` must be sorted ascending for
    asof() to behave correctly.
    """
    def __init__(self, times):
        self.times = times

    def asof(self, date):
        """
        Price-is-right lookup: snap `date` back to the latest anchor time
        that is <= its time-of-day. An exact hit returns `date` itself,
        as does a time-of-day earlier than every anchor.
        """
        target = date.time()
        anchor = None
        for candidate in self.times:
            if candidate == target:
                return date
            if candidate > target:
                # we've passed the target; `anchor` holds the best match
                break
            anchor = candidate
        # TODO should I anchor this to the last time?
        if anchor is None:
            return date
        return datetime.combine(date.date(), anchor)
def get_time_index(freq, start=None, end=None):
    # Build a TimeIndex of intraday anchor times spaced at `freq`,
    # defaulting to the 9:30AM-4:00PM US equity session.
    if start is None:
        start = "1/1/2012 9:30AM"
    if end is None:
        end = "1/1/2012 4:00PM"
    # the date itself is a dummy; only the time-of-day grid matters
    ideal = DatetimeIndex(start=start, end=end, freq=freq)
    times = [date.time() for date in ideal]
    return TimeIndex(times)
def get_anchor_index(index, freq):
    """
    Build a DatetimeIndex at `freq` whose edges cover `index`, anchored
    on regular boundaries (e.g. 9:30, 9:45) rather than arbitrary times.
    """
    # The previous version also computed get_time_index()/asof() values
    # here, but they were immediately overwritten by _get_range_edges --
    # that dead code has been removed.
    start, end = _get_range_edges(index, offset=freq, closed='right')
    ind = DatetimeIndex(start=start, end=end, freq=freq)
    return ind
def anchor_downsample(obj, freq, axis=None):
    """
    Point of this is to fix the freq to regular intervals like 9:30, 9:45, 10:00
    and not 9:13, 9:28: 9:43
    """
    if axis is None:
        axis = 0
    if isinstance(obj, Panel):
        # a Panel's datetime axis is major_axis (axis 1)
        axis = 1
    index = obj._get_axis(axis)
    ind = get_anchor_index(index, freq)
    # right-closed bins between consecutive anchor timestamps
    bins = lib.generate_bins_dt64(index.asi8, ind.asi8, closed='right')
    # label each bin by its right edge (drop the leading edge)
    labels = ind[1:]
    grouper = BinGrouper(bins, labels)
    return obj.groupby(grouper)
# END TODO
# column -> cython-backed aggregation name; used to resample bars to OHLCV
cython_ohlc = {
    'open':'first',
    'high': 'max',
    'low': 'min',
    'close': 'last',
    'vol': 'sum'
}
def ohlc_grouped_cython(grouped):
    """
    Cython one is much faster. Should be same as old
    ohlc version
    """
    hldf = grouped.agg(cython_ohlc)
    # set column order back (agg with a dict does not preserve it)
    hldf = hldf.reindex(columns=['open', 'high', 'low', 'close', 'vol'])
    return hldf
# monkey patches
# Replace DataFrameGroupBy.ohlc so it aggregates open/high/low/close/vol
# through the fast cython path above.
@patch(DataFrameGroupBy, 'ohlc')
def ohlc(self):
    return ohlc_grouped_cython(self)
# Offset rule codes whose bins/labels conventionally anchor on the LEFT
# edge (start-of-period offsets); every other offset defaults to the right.
LEFT_OFFSETS = [
    'D',
    'B',
    'W',
    'MS',
    'BMS',
    'AS',
    'BAS',
    'QS',
    'BQS',
]
def _offset_defaults(freq):
    """
    Default closed/label sides for a resample at `freq`: start-anchored
    offsets bin on the left edge, everything else on the right.
    """
    rule = to_offset(freq).rule_code
    # strip any anchor suffix, e.g. 'W-MON' -> 'W'
    side = 'left' if rule.split('-')[0] in LEFT_OFFSETS else 'right'
    return {'closed': side, 'label': side}
class Downsample(object):
    """
    Callable/attribute-style facade over downsample(): call it with a
    freq string (df.downsample('5min')), or use a frequency alias as an
    attribute (df.downsample.D()).
    """
    def __init__(self, obj, axis=0):
        self.obj = obj
        self.axis = axis

    def __call__(self, freq, closed=None, label=None, axis=None, drop_empty=True):
        if axis is None:
            axis = self.axis
        return downsample(self.obj, freq=freq, closed=closed, label=label, axis=axis,
                          drop_empty=drop_empty)

    def __getattr__(self, key):
        # attribute access maps to a freq alias; underscores stand in for
        # dashes, so .W_MON means 'W-MON'
        key = key.replace('_', '-')
        def wrap(stride=None, closed=None, label=None, axis=None):
            offset = to_offset(key)
            if stride is not None:
                # e.g. .D(5) -> an offset of 5 days
                offset = offset * stride
            return self(offset, closed, label, axis)
        return wrap

    def _completers(self):
        # valid attribute names, for IPython tab-completion (see below)
        return [k.replace('-', '_') for k in list(_offset_map.keys()) if k]
# Attach `.downsample` as a property on DataFrame/Series (time axis = 0).
@patch_prop([DataFrame, Series], 'downsample')
def downsample_prop(self):
    return Downsample(self)
# Attach `.downsample` on Panel too; its datetime axis is major_axis (1).
@patch_prop([Panel], 'downsample')
def downsample_prop_panel(self):
    return Downsample(self, axis=1)
def downsample(self, freq, closed=None, label=None, axis=0, drop_empty=True):
    """
    Essentially use resample logic but returning the groupby object
    instead of an aggregated result. closed/label default per-offset
    (see _offset_defaults); drop_empty removes bins with no rows.
    """
    # default closed/label on offset
    defaults = _offset_defaults(freq)
    if closed is None:
        closed = defaults['closed']
    if label is None:
        label = defaults['label']
    tg = TimeGrouper(freq, closed=closed, label=label, axis=axis)
    groupby = self.groupby(tg)
    grouper = groupby.grouper
    # drop empty groups. this is when we have irregular data that
    # we just want to group into Daily without creating empty days.
    if drop_empty:
        bins = [0] # start with 0 for np.diff
        bins.extend(grouper.bins)
        bins = np.array(bins)
        # a bin edge equal to its predecessor means an empty bin
        periods_in_bin = np.diff(bins)
        empty = periods_in_bin == 0
        binlabels = grouper.binlabels
        # skip the 0 we added
        bins = bins[1:][~empty]
        binlabels = binlabels[~empty]
        grouper = BinGrouper(bins, binlabels)
    return self.groupby(grouper, axis=axis)
# Quick groupbys. _rs stands for resample, though they really use TimeGrouper.
# Eventuall take out the old groupbys once everything is verified to be equal
@patch([DataFrame, Series], 'fillforward')
def fillforward(df):
    """
    Take a lower-than-daily freq and map it onto business days by
    forward-filling. This is to make mapping to a daily chart easy and
    helps handle business days lost to holidays/vacations.
    """
    return df.asfreq(datetools.BDay(), method='pad')
@patch([Series], 'date')
def to_date(self):
    """
    Return a normalized index (times dropped to midnight). Will work on
    anything convertible to DatetimeIndex.

    # note, this won't normalize the Series.index
    """
    dt = DatetimeIndex(self).normalize()
    return dt
@patch([DataFrame, Series])
def normalize(self, inplace=False):
    """
    Return DataFrame with index normalized (times set to midnight).
    Operates on a copy unless inplace=True.
    """
    if not inplace:
        self = self.copy()
    index = self.index.normalize()
    self.index = index
    return self
# DatetimeIndex.time: time-of-day for each entry, built from the
# hour/minute/second component arrays.
@patch_prop([DatetimeIndex], 'time')
def dt_time(self):
    return np.array([time(*time_tuple) for time_tuple in zip(self.hour, self.minute, self.second)])
# DatetimeIndex.date: calendar date for each entry, built from the
# year/month/day component arrays.
@patch_prop([DatetimeIndex], 'date')
def dt_date(self):
    return np.array([date(*date_tuple) for date_tuple in zip(self.year, self.month, self.day)])
# IPYTHON
# Autocomplete the target endpoint
def install_ipython_completers(): # pragma: no cover
    """Register tab-completion of frequency aliases on Downsample objects."""
    from pandas import compat
    from IPython.utils.generics import complete_object

    @complete_object.when_type(Downsample)
    def complete_column_panel(self, prev_completions):
        # offer only alias names that are valid Python identifiers
        return [c for c in self._completers() \
                if isinstance(c, str) and compat.isidentifier(c)]
# Importing IPython brings in about 200 modules, so we want to avoid it unless
# we're in IPython (when those modules are loaded anyway).
import sys
if "IPython" in sys.modules: # pragma: no cover
    try:
        install_ipython_completers()
    except Exception:
        # completers are a convenience only; never fail the import
        pass
| mit |
jjberry/Autotrace | matlab-version/image_diversityNEW.py | 3 | 17500 | #!/usr/bin/env python
"""
image_diversityNEW.py
Rewritten by Gus Hahn-Powell on March 7 2014
based on ~2010 code by Jeff Berry
purpose:
This script measures the distance from average for each image in the
input set, and copies the specified number of highest scoring images
to a new folder called 'diverse'. If ROI_config.txt is present in the
same folder as the input images, the ROI in that file will be used to
do the measurement. If not present, it will use a hard-coded default ROI.
usage:
python image_diversity.py
"""
import cv
import re
import shutil
import os, sys
import operator
from numpy import *
from collections import defaultdict
import subprocess as sp
import multiprocessing as mp
import matplotlib.pyplot as plot
import gtk
import gtk.glade
# default log destination (cwd) and a case-insensitive .png/.jpg filter
log_file = os.path.join(os.getcwd(), "tmp_log")
image_extension_pattern = re.compile("(\.(png|jpg)$)", re.IGNORECASE)
'''
#change this to make use of multiprocessing.pool?
class CopyThread(multiprocessing.Process):
def run(self):
flag = 'ok'
while (flag != 'stop'):
cmd = CopyQueue.get()
if cmd == None:
flag = 'stop'
else:
#print ' '.join(cmd)
p = sp.Popen(cmd)
p.wait()
FinishQueue.put(cmd)
#print "CopyThread stopped"
'''
class ImageWindow:
"""
"""
    def __init__(self):
        # Build the GUI from the Glade description and wire up signals.
        gladefile = "ImageDiversity.glade"
        self.wTree = gtk.glade.XML(gladefile, "window1")
        self.win = self.wTree.get_widget("window1")
        self.win.set_title("Image Diversity")

        # NOTE(review): "on_ok_clicked" binds self.onOK, which is defined
        # elsewhere in the class -- verify it exists.
        dic = { "on_window1_destroy" : gtk.main_quit,
                "on_open1_clicked" : self.openImages,
                "on_open2_clicked" : self.openDest,
                "on_ok_clicked" : self.onOK}
        self.wTree.signal_autoconnect(dic)

        self.srcfileentry = self.wTree.get_widget("srcfileentry")
        self.dstfileentry = self.wTree.get_widget("dstfileentry")

        #initialized to None...
        self.destpath = None

        # numeric entry widgets controlling the train/test selection sizes
        self.train_most = self.wTree.get_widget("train_most") #Select N images
        self.train_least = self.wTree.get_widget("train_least") #Select n test?
        self.test_most = self.wTree.get_widget("test_most")
        self.test_least = self.wTree.get_widget("test_least")
        self.remaining = self.wTree.get_widget("remaining")
        self.batches = self.wTree.get_widget("batches")

        #assign 0 if not coercible to type int...
        self.safe_set_all()

        # re-validate the counts whenever any numeric entry changes
        self.train_most.connect("changed", self.update_remaining)
        self.train_least.connect("changed", self.update_remaining)
        self.test_most.connect("changed", self.update_remaining)
        self.test_least.connect("changed", self.update_remaining)
        self.batches.connect("changed", self.update_remaining)

        # selected image/trace paths and their source directories
        self.images = []
        self.traces = []
        self.images_dir = None
        self.traces_dir = None
        self.n = len(self.images)
        self.remaining.set_text(str(self.n))
        self.update_remaining()
        self.log_file = ""
    def logger(self, message, log_file=log_file):
        """
        Append `message` (plus a newline) to the log file.

        NOTE(review): the default is the module-level tmp_log path,
        captured at definition time; self.log_file (set in openDest) is
        ignored unless passed explicitly -- confirm that is intended.
        """
        with open(log_file, 'a') as lg:
            lg.write("{0}\n".format(message))
    def get_roi(self):
        """
        Get Region of Interest (RoI) for the selected images.

        Reads ROI_config.txt from the image directory when present;
        otherwise falls back to hard-coded Sonosite Titan coordinates.
        Sets self.csize, self.img, self.top/bottom/left/right and
        self.roisize.
        """
        # get an image and open it to see the size
        img = cv.LoadImageM(self.images[0], iscolor=False)
        self.csize = shape(img)
        self.img = asarray(img)

        #open up the ROI_config.txt and parse
        self.logger("images_dir: {0}".format(self.images_dir))
        #see if the ROI_config.txt file exists at the specified directory...should we instead launch SelectROI.py?
        self.config = os.path.join(self.images_dir,'ROI_config.txt') if os.path.exists(os.path.join(self.images_dir,'ROI_config.txt')) else None
        self.logger("self.config: {0}".format(self.config))
        if self.config:
            self.logger("Found ROI_config.txt")
            # config rows look like "name<TAB>value"; rows 1-4 hold the bounds
            c = open(self.config, 'r').readlines()
            self.top = int(c[1][:-1].split('\t')[1])
            self.bottom = int(c[2][:-1].split('\t')[1])
            self.left = int(c[3][:-1].split('\t')[1])
            self.right = int(c[4][:-1].split('\t')[1])
            self.logger("using ROI: [%d:%d, %d:%d]" % (self.top, self.bottom, self.left, self.right))
        else:
            self.logger("ROI_config.txt not found")
            self.top = 140 #default settings for the Sonosite Titan
            self.bottom = 320
            self.left = 250
            self.right = 580
            self.logger("using ROI: [%d:%d, %d:%d]" % (self.top, self.bottom, self.left, self.right))

        roi = img[self.top:self.bottom, self.left:self.right]
        self.roisize = shape(roi)
def safe_set(self, entry, value=""):
"""
Make sure entered text is coercible to type int
"""
try:
int(entry.get_text())
except:
entry.set_text(value)
def safe_set_all(self, value=""):
"""
"""
entries = [self.train_most, self.train_least, self.test_most, self.test_least, self.remaining, self.batches]
for entry in entries:
try:
int(entry.get_text())
except:
entry.set_text(value)
def safe_get(self, entry):
"""
Safely return an int (default is 0)
from a specified entry
"""
try:
return int(entry.get_text())
except:
return 0
    def openImages(self, event):
        """
        Allows user to select multiple images (jpg or png).

        Stores the chosen directory/files in self.images_dir/self.images,
        refreshes the remaining-count display, then chains into get_roi()
        and the trace-file chooser.
        """
        fc = gtk.FileChooserDialog(title='Select Image Files', parent=None,
                                   action=gtk.FILE_CHOOSER_ACTION_OPEN,
                                   buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                            gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        g_directory = fc.get_current_folder() if fc.get_current_folder() else os.path.expanduser("~")
        fc.set_current_folder(g_directory)
        fc.set_default_response(gtk.RESPONSE_OK)
        fc.set_select_multiple(True)
        ffilter = gtk.FileFilter()
        ffilter.set_name('Image Files')
        ffilter.add_pattern('*.jpg')
        ffilter.add_pattern('*.png')
        fc.add_filter(ffilter)
        response = fc.run()
        if response == gtk.RESPONSE_OK:
            self.images_dir = fc.get_current_folder() #set this to an attribute?
            # keep only files whose extension matches .png/.jpg (any case)
            self.images = [os.path.join(self.images_dir, f) for f in fc.get_filenames() if re.search(image_extension_pattern, f)]
            self.logger("{0} images found".format(len(self.images)))
            self.logger("images: {0}".format("\n".join(self.images)))
            self.n = len(self.images)
            self.update_remaining()
            self.srcfileentry.set_text(self.images_dir)
        fc.destroy()
        self.get_roi()
        self.openTraces()
    def openTraces(self):
        """
        Allows user to select multiple trace files (traced.txt).

        Stores the chosen files in self.traces, then builds the
        image->traces mapping via get_tracenames().
        """
        fc = gtk.FileChooserDialog(title='Select Trace Files', parent=None,
                                   action=gtk.FILE_CHOOSER_ACTION_OPEN,
                                   buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                            gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        # default to the image directory, where traces usually live
        g_directory = fc.get_current_folder() if fc.get_current_folder() else self.images_dir
        fc.set_current_folder(g_directory)
        fc.set_default_response(gtk.RESPONSE_OK)
        fc.set_select_multiple(True)
        ffilter = gtk.FileFilter()
        ffilter.set_name('Trace Files')
        ffilter.add_pattern('*.traced.txt')
        fc.add_filter(ffilter)
        response = fc.run()
        if response == gtk.RESPONSE_OK:
            self.traces_dir = fc.get_current_folder() #set this to an attribute?
            #should probably filter traces here (make sure images and traces match)
            # NOTE(review): paths are joined with images_dir, not traces_dir
            # -- this assumes traces sit alongside the images; confirm.
            self.traces = [os.path.join(self.images_dir, f) for f in fc.get_filenames() if "traced.txt" in f]
            self.logger("{0} traces found".format(len(self.traces)))
            self.logger("traces: {0}".format("\n".join(self.traces)))
        fc.destroy()
        self.get_tracenames()
    def openDest(self, event):
        """
        Prompt for the output folder. Stores it in self.destpath, mirrors
        it into the destination entry, and points self.log_file at a
        'diversity_log' file inside it.
        """
        fc = gtk.FileChooserDialog(title='Select Save Destination', parent=None,
                                   action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
                                   buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                            gtk.STOCK_SAVE, gtk.RESPONSE_OK))
        g_directory = fc.get_current_folder() if fc.get_current_folder() else os.path.expanduser("~")
        fc.set_current_folder(g_directory)
        fc.set_default_response(gtk.RESPONSE_OK)
        response = fc.run()
        if response == gtk.RESPONSE_OK:
            self.destpath = fc.get_current_folder()
            self.dstfileentry.set_text(self.destpath)
            self.log_file = os.path.join(self.destpath, "diversity_log")
        fc.destroy()
    def makeDest(self):
        """
        Ensure a 'diverse' subfolder exists under the chosen destination
        (self.destpath must already be set by openDest).
        """
        #TODO: add this into openDest?
        diverse_dir = os.path.join(self.destpath, "diverse")
        self.logger("images will be saved in " + diverse_dir)
        if not os.path.isdir(diverse_dir):
            os.mkdir(diverse_dir)
            self.logger("created directory" + diverse_dir)
def get_tracenames(self):
"""
This method will look for existing trace files and create a dictionary to corresponding
image files. It will only work if all image files are in the same directory
"""
#3/8/2014 (Gus): Changed to support multiple corresponding traces...
self.tracenames = defaultdict(list)
for image in self.images:
#get image name...
image_name = os.path.basename(image)
for trace in self.traces:
#get trace name...
trace_name = os.path.basename(trace)
if image_name in trace_name:
self.logger("image: {0}\ttrace: {1}".format(image_name, trace_name))
self.tracenames[image].append(trace)
def update_remaining(self, *args):
"""
update the number of images available
for training and test sets, given user's
input
"""
self.safe_set_all()
#need to safely get a value or assign zero if nothing
self.check_remaining()
#print "remaining: {0}".format(self.remaining.get_text())
self.remaining.set_text(str(self.n - self.safe_get(self.train_most) - self.safe_get(self.train_least)))
#make sure we don't have more batches than remaining...
if self.safe_get(self.batches) > self.safe_get(self.remaining):
self.batches.set_text(str(self.remaining))
    def check_remaining(self):
        """
        Enforce the selection invariants, blanking any entry that
        violates them: each test count may not exceed its training count,
        and the two training counts together may not exceed the number of
        selected images.
        """
        #test values come out of training numbers, not overall pool
        #reset test_most if value exceeds possible
        self.safe_set_all()
        if self.safe_get(self.test_most) > self.safe_get(self.train_most):
            self.test_most.set_text("")
        if self.safe_get(self.test_least) > self.safe_get(self.train_least):
            self.test_least.set_text("")
        #did we try to pick too many items?
        if self.safe_get(self.train_most) + self.safe_get(self.train_least) > self.n:
            self.train_most.set_text("")
            self.train_least.set_text("")
    def get_average_image(self):
        """
        creates an average image from
        a set of images and a corresponding RoI

        Returns (ave_img, files): a float array in [0, 1] holding the
        pixel-wise mean over every image's region of interest, and the list
        of image paths that contributed to it.
        """
        files = self.images
        # accumulator sized to the RoI; assumes self.roisize matches the
        # (bottom-top, right-left) crop below -- TODO confirm get_roi() ran
        ave_img = zeros(self.roisize)
        for i in range(len(files)):
            # legacy OpenCV (cv) API; loads as single-channel grayscale
            img = cv.LoadImageM(files[i], iscolor=False)
            roi = img[self.top:self.bottom, self.left:self.right]
            # scale 8-bit pixel values into [0, 1]
            roi = asarray(roi)/255.
            ave_img += roi
        ave_img /= len(files)
        return ave_img, files
def make_train_test(self, images, training_n, testing_n=None):
"""
takes a list of images and test and training sizes
returns two lists of non-overlapping images (training, testing)
"""
images_array = array(images)
images_indices = arange(len(images_array))
random.shuffle(images_indices)
traininds = images_indices[:training_n]
trainfiles = images_array[traininds]
testfiles = []
#make sure we have a test set
if testing_n:
testinds = images_indices[training_n:training_n+testing_n]
testfiles = images_array[testinds]
#return training, testing
return list(trainfiles), list(testfiles)
def move_files(self, images, destination, image_class="??"):
"""
"""
#move our test files...
self.logger("Moving {0} {1} files...".format(len(images), image_class))
for image in images:
image_name = os.path.basename(image)
dest = os.path.join(destination, image_name)
shutil.copy(image, dest)
if image in self.tracenames:
#should I average the traces instead?
for trace in self.tracenames[image]:
trace_name = os.path.basename(trace)
dest = os.path.join(destination, trace_name)
self.logger("image: {0}".format(image))
self.logger("trace source: {0}".format(trace))
self.logger("trace dest: {0}\n".format(dest))
shutil.copy(trace, dest)
def plot_diversity(self, sorted_results):
"""
"""
#show rank vs. energy plot
count = 0
for (i,j) in sorted_results:
count += 1
plot.plot(count, j, 'b.')
#add confirmation dialog that prompts for save location when ok is clicked
#plot.savefig(os.path.join(self.destpath, 'rankVenergy.png'))
plot.title("rank vs. energy plot for {0} images".format(count))
plot.ylabel('Diversity score')
plot.xlabel('Rank')
#remove x axis ticks
#plot.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
plot.show()
def get_diverse(self):
"""
get specified diversity set and
then copy relevant files to specified location
"""
batches = self.safe_get(self.batches)
if os.path.isdir(self.destpath):
self.logger("calculating average image...")
ave_img, files = self.get_average_image()
self.logger("measuring distances from average...")
results = {}
for i in range(len(self.images)):
img = cv.LoadImageM(self.images[i], iscolor=False)
roi = img[self.top:self.bottom, self.left:self.right]
roi = asarray(roi)/255.
dif_img = abs(roi - ave_img)
results[self.images[i]] = sum(sum(dif_img))
sorted_results = sorted(results.iteritems(), key=operator.itemgetter(1), reverse=True)
#plot rank vs diversity
self.plot_diversity(sorted_results)
most_diverse_n = self.safe_get(self.train_most)
least_diverse_n = self.safe_get(self.train_least)
test_most_diverse_n = self.safe_get(self.test_most)
test_least_diverse_n = self.safe_get(self.test_least)
training_most_diverse_n = most_diverse_n - test_most_diverse_n
training_least_diverse_n = least_diverse_n - test_least_diverse_n
test_size = test_most_diverse_n + test_least_diverse_n
self.logger("test size: {0}".format(test_size))
#remove test size from training size...
train_size = most_diverse_n + least_diverse_n - test_size
self.logger("training size: {0}".format(train_size))
all_images = [image for (image, _) in sorted_results]
most_diverse_images = []
least_diverse_images = []
#get n most diverse...
if most_diverse_n > 0:
self.logger("Selecting {0} most diverse images...".format(most_diverse_n))
for (image, score) in sorted_results[:most_diverse_n]:
self.logger("file: {0}\ndiversity score: {1}\n".format(image, score))
most_diverse_images.append(image)
#get most diverse for testing and training...
training_most_diverse, testing_most_diverse = self.make_train_test(most_diverse_images, training_n=training_most_diverse_n, testing_n=test_most_diverse_n)
else:
training_most_diverse = []
testing_most_diverse = []
#get n least diverse...
if least_diverse_n > 0:
self.logger("Selecting {0} least diverse images...".format(least_diverse_n))
#take the specified n least diverse...
for (image, score) in sorted_results[-1*least_diverse_n:]:
self.logger("file: {0}\ndiversity score: {1}\n".format(image, score))
least_diverse_images.append(image)
#get least diverse for testing and training...
training_least_diverse, testing_least_diverse = self.make_train_test(least_diverse_images, training_n=training_least_diverse_n, testing_n=test_least_diverse_n)
else:
training_least_diverse = []
testing_least_diverse = []
#make test, training, and batch file sets...
trainfiles = training_most_diverse + training_least_diverse
testfiles = testing_most_diverse + testing_least_diverse
#find remaining...
selected = set(trainfiles + testfiles)
remainingfiles = [image for image in all_images if image not in selected]
#prepare directory for training files...
self.traindir = os.path.join(self.destpath, "train")
if not os.path.isdir(self.traindir):
os.mkdir(self.traindir)
#move training files (edit this)...
self.move_files(trainfiles, destination=self.traindir, image_class="training")
#are we generating a test set?
if test_size > 0:
#prepare directory for test files...
self.testdir = os.path.join(self.destpath, "test")
if not os.path.isdir(self.testdir):
os.mkdir(self.testdir)
#move our test files...
self.move_files(testfiles, destination=self.testdir, image_class="test")
#get remaining and make n batches...
if batches > 0:
b_num = 1
#numpy trick works here...
for batch_files in array_split(array(remainingfiles), batches):
#pad batch folder name with some zeros
batch_name = "batch%03d" % (b_num)
self.logger("files in {0}: {1}".format(batch_name, len(batch_files)))
batch_dir = os.path.join(self.destpath, batch_name)
if not os.path.isdir(batch_dir):
os.mkdir(batch_dir)
#move batch files
self.move_files(batch_files, destination=batch_dir, image_class=batch_name)
#increment batch...
b_num+=1
# write sorted_results to a .txt file for future reference
# added Mar 10 2011 by Jeff Berry
o = open(os.path.join(self.destpath, 'SortedResults.txt'), 'w')
for (i,j) in sorted_results:
o.write("%s\t%.4f\n" %(i, j))
o.close()
#move ROI file...
roifile = os.path.join(self.images_dir, "ROI_config.txt")
if os.path.isfile(roifile):
self.logger("moving ROI_config.txt to {0}".format(roifile))
shutil.copy(self.destpath, "ROI_config.txt")
    def onOK(self, event):
        """
        Handler for the OK button: validates the user's settings, runs the
        diversity selection, and shuts the GTK main loop down.
        """
        # minimal validation: need a destination, images, and at least one
        # "most diverse" training image
        if not self.destpath or not self.images or self.safe_get(self.train_most) == 0:
            #run error dialog and return...
            error_dialog = gtk.MessageDialog(parent=None, type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_CLOSE, message_format="Some of your settings are missing...")
            error_dialog.run()
            error_dialog.destroy()
            return
        self.get_roi()
        self.get_diverse()
        gtk.main_quit()
        self.logger("exiting...")
        # NOTE(review): log_file here appears to be a module-level global
        # (self.log_file is the destination set in openDest) -- confirm it is
        # defined before this handler can run
        shutil.move(log_file, self.log_file)
if __name__ == "__main__":
    # build the GUI and hand control to the GTK main loop
    ImageWindow()
    gtk.main()
| mit |
MartinSavc/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
    # Tests the Silhouette Coefficient.
    dataset = datasets.load_iris()
    X, y = dataset.data, dataset.target
    D = pairwise_distances(X, metric='euclidean')
    # Given that the actual labels are used, we can assume that S would be
    # positive.
    score_precomputed = silhouette_score(D, y, metric='precomputed')
    assert(score_precomputed > 0)
    # Test without calculating D
    score_euclidean = silhouette_score(X, y, metric='euclidean')
    assert_almost_equal(score_precomputed, score_euclidean)
    # Test with sampling
    sample = int(X.shape[0] / 2)
    score_precomputed = silhouette_score(D, y, metric='precomputed',
                                         sample_size=sample,
                                         random_state=0)
    score_euclidean = silhouette_score(X, y, metric='euclidean',
                                       sample_size=sample,
                                       random_state=0)
    assert(score_precomputed > 0)
    assert(score_euclidean > 0)
    assert_almost_equal(score_euclidean, score_precomputed)
    # Test with sparse X
    D_sparse = pairwise_distances(csr_matrix(X), metric='euclidean')
    score_sparse = silhouette_score(D_sparse, y, metric='precomputed')
    assert(score_sparse > 0)
def test_no_nan():
    # Assert Silhouette Coefficient != nan when there is 1 sample in a class.
    # This tests for the condition that caused issue 960.
    # Note that there is only one sample in cluster 0. This used to cause the
    # silhouette_score to return nan (see bug #960).
    labels = np.array([1, 0, 1, 1, 1])
    # The distance matrix doesn't actually matter.
    rng = np.random.RandomState(0)
    D = rng.rand(len(labels), len(labels))
    assert_false(np.isnan(silhouette_score(D, labels, metric='precomputed')))
def test_correct_labelsize():
    # Assert 1 < n_labels < n_samples
    dataset = datasets.load_iris()
    X = dataset.data
    # fix: the patterns are regexes, so write them as raw strings -- '\.' and
    # '\(' in a plain string are invalid escape sequences (DeprecationWarning
    # on modern Python); also factor out the shared message template
    msg = (r'Number of labels is %d\. Valid values are 2 '
           r'to n_samples - 1 \(inclusive\)')
    # n_labels = n_samples
    y = np.arange(X.shape[0])
    assert_raises_regexp(ValueError,
                         msg % len(np.unique(y)),
                         silhouette_score, X, y)
    # n_labels = 1
    y = np.zeros(X.shape[0])
    assert_raises_regexp(ValueError,
                         msg % len(np.unique(y)),
                         silhouette_score, X, y)
| bsd-3-clause |
JeanKossaifi/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
    """Equal-weight mixture density of two zero-mean Gaussians
    (scales 0.25/e and 4/e)."""
    narrow = stats.norm(scale=0.25 / e).pdf(x)
    wide = stats.norm(scale=4 / e).pdf(x)
    return 0.5 * (narrow + wide)
# sample a flat, anisotropic 3-D point cloud: wide in x/y, thin in z
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
# per-point density, used only to colour the scatter plot
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
# correlate the axes so the principal components are not axis-aligned
a = x + y
b = 2 * y
c = a - b + z
# rescale a and b to comparable variance
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
    """Scatter the point cloud in 3-D and overlay the plane spanned by the
    two dominant principal components.

    fig_num selects the matplotlib figure; elev/azim set the view angles.
    """
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
    # plot every 10th point, coloured by the precomputed density
    ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
    Y = np.c_[a, b, c]
    # Using SciPy's SVD, this would be:
    # _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
    pca = PCA(n_components=3)
    pca.fit(Y)
    pca_score = pca.explained_variance_ratio_
    V = pca.components_
    x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
    # NOTE(review): the line above is immediately overwritten by the fixed
    # 3 * V.T scaling below -- presumably a leftover experiment
    x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
    # four corner points of the PC1/PC2 plane...
    x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
    y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
    z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
    # ...reshaped into the 2x2 grid that plot_surface expects
    x_pca_plane.shape = (2, 2)
    y_pca_plane.shape = (2, 2)
    z_pca_plane.shape = (2, 2)
    ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
# render the same cloud from two viewpoints: edge-on (flat) and tilted
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
JazzeYoung/VeryDeepAutoEncoder | pylearn2/testing/skip.py | 49 | 1363 | """
Helper functions for determining which tests to skip.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from nose.plugins.skip import SkipTest
import os
from theano.sandbox import cuda
# each optional-dependency flag is probed once at import time; tests call the
# matching skip_if_no_* helper below to bail out when the package is missing
scipy_works = True
try:
    import scipy
except ImportError:
    # pyflakes gets mad if you set scipy to None here
    scipy_works = False
sklearn_works = True
try:
    import sklearn
except ImportError:
    sklearn_works = False
h5py_works = True
try:
    import h5py
except ImportError:
    h5py_works = False
matplotlib_works = True
try:
    from matplotlib import pyplot
except ImportError:
    matplotlib_works = False
def skip_if_no_data():
    """Skip the calling test unless PYLEARN2_DATA_PATH is set."""
    if 'PYLEARN2_DATA_PATH' not in os.environ:
        raise SkipTest()
def skip_if_no_scipy():
    """Skip the calling test when scipy could not be imported."""
    if not scipy_works:
        raise SkipTest()
def skip_if_no_sklearn():
    """Skip the calling test when sklearn could not be imported."""
    if not sklearn_works:
        raise SkipTest()
def skip_if_no_gpu():
    """Skip the calling test when Theano's CUDA backend is unavailable."""
    # idiom fix: truthiness test instead of comparing `== False` (PEP 8 E712);
    # cuda_available is a plain bool, so behavior is unchanged
    if not cuda.cuda_available:
        raise SkipTest('Optional package cuda disabled.')
def skip_if_no_h5py():
    """Skip the calling test when h5py could not be imported."""
    if not h5py_works:
        raise SkipTest()
def skip_if_no_matplotlib():
    """Skip the calling test when matplotlib/pyplot could not be imported."""
    if not matplotlib_works:
        raise SkipTest("matplotlib and pyplot are not available")
| bsd-3-clause |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/tseries/offsets.py | 9 | 87012 | from datetime import date, datetime, timedelta
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.tseries.tools import to_datetime
from pandas.tseries.timedeltas import to_timedelta
from pandas.core.common import ABCSeries, ABCDatetimeIndex
# import after tools, dateutil check
from dateutil.relativedelta import relativedelta, weekday
from dateutil.easter import easter
import pandas.tslib as tslib
from pandas.tslib import Timestamp, OutOfBoundsDatetime, Timedelta
import functools
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'CBMonthEnd','CBMonthBegin',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
'BusinessHour',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',
'Week', 'WeekOfMonth', 'Easter',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset']
# convert to/from datetime/timestamp to allow invalid Timestamp ranges to pass thru
def as_timestamp(obj):
if isinstance(obj, Timestamp):
return obj
try:
return Timestamp(obj)
except (OutOfBoundsDatetime):
pass
return obj
def as_datetime(obj):
    """Convert Timestamp-like objects to plain datetime via their
    ``to_pydatetime`` hook; anything without the hook passes through."""
    converter = getattr(obj, 'to_pydatetime', None)
    return obj if converter is None else converter()
def apply_wraps(func):
    """Decorator for offset ``apply`` methods.

    Handles NaT pass-through, coercion of datetime-like arguments to
    Timestamp, timezone round-tripping (the wrapped function operates on
    naive wall time when ``_adjust_dst`` is set), normalization, and
    preservation of the nanosecond component that plain datetime arithmetic
    would otherwise drop.
    """
    @functools.wraps(func)
    def wrapper(self, other):
        if other is tslib.NaT:
            # NaT is absorbing: offset + NaT is NaT
            return tslib.NaT
        elif isinstance(other, (timedelta, Tick, DateOffset)):
            # timedelta path
            return func(self, other)
        elif isinstance(other, (np.datetime64, datetime, date)):
            other = as_timestamp(other)
            tz = getattr(other, 'tzinfo', None)
            nano = getattr(other, 'nanosecond', 0)
            try:
                if self._adjust_dst and isinstance(other, Timestamp):
                    # strip tz so the shift happens in local wall time
                    other = other.tz_localize(None)
                result = func(self, other)
                if self._adjust_dst:
                    result = tslib._localize_pydatetime(result, tz)
                result = Timestamp(result)
                if self.normalize:
                    result = result.normalize()
                # nanosecond may be deleted depending on offset process
                if not self.normalize and nano != 0:
                    if not isinstance(self, Nano) and result.nanosecond != nano:
                        if result.tz is not None:
                            # convert to UTC
                            value = tslib.tz_convert_single(result.value, 'UTC', result.tz)
                        else:
                            value = result.value
                        result = Timestamp(value + nano)
                if tz is not None and result.tzinfo is None:
                    result = tslib._localize_pydatetime(result, tz)
            except OutOfBoundsDatetime:
                # fall back to pure-python datetimes outside Timestamp range
                result = func(self, as_datetime(other))
                if self.normalize:
                    # normalize_date returns normal datetime
                    # NOTE(review): normalize_date is not imported in the
                    # visible header -- confirm it is in scope at runtime
                    result = normalize_date(result)
                if tz is not None and result.tzinfo is None:
                    result = tslib._localize_pydatetime(result, tz)
            return result
    return wrapper
def apply_index_wraps(func):
    """Decorator for vectorized ``apply_index`` implementations: snaps the
    resulting index to midnight when the offset is normalizing."""
    @functools.wraps(func)
    def wrapper(self, other):
        result = func(self, other)
        if not self.normalize:
            return result
        return result.to_period('D').to_timestamp()
    return wrapper
def _is_normalized(dt):
if (dt.hour != 0 or dt.minute != 0 or dt.second != 0
or dt.microsecond != 0 or getattr(dt, 'nanosecond', 0) != 0):
return False
return True
#----------------------------------------------------------------------
# DateOffset
class ApplyTypeError(TypeError):
    # sentinel class for catching the apply error to return NotImplemented
    # (raised by offset ``apply`` methods on unsupported operand types so
    # that ``__add__`` can fall back to returning NotImplemented)
    pass
class CacheableOffset(object):
    # marker mixin: offsets carrying this may have their generated date
    # ranges cached (see DateOffset._should_cache)
    _cacheable = True
class DateOffset(object):
    """
    Standard kind of date increment used for a date range.

    Works exactly like relativedelta in terms of the keyword args you
    pass in, use of the keyword n is discouraged-- you would be better
    off specifying n in the keywords you use, but regardless it is
    there for you. n is needed for DateOffset subclasses.

    DateOffsets work as follows. Each offset specifies a set of dates
    that conform to the DateOffset. For example, Bday defines this
    set to be the set of dates that are weekdays (M-F). To test if a
    date is in the set of a DateOffset dateOffset we can use the
    onOffset method: dateOffset.onOffset(date).

    If a date is not on a valid date, the rollback and rollforward
    methods can be used to roll the date to the nearest valid date
    before/after the date.

    DateOffsets can be created to move dates forward a given number of
    valid dates. For example, Bday(2) can be added to a date to move
    it two business days forward. If the date does not start on a
    valid date, first it is moved to a valid date. Thus pseudo code
    is:

        def __add__(date):
            date = rollback(date) # does nothing if date is valid
            return date + <n number of periods>

    When a date offset is created for a negative number of periods,
    the date is first rolled forward. The pseudo code is:

        def __add__(date):
            date = rollforward(date) # does nothing if date is valid
            return date + <n number of periods>

    Zero presents a problem. Should it roll forward or back? We
    arbitrarily have it rollforward:

        date + BDay(0) == BDay.rollforward(date)

    Since 0 is a bit weird, we suggest avoiding its use.
    """
    _cacheable = False  # may generated ranges be cached? (see CacheableOffset)
    _normalize_cache = True
    # keywords that force the relativedelta (calendar-aware) path
    _kwds_use_relativedelta = (
        'years', 'months', 'weeks', 'days',
        'year', 'month', 'week', 'day', 'weekday',
        'hour', 'minute', 'second', 'microsecond'
    )
    _use_relativedelta = False
    _adjust_dst = False  # whether apply() should operate in local wall time
    # default for prior pickles
    normalize = False
    def __init__(self, n=1, normalize=False, **kwds):
        # n: number of offset periods; normalize: snap results to midnight
        self.n = int(n)
        self.normalize = normalize
        self.kwds = kwds
        self._offset, self._use_relativedelta = self._determine_offset()
    def _determine_offset(self):
        # timedelta is used for sub-daily plural offsets and all singular offsets
        # relativedelta is used for plural offsets of daily length or more
        # nanosecond(s) are handled by apply_wraps
        kwds_no_nanos = dict(
            (k, v) for k, v in self.kwds.items()
            if k not in ('nanosecond', 'nanoseconds')
        )
        use_relativedelta = False
        if len(kwds_no_nanos) > 0:
            if any(k in self._kwds_use_relativedelta for k in kwds_no_nanos):
                use_relativedelta = True
                offset = relativedelta(**kwds_no_nanos)
            else:
                # sub-daily offset - use timedelta (tz-aware)
                offset = timedelta(**kwds_no_nanos)
        else:
            offset = timedelta(1)
        return offset, use_relativedelta
    @apply_wraps
    def apply(self, other):
        """Shift a single datetime-like by this offset."""
        if self._use_relativedelta:
            other = as_datetime(other)
        if len(self.kwds) > 0:
            tzinfo = getattr(other, 'tzinfo', None)
            if tzinfo is not None and self._use_relativedelta:
                # perform calculation in UTC
                other = other.replace(tzinfo=None)
            if self.n > 0:
                for i in range(self.n):
                    other = other + self._offset
            else:
                for i in range(-self.n):
                    other = other - self._offset
            if tzinfo is not None and self._use_relativedelta:
                # bring tz back from UTC calculation
                other = tslib._localize_pydatetime(other, tzinfo)
            return as_timestamp(other)
        else:
            # no keywords: a plain shift of n days
            return other + timedelta(self.n)
    @apply_index_wraps
    def apply_index(self, i):
        """
        Vectorized apply of DateOffset to DatetimeIndex,
        raises NotImplentedError for offsets without a
        vectorized implementation

        .. versionadded:: 0.17.0

        Parameters
        ----------
        i : DatetimeIndex

        Returns
        -------
        y : DatetimeIndex
        """
        if not type(self) is DateOffset:
            raise NotImplementedError("DateOffset subclass %s "
                                      "does not have a vectorized "
                                      "implementation"
                                      % (self.__class__.__name__,))
        relativedelta_fast = set(['years', 'months', 'weeks',
                                  'days', 'hours', 'minutes',
                                  'seconds', 'microseconds'])
        # relativedelta/_offset path only valid for base DateOffset
        if (self._use_relativedelta and
                set(self.kwds).issubset(relativedelta_fast)):
            # months, weeks and sub-day components are applied separately
            months = ((self.kwds.get('years', 0) * 12
                       + self.kwds.get('months', 0)) * self.n)
            if months:
                shifted = tslib.shift_months(i.asi8, months)
                i = i._shallow_copy(shifted)
            weeks = (self.kwds.get('weeks', 0)) * self.n
            if weeks:
                i = (i.to_period('W') + weeks).to_timestamp() + i.to_perioddelta('W')
            timedelta_kwds = dict((k,v) for k,v in self.kwds.items()
                                  if k in ['days','hours','minutes',
                                           'seconds','microseconds'])
            if timedelta_kwds:
                delta = Timedelta(**timedelta_kwds)
                i = i + (self.n * delta)
            return i
        elif not self._use_relativedelta and hasattr(self, '_offset'):
            # timedelta
            return i + (self._offset * self.n)
        else:
            # relativedelta with other keywords
            raise NotImplementedError("DateOffset with relativedelta "
                                      "keyword(s) %s not able to be "
                                      "applied vectorized" %
                                      (set(self.kwds) - relativedelta_fast),)
    def isAnchored(self):
        # anchored offsets (n == 1) snap to fixed calendar points
        return (self.n == 1)
    def copy(self):
        return self.__class__(self.n, normalize=self.normalize, **self.kwds)
    def _should_cache(self):
        return self.isAnchored() and self._cacheable
    def _params(self):
        """Hashable tuple of the attributes that define offset equality."""
        all_paras = dict(list(vars(self).items()) + list(self.kwds.items()))
        if 'holidays' in all_paras and not all_paras['holidays']:
            all_paras.pop('holidays')
        exclude = ['kwds', 'name','normalize', 'calendar']
        attrs = [(k, v) for k, v in all_paras.items() if (k not in exclude ) and (k[0] != '_')]
        attrs = sorted(set(attrs))
        params = tuple([str(self.__class__)] + attrs)
        return params
    def __repr__(self):
        # e.g. '<3 * DateOffsets: days=1>'
        if hasattr(self, '_named'):
            return self._named
        className = getattr(self, '_outputName', type(self).__name__)
        exclude = set(['n', 'inc', 'normalize'])
        attrs = []
        for attr in sorted(self.__dict__):
            if ((attr == 'kwds' and len(self.kwds) == 0)
                    or attr.startswith('_')):
                continue
            elif attr == 'kwds':
                # only show kwds that are not already surfaced as attributes
                kwds_new = {}
                for key in self.kwds:
                    if not hasattr(self, key):
                        kwds_new[key] = self.kwds[key]
                if len(kwds_new) > 0:
                    attrs.append('='.join((attr, repr(kwds_new))))
            else:
                if attr not in exclude:
                    attrs.append('='.join((attr, repr(getattr(self, attr)))))
        if abs(self.n) != 1:
            plural = 's'
        else:
            plural = ''
        n_str = ""
        if self.n != 1:
            n_str = "%s * " % self.n
        out = '<%s' % n_str + className + plural
        if attrs:
            out += ': ' + ', '.join(attrs)
        out += '>'
        return out
    @property
    def name(self):
        if hasattr(self, '_named'):
            return self._named
        else:
            return self.rule_code
    def __eq__(self, other):
        # frequency strings compare equal to the offset they parse to
        if other is None:
            return False
        if isinstance(other, compat.string_types):
            from pandas.tseries.frequencies import to_offset
            other = to_offset(other)
        if not isinstance(other, DateOffset):
            return False
        return self._params() == other._params()
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        return hash(self._params())
    def __call__(self, other):
        return self.apply(other)
    def __add__(self, other):
        if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
            # defer to the index/series' own addition
            return other + self
        try:
            return self.apply(other)
        except ApplyTypeError:
            return NotImplemented
    def __radd__(self, other):
        return self.__add__(other)
    def __sub__(self, other):
        if isinstance(other, datetime):
            raise TypeError('Cannot subtract datetime from offset.')
        elif type(other) == type(self):
            # same-type offsets subtract by combining their counts
            return self.__class__(self.n - other.n, normalize=self.normalize, **self.kwds)
        else:  # pragma: no cover
            return NotImplemented
    def __rsub__(self, other):
        if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
            return other - self
        return self.__class__(-self.n, normalize=self.normalize, **self.kwds) + other
    def __mul__(self, someInt):
        return self.__class__(n=someInt * self.n, normalize=self.normalize, **self.kwds)
    def __rmul__(self, someInt):
        return self.__mul__(someInt)
    def __neg__(self):
        return self.__class__(-self.n, normalize=self.normalize, **self.kwds)
    def rollback(self, dt):
        """Roll provided date backward to next offset only if not on offset"""
        dt = as_timestamp(dt)
        if not self.onOffset(dt):
            dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds)
        return dt
    def rollforward(self, dt):
        """Roll provided date forward to next offset only if not on offset"""
        dt = as_timestamp(dt)
        if not self.onOffset(dt):
            dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
        return dt
    def onOffset(self, dt):
        """Return True when *dt* lies in the set of dates this offset hits."""
        if self.normalize and not _is_normalized(dt):
            return False
        # XXX, see #1395
        if type(self) == DateOffset or isinstance(self, Tick):
            return True
        # Default (slow) method for determining if some date is a member of the
        # date range generated by this offset. Subclasses may have this
        # re-implemented in a nicer way.
        a = dt
        b = ((dt + self) - self)
        return a == b
    # helpers for vectorized offsets
    def _beg_apply_index(self, i, freq):
        """Offsets index to beginning of Period frequency"""
        off = i.to_perioddelta('D')
        from pandas.tseries.frequencies import get_freq_code
        base, mult = get_freq_code(freq)
        base_period = i.to_period(base)
        if self.n < 0:
            # when subtracting, dates on start roll to prior
            roll = np.where(base_period.to_timestamp() == i - off,
                            self.n, self.n + 1)
        else:
            roll = self.n
        base = (base_period + roll).to_timestamp()
        return base + off
    def _end_apply_index(self, i, freq):
        """Offsets index to end of Period frequency"""
        off = i.to_perioddelta('D')
        import pandas.tseries.frequencies as frequencies
        from pandas.tseries.frequencies import get_freq_code
        base, mult = get_freq_code(freq)
        base_period = i.to_period(base)
        if self.n > 0:
            # when adding, dates on end roll to next
            roll = np.where(base_period.to_timestamp(how='end') == i - off,
                            self.n, self.n - 1)
        else:
            roll = self.n
        base = (base_period + roll).to_timestamp(how='end')
        return base + off
    # way to get around weirdness with rule_code
    @property
    def _prefix(self):
        raise NotImplementedError('Prefix not defined')
    @property
    def rule_code(self):
        return self._prefix
    @property
    def freqstr(self):
        """Frequency string, e.g. '3D'; falls back to repr() when no
        rule code is defined."""
        try:
            code = self.rule_code
        except NotImplementedError:
            return repr(self)
        if self.n != 1:
            fstr = '%d%s' % (self.n, code)
        else:
            fstr = code
        return fstr
class SingleConstructorOffset(DateOffset):
    """Offset whose frequency string carries no suffix (e.g. 'B'):
    construction from a name takes no extra arguments."""
    @classmethod
    def _from_name(cls, suffix=None):
        # default _from_name calls cls with no args
        if suffix:
            raise ValueError("Bad freq suffix %s" % suffix)
        return cls()
class BusinessMixin(object):
    """ mixin to business types to provide related functions """
    # TODO: Combine this with DateOffset by defining a whitelisted set of
    # attributes on each object rather than the existing behavior of iterating
    # over internal ``__dict__``
    def __repr__(self):
        # e.g. '<2 * BusinessDays: offset=datetime.timedelta(0, 3600)>'
        if hasattr(self, '_named'):
            return self._named
        className = getattr(self, '_outputName', self.__class__.__name__)
        if abs(self.n) != 1:
            plural = 's'
        else:
            plural = ''
        n_str = ""
        if self.n != 1:
            n_str = "%s * " % self.n
        out = '<%s' % n_str + className + plural + self._repr_attrs() + '>'
        return out
    def _repr_attrs(self):
        # render the optional intraday offset for __repr__
        if self.offset:
            attrs = ['offset=%s' % repr(self.offset)]
        else:
            attrs = None
        out = ''
        if attrs:
            out += ': ' + ', '.join(attrs)
        return out
class BusinessDay(BusinessMixin, SingleConstructorOffset):
    """
    DateOffset subclass representing possibly n business days
    """
    _prefix = 'B'
    _adjust_dst = True
    def __init__(self, n=1, normalize=False, **kwds):
        self.n = int(n)
        self.normalize = normalize
        self.kwds = kwds
        # optional intraday timedelta applied after the business-day shift
        self.offset = kwds.get('offset', timedelta(0))
    @property
    def freqstr(self):
        # like DateOffset.freqstr but appends the intraday offset, e.g. 'B+1D'
        try:
            code = self.rule_code
        except NotImplementedError:
            return repr(self)
        if self.n != 1:
            fstr = '%d%s' % (self.n, code)
        else:
            fstr = code
        if self.offset:
            fstr += self._offset_str()
        return fstr
    def _offset_str(self):
        def get_str(td):
            # format a timedelta as concatenated D/H/Min/s/us components
            off_str = ''
            if td.days > 0:
                off_str += str(td.days) + 'D'
            if td.seconds > 0:
                s = td.seconds
                hrs = int(s / 3600)
                if hrs != 0:
                    off_str += str(hrs) + 'H'
                    s -= hrs * 3600
                mts = int(s / 60)
                if mts != 0:
                    off_str += str(mts) + 'Min'
                    s -= mts * 60
                if s != 0:
                    off_str += str(s) + 's'
            if td.microseconds > 0:
                off_str += str(td.microseconds) + 'us'
            return off_str
        if isinstance(self.offset, timedelta):
            zero = timedelta(0, 0, 0)
            if self.offset >= zero:
                off_str = '+' + get_str(self.offset)
            else:
                off_str = '-' + get_str(-self.offset)
            return off_str
        else:
            return '+' + repr(self.offset)
    def isAnchored(self):
        return (self.n == 1)
    @apply_wraps
    def apply(self, other):
        """Shift *other* by n business days (Mon-Fri), then by the
        intraday offset."""
        if isinstance(other, datetime):
            n = self.n
            # starting on a weekend with n == 0 rolls forward one bday
            if n == 0 and other.weekday() > 4:
                n = 1
            result = other
            # avoid slowness below
            if abs(n) > 5:
                # shift whole weeks at once, then fix up the remainder
                k = n // 5
                result = result + timedelta(7 * k)
                if n < 0 and result.weekday() > 4:
                    n += 1
                n -= 5 * k
                if n == 0 and result.weekday() > 4:
                    n -= 1
            # walk day by day, counting only weekdays
            while n != 0:
                k = n // abs(n)
                result = result + timedelta(k)
                if result.weekday() < 5:
                    n -= k
            if self.offset:
                result = result + self.offset
            return result
        elif isinstance(other, (timedelta, Tick)):
            # adding a timedelta folds it into the intraday offset
            return BDay(self.n, offset=self.offset + other,
                        normalize=self.normalize)
        else:
            raise ApplyTypeError('Only know how to combine business day with '
                                 'datetime or timedelta.')
    @apply_index_wraps
    def apply_index(self, i):
        time = i.to_perioddelta('D')
        # to_period rolls forward to next BDay; track and
        # reduce n where it does when rolling forward
        shifted = (i.to_perioddelta('B') - time).asi8 != 0
        if self.n > 0:
            roll = np.where(shifted, self.n - 1, self.n)
        else:
            roll = self.n
        return (i.to_period('B') + roll).to_timestamp() + time
    def onOffset(self, dt):
        # business days are Monday (0) through Friday (4)
        if self.normalize and not _is_normalized(dt):
            return False
        return dt.weekday() < 5
class BusinessHour(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business days
.. versionadded: 0.16.1
"""
_prefix = 'BH'
_anchor = 0
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
# must be validated here to equality check
kwds['start'] = self._validate_time(kwds.get('start', '09:00'))
kwds['end'] = self._validate_time(kwds.get('end', '17:00'))
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.start = kwds.get('start', '09:00')
self.end = kwds.get('end', '17:00')
# used for moving to next businessday
if self.n >= 0:
self.next_bday = BusinessDay(n=1)
else:
self.next_bday = BusinessDay(n=-1)
def _validate_time(self, t_input):
from datetime import time as dt_time
import time
if isinstance(t_input, compat.string_types):
try:
t = time.strptime(t_input, '%H:%M')
return dt_time(hour=t.tm_hour, minute=t.tm_min)
except ValueError:
raise ValueError("time data must match '%H:%M' format")
elif isinstance(t_input, dt_time):
if t_input.second != 0 or t_input.microsecond != 0:
raise ValueError("time data must be specified only with hour and minute")
return t_input
else:
raise ValueError("time data must be string or datetime.time")
def _get_daytime_flag(self):
if self.start == self.end:
raise ValueError('start and end must not be the same')
elif self.start < self.end:
return True
else:
return False
def _repr_attrs(self):
out = super(BusinessHour, self)._repr_attrs()
attrs = ['BH=%s-%s' % (self.start.strftime('%H:%M'),
self.end.strftime('%H:%M'))]
out += ': ' + ', '.join(attrs)
return out
def _next_opening_time(self, other):
"""
If n is positive, return tomorrow's business day opening time.
Otherwise yesterday's business day's opening time.
Opening time always locates on BusinessDay.
Otherwise, closing time may not if business hour extends over midnight.
"""
if not self.next_bday.onOffset(other):
other = other + self.next_bday
else:
if self.n >= 0 and self.start < other.time():
other = other + self.next_bday
elif self.n < 0 and other.time() < self.start:
other = other + self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _prev_opening_time(self, other):
"""
If n is positive, return yesterday's business day opening time.
Otherwise yesterday business day's opening time.
"""
if not self.next_bday.onOffset(other):
other = other - self.next_bday
else:
if self.n >= 0 and other.time() < self.start:
other = other - self.next_bday
elif self.n < 0 and other.time() > self.start:
other = other - self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _get_business_hours_by_sec(self):
    """
    Return business hours in a day by seconds.
    """
    if self._get_daytime_flag():
        # create dummy datetime to calculate business hours in a day
        dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
        until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
        return tslib.tot_seconds(until - dtstart)
    else:
        # Window wraps past midnight: measure into the following day.
        # NOTE(review): this branch mutates ``self.daytime`` as a side
        # effect and nothing here ever sets it back to True -- confirm
        # whether any caller still reads this attribute.
        self.daytime = False
        dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
        until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
        return tslib.tot_seconds(until - dtstart)
@apply_wraps
def rollback(self, dt):
    """Roll provided date backward to next offset only if not on offset"""
    if not self.onOffset(dt):
        businesshours = self._get_business_hours_by_sec()
        if self.n >= 0:
            # Latest closing time at or before dt: previous opening
            # time plus one full business-hour span.
            dt = self._prev_opening_time(dt) + timedelta(seconds=businesshours)
        else:
            dt = self._next_opening_time(dt) + timedelta(seconds=businesshours)
    return dt
@apply_wraps
def rollforward(self, dt):
    """Roll provided date forward to next offset only if not on offset"""
    if self.onOffset(dt):
        return dt
    if self.n >= 0:
        return self._next_opening_time(dt)
    return self._prev_opening_time(dt)
@apply_wraps
def apply(self, other):
    """Shift *other* by ``n`` business hours."""
    # calculate here because offset is not immutable
    daytime = self._get_daytime_flag()
    businesshours = self._get_business_hours_by_sec()
    bhdelta = timedelta(seconds=businesshours)

    if isinstance(other, datetime):
        # used for detecting edge condition
        nanosecond = getattr(other, 'nanosecond', 0)
        # reset timezone and nanosecond
        # other may be a Timestamp, thus not use replace
        other = datetime(other.year, other.month, other.day,
                         other.hour, other.minute,
                         other.second, other.microsecond)
        n = self.n
        # Normalise the starting point onto a valid business-hour
        # position before counting.
        if n >= 0:
            if (other.time() == self.end or
                    not self._onOffset(other, businesshours)):
                other = self._next_opening_time(other)
        else:
            if other.time() == self.start:
                # adjustment to move to previous business day
                other = other - timedelta(seconds=1)
            if not self._onOffset(other, businesshours):
                other = self._next_opening_time(other)
                other = other + bhdelta
        # Split the requested shift into whole business days (bd) and a
        # remainder in minutes (r).
        bd, r = divmod(abs(n * 60), businesshours // 60)
        if n < 0:
            bd, r = -bd, -r

        if bd != 0:
            skip_bd = BusinessDay(n=bd)
            # midnight business hour may not be on a BusinessDay
            if not self.next_bday.onOffset(other):
                remain = other - self._prev_opening_time(other)
                other = self._next_opening_time(other + skip_bd) + remain
            else:
                other = other + skip_bd

        hours, minutes = divmod(r, 60)
        result = other + timedelta(hours=hours, minutes=minutes)

        # If the remainder overshot the window, carry it into the next
        # (or previous) business-hour span.
        # because of previous adjustment, time will be larger than start
        if ((daytime and (result.time() < self.start or self.end < result.time())) or
                not daytime and (self.end < result.time() < self.start)):
            if n >= 0:
                bday_edge = self._prev_opening_time(other)
                bday_edge = bday_edge + bhdelta
                # calculate remainder
                bday_remain = result - bday_edge
                result = self._next_opening_time(other)
                result += bday_remain
            else:
                bday_edge = self._next_opening_time(other)
                bday_remain = result - bday_edge
                result = self._next_opening_time(result) + bhdelta
                result += bday_remain

        # edge handling
        if n >= 0:
            if result.time() == self.end:
                result = self._next_opening_time(result)
        else:
            if result.time() == self.start and nanosecond == 0:
                # adjustment to move to previous business day
                result = self._next_opening_time(result - timedelta(seconds=1)) + bhdelta

        return result
    else:
        # NOTE(review): this error message looks truncated ("... with ").
        raise ApplyTypeError('Only know how to combine business hour with ')
def onOffset(self, dt):
    """Return True if *dt* lies within the business-hour window."""
    if self.normalize and not _is_normalized(dt):
        return False

    if dt.tzinfo is not None:
        # Compare in naive wall-clock terms.
        dt = datetime(dt.year, dt.month, dt.day, dt.hour,
                      dt.minute, dt.second, dt.microsecond)
    # Valid BH can be on the different BusinessDay during midnight
    # Distinguish by the time spent from previous opening time
    businesshours = self._get_business_hours_by_sec()
    return self._onOffset(dt, businesshours)
def _onOffset(self, dt, businesshours):
    """
    Variant of ``onOffset`` reusing a precalculated *businesshours*
    (seconds per business day) for a slight speedup.
    """
    # A valid business-hour timestamp may fall on a different BusinessDay
    # when the window extends past midnight; distinguish by the time
    # elapsed since the relevant opening time.
    if self.n >= 0:
        op = self._prev_opening_time(dt)
    else:
        op = self._next_opening_time(dt)
    span = tslib.tot_seconds(dt - op)
    # Idiom fix: return the comparison directly (the original used an
    # if/else returning True/False and carried commented-out dead code).
    return span <= businesshours
class CustomBusinessDay(BusinessDay):
    """
    **EXPERIMENTAL** DateOffset subclass representing possibly n business days
    excluding holidays

    .. warning:: EXPERIMENTAL

        This class is not officially supported and the API is likely to change
        in future versions. Use this at your own risk.

    Parameters
    ----------
    n : int, default 1
    offset : timedelta, default timedelta(0)
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range
    weekmask : str, Default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``
    calendar : pd.HolidayCalendar or np.busdaycalendar
    """
    _cacheable = False
    _prefix = 'C'

    def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
                 holidays=None, calendar=None, **kwds):
        self.n = int(n)
        self.normalize = normalize
        self.kwds = kwds
        self.offset = kwds.get('offset', timedelta(0))
        calendar, holidays = self.get_calendar(weekmask=weekmask,
                                               holidays=holidays,
                                               calendar=calendar)
        # CustomBusinessDay instances are identified by the
        # following two attributes. See DateOffset._params()
        # holidays, weekmask
        self.kwds['weekmask'] = self.weekmask = weekmask
        self.kwds['holidays'] = self.holidays = holidays
        self.kwds['calendar'] = self.calendar = calendar

    def get_calendar(self, weekmask, holidays, calendar):
        """Generate a ``np.busdaycalendar`` plus the normalized holiday
        tuple from the given weekmask/holidays/calendar inputs."""
        if isinstance(calendar, np.busdaycalendar):
            if not holidays:
                holidays = tuple(calendar.holidays)
            elif not isinstance(holidays, tuple):
                holidays = tuple(holidays)
            else:
                # trust that calendar.holidays and holidays are
                # consistent
                pass
            return calendar, holidays

        if holidays is None:
            holidays = []
        try:
            # AbstractHolidayCalendar-style objects expose .holidays()
            holidays = holidays + calendar.holidays().tolist()
        except AttributeError:
            # calendar is None or lacks a holidays() method
            pass
        holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
                    holidays]
        holidays = tuple(sorted(holidays))

        kwargs = {'weekmask': weekmask}
        if holidays:
            kwargs['holidays'] = holidays

        try:
            busdaycalendar = np.busdaycalendar(**kwargs)
        except Exception:
            # BUG FIX: narrowed from a bare ``except:`` which also trapped
            # KeyboardInterrupt/SystemExit.
            # Check we have the required numpy version
            from distutils.version import LooseVersion

            if LooseVersion(np.__version__) < '1.7.0':
                raise NotImplementedError("CustomBusinessDay requires numpy >= "
                                          "1.7.0. Current version: " +
                                          np.__version__)
            else:
                raise
        return busdaycalendar, holidays

    def __getstate__(self):
        """Return a pickleable state"""
        state = self.__dict__.copy()
        del state['calendar']

        # we don't want to actually pickle the calendar object
        # as its a np.busdaycalendar; we recreate on deserialization
        try:
            state['kwds'].pop('calendar')
        except KeyError:
            # BUG FIX: narrowed from a bare ``except:``; only a missing
            # key is expected here.
            pass

        return state

    def __setstate__(self, state):
        """Reconstruct an instance from a pickled state"""
        self.__dict__ = state
        calendar, holidays = self.get_calendar(weekmask=self.weekmask,
                                               holidays=self.holidays,
                                               calendar=None)
        self.kwds['calendar'] = self.calendar = calendar
        self.kwds['holidays'] = self.holidays = holidays
        self.kwds['weekmask'] = state['weekmask']

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` custom business days."""
        # NOTE(review): roll direction is 'forward' for n <= 0 and
        # 'backward' for n > 0 -- looks inverted at first glance but is
        # the original behavior; confirm before changing.
        if self.n <= 0:
            roll = 'forward'
        else:
            roll = 'backward'

        if isinstance(other, datetime):
            date_in = other
            np_dt = np.datetime64(date_in.date())

            np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,
                                          busdaycal=self.calendar)

            dt_date = np_incr_dt.astype(datetime)
            # Re-attach the original wall-clock time.
            result = datetime.combine(dt_date, date_in.time())

            if self.offset:
                result = result + self.offset
            return result

        elif isinstance(other, (timedelta, Tick)):
            # NOTE(review): returns a plain BDay, dropping the custom
            # calendar -- preserved as-is for backward compatibility.
            return BDay(self.n, offset=self.offset + other,
                        normalize=self.normalize)
        else:
            raise ApplyTypeError('Only know how to combine trading day with '
                                 'datetime, datetime64 or timedelta.')

    def apply_index(self, i):
        # No vectorized implementation for custom calendars.
        raise NotImplementedError

    @staticmethod
    def _to_dt64(dt, dtype='datetime64'):
        # Currently
        # > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
        # numpy.datetime64('2013-05-01T02:00:00.000000+0200')
        # Thus astype is needed to cast datetime to datetime64[D]
        if getattr(dt, 'tzinfo', None) is not None:
            # Convert tz-aware input to UTC-equivalent naive first.
            i8 = tslib.pydt_to_i8(dt)
            dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
            dt = Timestamp(dt)
        dt = np.datetime64(dt)
        if dt.dtype.name != dtype:
            dt = dt.astype(dtype)
        return dt

    def onOffset(self, dt):
        """Return True if *dt* is a valid business day per the calendar."""
        if self.normalize and not _is_normalized(dt):
            return False
        day64 = self._to_dt64(dt, 'datetime64[D]')
        return np.is_busday(day64, busdaycal=self.calendar)
class MonthOffset(SingleConstructorOffset):
    """Common base for month-anchored offsets."""
    _adjust_dst = True

    @property
    def name(self):
        """Offset name: the rule code, suffixed when not anchored."""
        # BUG FIX: ``isAnchored`` is a method; the original tested the
        # bound method object itself (``if self.isAnchored:``), which is
        # always truthy, so the else branch was unreachable.
        if self.isAnchored():
            return self.rule_code
        else:
            return "%s-%s" % (self.rule_code, _int_to_month[self.n])
class MonthEnd(MonthOffset):
    """DateOffset of one month end"""

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` month-end anchors."""
        n = self.n
        _, days_in_month = tslib.monthrange(other.year, other.month)
        if other.day != days_in_month:
            # Not on a month end: snap to the previous month end first,
            # which absorbs one step when moving backward/zero.
            other = other + relativedelta(months=-1, day=31)

            if n <= 0:
                n = n + 1
        other = other + relativedelta(months=n, day=31)
        return other

    @apply_index_wraps
    def apply_index(self, i):
        # Vectorized path: shift whole months, anchored on month end.
        months = self.n - 1 if self.n >= 0 else self.n
        shifted = tslib.shift_months(i.asi8, months, 'end')
        return i._shallow_copy(shifted)

    def onOffset(self, dt):
        # True only on the last calendar day of dt's month.
        if self.normalize and not _is_normalized(dt):
            return False
        days_in_month = tslib.monthrange(dt.year, dt.month)[1]
        return dt.day == days_in_month

    _prefix = 'M'
class MonthBegin(MonthOffset):
    """DateOffset of one month at beginning"""

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` month-begin anchors."""
        months = self.n
        # Past the 1st while moving backward/zero: rolling back to this
        # month's start absorbs one step.
        if months <= 0 and other.day > 1:
            months += 1
        return other + relativedelta(day=1, months=months)

    @apply_index_wraps
    def apply_index(self, i):
        # Vectorized path: shift whole months, anchored on month start.
        months = self.n
        if months < 0:
            months += 1
        shifted = tslib.shift_months(i.asi8, months, 'start')
        return i._shallow_copy(shifted)

    def onOffset(self, dt):
        # On offset exactly on the first calendar day of a month.
        if self.normalize and not _is_normalized(dt):
            return False
        return dt.day == 1

    _prefix = 'MS'
class BusinessMonthEnd(MonthOffset):
    """DateOffset increments between business EOM dates"""

    def isAnchored(self):
        # Only the unit offset is anchored.
        return (self.n == 1)

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` business month-ends."""
        n = self.n
        wkday, days_in_month = tslib.monthrange(other.year, other.month)
        # Last business day of the month: pull the last calendar day back
        # when it lands on a weekend (weekday index > 4).
        lastBDay = days_in_month - max(((wkday + days_in_month - 1)
                                        % 7) - 4, 0)

        if n > 0 and not other.day >= lastBDay:
            # Rolling forward to this month's BM absorbs one increment.
            n = n - 1
        elif n <= 0 and other.day > lastBDay:
            # Past this month's BM: rolling back absorbs one decrement.
            n = n + 1

        other = other + relativedelta(months=n, day=31)

        if other.weekday() > 4:
            # Landed on a weekend: back up to the preceding business day.
            other = other - BDay()
        return other

    _prefix = 'BM'
class BusinessMonthBegin(MonthOffset):
    """DateOffset of one business month at beginning"""

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` business month-begins."""
        n = self.n
        wkday, _ = tslib.monthrange(other.year, other.month)
        first = _get_firstbday(wkday)

        if other.day > first and n <= 0:
            # as if rolled forward already
            n += 1
        elif other.day < first and n > 0:
            # Roll up to this month's first business day, absorbing one step.
            other = other + timedelta(days=first - other.day)
            n -= 1

        other = other + relativedelta(months=n)
        wkday, _ = tslib.monthrange(other.year, other.month)
        first = _get_firstbday(wkday)
        result = datetime(other.year, other.month, first, other.hour, other.minute,
                          other.second, other.microsecond)
        return result

    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        first_weekday, _ = tslib.monthrange(dt.year, dt.month)
        # If the month starts on Sat (5) / Sun (6) the first business day
        # is the 3rd / 2nd respectively; otherwise it is the 1st.
        if first_weekday == 5:
            return dt.day == 3
        elif first_weekday == 6:
            return dt.day == 2
        else:
            return dt.day == 1

    _prefix = 'BMS'
class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
    """
    **EXPERIMENTAL** DateOffset of one custom business month

    .. warning:: EXPERIMENTAL

        This class is not officially supported and the API is likely to change
        in future versions. Use this at your own risk.

    Parameters
    ----------
    n : int, default 1
    offset : timedelta, default timedelta(0)
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range
    weekmask : str, Default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``
    calendar : pd.HolidayCalendar or np.busdaycalendar
    """
    _cacheable = False
    _prefix = 'CBM'

    def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
                 holidays=None, calendar=None, **kwds):
        self.n = int(n)
        self.normalize = normalize
        self.kwds = kwds
        self.offset = kwds.get('offset', timedelta(0))
        # Delegate business-day validity to a CustomBusinessDay and
        # month anchoring to a plain MonthEnd.
        self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
                                       weekmask=weekmask, holidays=holidays,
                                       calendar=calendar, **kwds)
        self.m_offset = MonthEnd(n=1, normalize=normalize, **kwds)
        self.kwds['calendar'] = self.cbday.calendar  # cache numpy calendar

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` custom-business month-ends."""
        n = self.n
        # First move to month offset
        cur_mend = self.m_offset.rollforward(other)
        # Find this custom month offset
        cur_cmend = self.cbday.rollback(cur_mend)

        # handle zero case. arbitrarily rollforward
        if n == 0 and other != cur_cmend:
            n += 1

        if other < cur_cmend and n >= 1:
            # Rolling forward to this month's anchor absorbs one step.
            n -= 1
        elif other > cur_cmend and n <= -1:
            # Rolling back absorbs one step in the negative direction.
            n += 1

        new = cur_mend + n * self.m_offset
        result = self.cbday.rollback(new)
        return result
class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
    """
    **EXPERIMENTAL** DateOffset of one custom business month

    .. warning:: EXPERIMENTAL

        This class is not officially supported and the API is likely to change
        in future versions. Use this at your own risk.

    Parameters
    ----------
    n : int, default 1
    offset : timedelta, default timedelta(0)
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range
    weekmask : str, Default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``
    calendar : pd.HolidayCalendar or np.busdaycalendar
    """
    _cacheable = False
    _prefix = 'CBMS'

    def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
                 holidays=None, calendar=None, **kwds):
        self.n = int(n)
        self.normalize = normalize
        self.kwds = kwds
        self.offset = kwds.get('offset', timedelta(0))
        # Delegate business-day validity to a CustomBusinessDay and
        # month anchoring to a plain MonthBegin.
        self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
                                       weekmask=weekmask, holidays=holidays,
                                       calendar=calendar, **kwds)
        self.m_offset = MonthBegin(n=1, normalize=normalize, **kwds)
        self.kwds['calendar'] = self.cbday.calendar  # cache numpy calendar

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` custom-business month-begins."""
        n = self.n
        dt_in = other
        # First move to month offset
        cur_mbegin = self.m_offset.rollback(dt_in)
        # Find this custom month offset
        cur_cmbegin = self.cbday.rollforward(cur_mbegin)

        # handle zero case. arbitrarily rollforward
        if n == 0 and dt_in != cur_cmbegin:
            n += 1

        if dt_in > cur_cmbegin and n <= -1:
            # Rolling back to this month's anchor absorbs one step.
            n += 1
        elif dt_in < cur_cmbegin and n >= 1:
            # Rolling forward absorbs one step.
            n -= 1

        new = cur_mbegin + n * self.m_offset
        result = self.cbday.rollforward(new)
        return result
class Week(DateOffset):
    """
    Weekly offset

    Parameters
    ----------
    weekday : int, default None
        Always generate specific day of week. 0 for Monday
    """
    _adjust_dst = True

    def __init__(self, n=1, normalize=False, **kwds):
        self.n = n
        self.normalize = normalize
        self.weekday = kwds.get('weekday', None)

        if self.weekday is not None:
            if self.weekday < 0 or self.weekday > 6:
                raise ValueError('Day must be 0<=day<=6, got %d' %
                                 self.weekday)

        self._inc = timedelta(weeks=1)
        self.kwds = kwds

    def isAnchored(self):
        # Anchored only for the unit offset with an explicit weekday.
        return (self.n == 1 and self.weekday is not None)

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` weeks (anchored on ``weekday`` if set)."""
        base = other
        if self.weekday is None:
            # Unanchored: a plain multiple of one week.
            return other + self.n * self._inc

        if self.n > 0:
            k = self.n
            otherDay = other.weekday()
            if otherDay != self.weekday:
                # Rolling forward to the anchor weekday absorbs one step.
                other = other + timedelta((self.weekday - otherDay) % 7)
                k = k - 1
            # IMPROVEMENT: the original advanced one week at a time in a
            # loop (and contained a dead ``other = other`` statement);
            # adding k weeks at once is equivalent for datetime arithmetic.
            other = other + k * self._inc
        else:
            k = self.n
            otherDay = other.weekday()
            if otherDay != self.weekday:
                other = other + timedelta((self.weekday - otherDay) % 7)
            # Original subtracted one week -k times; identical result.
            other = other + k * self._inc

        # Restore the original wall-clock time-of-day.
        other = datetime(other.year, other.month, other.day,
                         base.hour, base.minute, base.second, base.microsecond)
        return other

    @apply_index_wraps
    def apply_index(self, i):
        if self.weekday is None:
            # Unanchored: shift whole weekly periods, preserving intra-week
            # position.
            return (i.to_period('W') + self.n).to_timestamp() + i.to_perioddelta('W')
        else:
            return self._end_apply_index(i, self.freqstr)

    def onOffset(self, dt):
        # On offset when dt falls on the anchor weekday.
        if self.normalize and not _is_normalized(dt):
            return False
        return dt.weekday() == self.weekday

    _prefix = 'W'

    @property
    def rule_code(self):
        suffix = ''
        if self.weekday is not None:
            suffix = '-%s' % (_int_to_weekday[self.weekday])
        return self._prefix + suffix

    @classmethod
    def _from_name(cls, suffix=None):
        if not suffix:
            weekday = None
        else:
            weekday = _weekday_to_int[suffix]
        return cls(weekday=weekday)
class WeekDay(object):
    """Integer constants for days of the week (Monday == 0, Sunday == 6)."""
    MON, TUE, WED, THU, FRI, SAT, SUN = range(7)
_int_to_weekday = {
WeekDay.MON: 'MON',
WeekDay.TUE: 'TUE',
WeekDay.WED: 'WED',
WeekDay.THU: 'THU',
WeekDay.FRI: 'FRI',
WeekDay.SAT: 'SAT',
WeekDay.SUN: 'SUN'
}
_weekday_to_int = dict((v, k) for k, v in _int_to_weekday.items())
class WeekOfMonth(DateOffset):
    """
    Describes monthly dates like "the Tuesday of the 2nd week of each month"

    Parameters
    ----------
    n : int
    week : {0, 1, 2, 3, ...}
        0 is 1st week of month, 1 2nd week, etc.
    weekday : {0, 1, ..., 6}
        0: Mondays
        1: Tuesdays
        2: Wednesdays
        3: Thursdays
        4: Fridays
        5: Saturdays
        6: Sundays
    """
    _adjust_dst = True

    def __init__(self, n=1, normalize=False, **kwds):
        self.n = n
        self.normalize = normalize
        # Both 'weekday' and 'week' are required keyword arguments.
        self.weekday = kwds['weekday']
        self.week = kwds['week']

        if self.n == 0:
            raise ValueError('N cannot be 0')

        if self.weekday < 0 or self.weekday > 6:
            raise ValueError('Day must be 0<=day<=6, got %d' %
                             self.weekday)
        if self.week < 0 or self.week > 3:
            raise ValueError('Week must be 0<=day<=3, got %d' %
                             self.week)

        self.kwds = kwds

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` week-of-month anchors."""
        base = other
        offsetOfMonth = self.getOffsetOfMonth(other)

        # Adjust the month count so that crossing this month's anchor
        # absorbs one step in the direction of travel.
        if offsetOfMonth > other:
            if self.n > 0:
                months = self.n - 1
            else:
                months = self.n
        elif offsetOfMonth == other:
            months = self.n
        else:
            if self.n > 0:
                months = self.n
            else:
                months = self.n + 1

        other = self.getOffsetOfMonth(other + relativedelta(months=months, day=1))
        # Restore the original wall-clock time-of-day.
        other = datetime(other.year, other.month, other.day, base.hour,
                         base.minute, base.second, base.microsecond)
        return other

    def getOffsetOfMonth(self, dt):
        """Return this offset's anchor date within *dt*'s month."""
        w = Week(weekday=self.weekday)
        d = datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo)
        # First occurrence of the weekday, then step forward week by week.
        d = w.rollforward(d)

        for i in range(self.week):
            d = w.apply(d)

        return d

    def onOffset(self, dt):
        # Compare dates only; time-of-day is ignored.
        if self.normalize and not _is_normalized(dt):
            return False
        d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo)
        return d == self.getOffsetOfMonth(dt)

    @property
    def rule_code(self):
        # e.g. 'WOM-2TUE' for week=1, weekday=1.
        return '%s-%d%s' % (self._prefix, self.week + 1,
                            _int_to_weekday.get(self.weekday, ''))

    _prefix = 'WOM'

    @classmethod
    def _from_name(cls, suffix=None):
        if not suffix:
            raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
        # TODO: handle n here...
        # only one digit weeks (1 --> week 0, 2 --> week 1, etc.)
        week = int(suffix[0]) - 1
        weekday = _weekday_to_int[suffix[1:]]
        return cls(week=week, weekday=weekday)
class LastWeekOfMonth(DateOffset):
    """
    Describes monthly dates in last week of month like "the last Tuesday of each month"

    Parameters
    ----------
    n : int
    weekday : {0, 1, ..., 6}
        0: Mondays
        1: Tuesdays
        2: Wednesdays
        3: Thursdays
        4: Fridays
        5: Saturdays
        6: Sundays
    """

    def __init__(self, n=1, normalize=False, **kwds):
        self.n = n
        self.normalize = normalize
        # 'weekday' is a required keyword argument.
        self.weekday = kwds['weekday']

        if self.n == 0:
            raise ValueError('N cannot be 0')

        if self.weekday < 0 or self.weekday > 6:
            raise ValueError('Day must be 0<=day<=6, got %d' %
                             self.weekday)

        self.kwds = kwds

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` last-weekday-of-month anchors."""
        offsetOfMonth = self.getOffsetOfMonth(other)

        # Adjust the month count so that crossing this month's anchor
        # absorbs one step in the direction of travel.
        if offsetOfMonth > other:
            if self.n > 0:
                months = self.n - 1
            else:
                months = self.n
        elif offsetOfMonth == other:
            months = self.n
        else:
            if self.n > 0:
                months = self.n
            else:
                months = self.n + 1

        return self.getOffsetOfMonth(other + relativedelta(months=months, day=1))

    def getOffsetOfMonth(self, dt):
        """Return the last ``weekday`` of *dt*'s month (time preserved)."""
        m = MonthEnd()
        d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute,
                     dt.second, dt.microsecond, tzinfo=dt.tzinfo)
        eom = m.rollforward(d)
        w = Week(weekday=self.weekday)
        # Last occurrence of the weekday at or before month end.
        return w.rollback(eom)

    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        return dt == self.getOffsetOfMonth(dt)

    @property
    def rule_code(self):
        # e.g. 'LWOM-TUE'.
        return '%s-%s' % (self._prefix, _int_to_weekday.get(self.weekday, ''))

    _prefix = 'LWOM'

    @classmethod
    def _from_name(cls, suffix=None):
        if not suffix:
            raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
        # TODO: handle n here...
        weekday = _weekday_to_int[suffix]
        return cls(weekday=weekday)
class QuarterOffset(DateOffset):
    """Quarter representation - doesn't call super"""

    #: default month for __init__
    _default_startingMonth = None
    #: default month in _from_name
    _from_name_startingMonth = None
    _adjust_dst = True
    # TODO: Consider combining QuarterOffset and YearOffset __init__ at some
    # point

    def __init__(self, n=1, normalize=False, **kwds):
        self.n = n
        self.normalize = normalize
        # Fall back to the subclass default anchor month when not given.
        self.startingMonth = kwds.get('startingMonth',
                                      self._default_startingMonth)
        self.kwds = kwds

    def isAnchored(self):
        # Anchored only for the unit offset with a known anchor month.
        return self.n == 1 and self.startingMonth is not None

    @classmethod
    def _from_name(cls, suffix=None):
        kwargs = {}
        if suffix:
            kwargs['startingMonth'] = _month_to_int[suffix]
        elif cls._from_name_startingMonth is not None:
            kwargs['startingMonth'] = cls._from_name_startingMonth
        return cls(**kwargs)

    @property
    def rule_code(self):
        # e.g. 'Q-DEC' for startingMonth=12.
        return '%s-%s' % (self._prefix, _int_to_month[self.startingMonth])
class BQuarterEnd(QuarterOffset):
    """DateOffset increments between business Quarter dates
    startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
    startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
    startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
    """
    _outputName = 'BusinessQuarterEnd'
    _default_startingMonth = 3
    # 'BQ'
    _from_name_startingMonth = 12
    _prefix = 'BQ'

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` business quarter-ends."""
        n = self.n
        base = other
        # Strip tz (restored below) so relativedelta math is naive.
        other = datetime(other.year, other.month, other.day,
                         other.hour, other.minute, other.second,
                         other.microsecond)

        wkday, days_in_month = tslib.monthrange(other.year, other.month)
        # Last business day of the current month.
        lastBDay = days_in_month - max(((wkday + days_in_month - 1)
                                        % 7) - 4, 0)

        # Months until the next quarter-end month (0 when already in one).
        monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
        if monthsToGo == 3:
            monthsToGo = 0

        if n > 0 and not (other.day >= lastBDay and monthsToGo == 0):
            # Rolling forward to this quarter's BQ absorbs one increment.
            n = n - 1
        elif n <= 0 and other.day > lastBDay and monthsToGo == 0:
            # Past this quarter's BQ: rolling back absorbs one decrement.
            n = n + 1

        other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
        other = tslib._localize_pydatetime(other, base.tzinfo)
        if other.weekday() > 4:
            # Landed on a weekend: back up to the preceding business day.
            other = other - BDay()
        return other

    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        # Business month end AND a quarter-end month for this anchor.
        modMonth = (dt.month - self.startingMonth) % 3
        return BMonthEnd().onOffset(dt) and modMonth == 0
# Month-number <-> month-abbreviation lookup tables
# (presumably 1 -> 'JAN' etc.; defined by tslib._MONTH_ALIASES).
_int_to_month = tslib._MONTH_ALIASES
_month_to_int = {name: num for num, name in _int_to_month.items()}
# TODO: This is basically the same as BQuarterEnd
class BQuarterBegin(QuarterOffset):
    """DateOffset increments between the first business days of quarters."""
    _outputName = "BusinessQuarterBegin"
    # I suspect this is wrong for *all* of them.
    _default_startingMonth = 3
    _from_name_startingMonth = 1
    _prefix = 'BQS'

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` business quarter-begins."""
        n = self.n
        wkday, _ = tslib.monthrange(other.year, other.month)

        first = _get_firstbday(wkday)

        # Months since the most recent quarter-start month.
        monthsSince = (other.month - self.startingMonth) % 3

        if n <= 0 and monthsSince != 0:  # make sure to roll forward so negate
            monthsSince = monthsSince - 3

        # roll forward if on same month later than first bday
        if n <= 0 and (monthsSince == 0 and other.day > first):
            n = n + 1
        # pretend to roll back if on same month but before firstbday
        elif n > 0 and (monthsSince == 0 and other.day < first):
            n = n - 1

        # get the first bday for result
        other = other + relativedelta(months=3 * n - monthsSince)
        wkday, _ = tslib.monthrange(other.year, other.month)
        first = _get_firstbday(wkday)
        result = datetime(other.year, other.month, first,
                          other.hour, other.minute, other.second,
                          other.microsecond)
        return result
class QuarterEnd(QuarterOffset):
    """DateOffset increments between business Quarter dates
    startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
    startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
    startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...
    """
    _outputName = 'QuarterEnd'
    _default_startingMonth = 3
    _prefix = 'Q'

    def __init__(self, n=1, normalize=False, **kwds):
        # NOTE(review): duplicates QuarterOffset.__init__ but hard-codes
        # the default anchor month (3) instead of _default_startingMonth.
        self.n = n
        self.normalize = normalize
        self.startingMonth = kwds.get('startingMonth', 3)
        self.kwds = kwds

    def isAnchored(self):
        # NOTE(review): identical to the parent implementation.
        return (self.n == 1 and self.startingMonth is not None)

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` quarter-ends."""
        n = self.n
        other = datetime(other.year, other.month, other.day,
                         other.hour, other.minute, other.second,
                         other.microsecond)
        wkday, days_in_month = tslib.monthrange(other.year, other.month)

        # Months until the next quarter-end month (0 when already in one).
        monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
        if monthsToGo == 3:
            monthsToGo = 0

        if n > 0 and not (other.day >= days_in_month and monthsToGo == 0):
            # Rolling forward to this quarter's end absorbs one increment.
            # NOTE(review): unlike BQuarterEnd there is no symmetric n+1
            # adjustment for n <= 0 here -- confirm intentional.
            n = n - 1

        other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
        return other

    @apply_index_wraps
    def apply_index(self, i):
        return self._end_apply_index(i, self.freqstr)

    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        # Month end AND a quarter-end month for this anchor.
        modMonth = (dt.month - self.startingMonth) % 3
        return MonthEnd().onOffset(dt) and modMonth == 0
class QuarterBegin(QuarterOffset):
    """DateOffset increments between the first calendar days of quarters."""
    _outputName = 'QuarterBegin'
    _default_startingMonth = 3
    _from_name_startingMonth = 1
    _prefix = 'QS'

    def isAnchored(self):
        # NOTE(review): identical to the parent implementation.
        return (self.n == 1 and self.startingMonth is not None)

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` quarter-begins."""
        n = self.n
        wkday, days_in_month = tslib.monthrange(other.year, other.month)

        # Months since the most recent quarter-start month.
        monthsSince = (other.month - self.startingMonth) % 3

        if n <= 0 and monthsSince != 0:
            # make sure you roll forward, so negate
            monthsSince = monthsSince - 3

        if n < 0 and (monthsSince == 0 and other.day > 1):
            # after start, so come back an extra period as if rolled forward
            n = n + 1

        other = other + relativedelta(months=3 * n - monthsSince, day=1)
        return other

    @apply_index_wraps
    def apply_index(self, i):
        # Vectorized path: express as a quarterly frequency whose period
        # boundaries fall on the anchor month.
        freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1
        freqstr = 'Q-%s' % (_int_to_month[freq_month],)
        return self._beg_apply_index(i, freqstr)
class YearOffset(DateOffset):
    """DateOffset that just needs a month"""
    _adjust_dst = True

    def __init__(self, n=1, normalize=False, **kwds):
        # Fall back to the subclass default anchor month when not given.
        self.month = kwds.get('month', self._default_month)

        if not 1 <= self.month <= 12:
            raise ValueError('Month must go from 1 to 12')

        DateOffset.__init__(self, n=n, normalize=normalize, **kwds)

    @classmethod
    def _from_name(cls, suffix=None):
        kwargs = {'month': _month_to_int[suffix]} if suffix else {}
        return cls(**kwargs)

    @property
    def rule_code(self):
        # e.g. 'A-DEC' for month=12.
        return '%s-%s' % (self._prefix, _int_to_month[self.month])
class BYearEnd(YearOffset):
    """DateOffset increments between business EOM dates"""
    _outputName = 'BusinessYearEnd'
    _default_month = 12
    _prefix = 'BA'

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` business year-ends."""
        n = self.n
        wkday, days_in_month = tslib.monthrange(other.year, self.month)
        # Last business day of the anchor month in other's year.
        lastBDay = (days_in_month -
                    max(((wkday + days_in_month - 1) % 7) - 4, 0))

        years = n
        if n > 0:
            # Rolling forward to this year's anchor absorbs one increment.
            if (other.month < self.month or
                    (other.month == self.month and other.day < lastBDay)):
                years -= 1
        elif n <= 0:
            # Past this year's anchor: rolling back absorbs one decrement.
            if (other.month > self.month or
                    (other.month == self.month and other.day > lastBDay)):
                years += 1

        other = other + relativedelta(years=years)
        # Recompute: month lengths (and hence the business EOM) vary by year.
        _, days_in_month = tslib.monthrange(other.year, self.month)
        result = datetime(other.year, self.month, days_in_month,
                          other.hour, other.minute, other.second,
                          other.microsecond)

        if result.weekday() > 4:
            # Landed on a weekend: back up to the preceding business day.
            result = result - BDay()

        return result
class BYearBegin(YearOffset):
    """DateOffset increments between business year begin dates"""
    _outputName = 'BusinessYearBegin'
    _default_month = 1
    _prefix = 'BAS'

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` business year-begins."""
        n = self.n
        wkday, days_in_month = tslib.monthrange(other.year, self.month)

        # First business day of the anchor month in other's year.
        first = _get_firstbday(wkday)

        years = n

        if n > 0:  # roll back first for positive n
            if (other.month < self.month or
                    (other.month == self.month and other.day < first)):
                years -= 1
        elif n <= 0:  # roll forward
            if (other.month > self.month or
                    (other.month == self.month and other.day > first)):
                years += 1

        # set first bday for result
        other = other + relativedelta(years=years)
        wkday, days_in_month = tslib.monthrange(other.year, self.month)
        first = _get_firstbday(wkday)
        return datetime(other.year, self.month, first, other.hour,
                        other.minute, other.second, other.microsecond)
class YearEnd(YearOffset):
    """DateOffset increments between calendar year ends"""
    _default_month = 12
    _prefix = 'A'

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` calendar year-ends."""
        def _increment(date):
            # Next anchor-month end strictly after *date*.
            if date.month == self.month:
                _, days_in_month = tslib.monthrange(date.year, self.month)
                if date.day != days_in_month:
                    year = date.year
                else:
                    year = date.year + 1
            elif date.month < self.month:
                year = date.year
            else:
                year = date.year + 1
            _, days_in_month = tslib.monthrange(year, self.month)
            return datetime(year, self.month, days_in_month,
                            date.hour, date.minute, date.second,
                            date.microsecond)

        def _decrement(date):
            # Most recent anchor-month end strictly before *date*.
            year = date.year if date.month > self.month else date.year - 1
            _, days_in_month = tslib.monthrange(year, self.month)
            return datetime(year, self.month, days_in_month,
                            date.hour, date.minute, date.second,
                            date.microsecond)

        def _rollf(date):
            # Roll forward only when not already on an anchor-month end.
            if date.month != self.month or\
               date.day < tslib.monthrange(date.year, date.month)[1]:
                date = _increment(date)
            return date

        n = self.n
        result = other
        if n > 0:
            while n > 0:
                result = _increment(result)
                n -= 1
        elif n < 0:
            while n < 0:
                result = _decrement(result)
                n += 1
        else:
            # n == 0, roll forward
            result = _rollf(result)

        return result

    @apply_index_wraps
    def apply_index(self, i):
        # convert month anchor to annual period tuple
        return self._end_apply_index(i, self.freqstr)

    def onOffset(self, dt):
        # True only on the last calendar day of the anchor month.
        if self.normalize and not _is_normalized(dt):
            return False
        wkday, days_in_month = tslib.monthrange(dt.year, self.month)
        return self.month == dt.month and dt.day == days_in_month
class YearBegin(YearOffset):
    """DateOffset increments between calendar year begin dates"""
    _default_month = 1
    _prefix = 'AS'

    @apply_wraps
    def apply(self, other):
        """Shift *other* by ``n`` calendar year-begins."""
        def _increment(date, n):
            # n-th anchor-month start strictly after *date*.
            year = date.year + n - 1
            if date.month >= self.month:
                year += 1
            return datetime(year, self.month, 1, date.hour, date.minute,
                            date.second, date.microsecond)

        def _decrement(date, n):
            # n-th (n negative) anchor-month start strictly before *date*.
            year = date.year + n + 1
            if date.month < self.month or (date.month == self.month and
                                           date.day == 1):
                year -= 1
            return datetime(year, self.month, 1, date.hour, date.minute,
                            date.second, date.microsecond)

        def _rollf(date):
            # Roll forward only when not already on an anchor-month start.
            if (date.month != self.month) or date.day > 1:
                date = _increment(date, 1)
            return date

        n = self.n
        result = other
        if n > 0:
            result = _increment(result, n)
        elif n < 0:
            result = _decrement(result, n)
        else:
            # n == 0, roll forward
            result = _rollf(result)

        return result

    @apply_index_wraps
    def apply_index(self, i):
        # Vectorized path: express as an annual frequency whose period
        # boundaries fall on the anchor month.
        freq_month = 12 if self.month == 1 else self.month - 1
        freqstr = 'A-%s' % (_int_to_month[freq_month],)
        return self._beg_apply_index(i, freqstr)

    def onOffset(self, dt):
        # On offset exactly on the first day of the anchor month.
        if self.normalize and not _is_normalized(dt):
            return False
        return dt.month == self.month and dt.day == 1
class FY5253(DateOffset):
    """
    Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar.
    It is used by companies that desire that their
    fiscal year always end on the same day of the week.
    It is a method of managing accounting periods.
    It is a common calendar structure for some industries,
    such as retail, manufacturing and parking industry.
    For more information see:
    http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
    The year may either:
    - end on the last X day of the Y month.
    - end on the last X day closest to the last day of the Y month.
    X is a specific day of the week.
    Y is a certain month of the year
    Parameters
    ----------
    n : int
    weekday : {0, 1, ..., 6}
        0: Mondays
        1: Tuesdays
        2: Wednesdays
        3: Thursdays
        4: Fridays
        5: Saturdays
        6: Sundays
    startingMonth : The month in which fiscal years end. {1, 2, ... 12}
    variation : str
        {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
    """
    # Frequency-string prefix plus variation suffixes used by ``rule_code``
    # and ``_parse_suffix`` below.
    _prefix = 'RE'
    _suffix_prefix_last = 'L'
    _suffix_prefix_nearest = 'N'
    _adjust_dst = True
    def __init__(self, n=1, normalize=False, **kwds):
        self.n = n
        self.normalize = normalize
        self.startingMonth = kwds['startingMonth']
        self.weekday = kwds["weekday"]
        self.variation = kwds["variation"]
        self.kwds = kwds
        if self.n == 0:
            raise ValueError('N cannot be 0')
        if self.variation not in ["nearest", "last"]:
            raise ValueError('%s is not a valid variation' % self.variation)
        if self.variation == "nearest":
            # Pre-build relativedeltas that snap a date forward/backward to
            # the anchor weekday (``weekday_offset(-1)`` means the previous
            # such weekday).
            weekday_offset = weekday(self.weekday)
            self._rd_forward = relativedelta(weekday=weekday_offset)
            self._rd_backward = relativedelta(weekday=weekday_offset(-1))
        else:
            # "last" variation: year ends on the last anchor weekday that
            # falls inside the target month.
            self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday)
    def isAnchored(self):
        # Only the canonical, fully specified n=1 offset is anchored.
        return self.n == 1 \
            and self.startingMonth is not None \
            and self.weekday is not None
    def onOffset(self, dt):
        # True when ``dt`` is a fiscal year-end under this calendar.
        if self.normalize and not _is_normalized(dt):
            return False
        dt = datetime(dt.year, dt.month, dt.day)
        year_end = self.get_year_end(dt)
        if self.variation == "nearest":
            # We have to check the year end of "this" cal year AND the previous
            return year_end == dt or \
                self.get_year_end(dt - relativedelta(months=1)) == dt
        else:
            return year_end == dt
    @apply_wraps
    def apply(self, other):
        # Step ``other`` by ``n`` fiscal year-ends.  The year-ends of the
        # previous, current and next calendar years bracket ``other``; which
        # side it falls on decides whether the first step is a full increment
        # or merely a roll to the nearest year-end (hence the ``n -= 1``).
        n = self.n
        prev_year = self.get_year_end(
            datetime(other.year - 1, self.startingMonth, 1))
        cur_year = self.get_year_end(
            datetime(other.year, self.startingMonth, 1))
        next_year = self.get_year_end(
            datetime(other.year + 1, self.startingMonth, 1))
        prev_year = tslib._localize_pydatetime(prev_year, other.tzinfo)
        cur_year = tslib._localize_pydatetime(cur_year, other.tzinfo)
        next_year = tslib._localize_pydatetime(next_year, other.tzinfo)
        if n > 0:
            if other == prev_year:
                year = other.year - 1
            elif other == cur_year:
                year = other.year
            elif other == next_year:
                year = other.year + 1
            elif other < prev_year:
                year = other.year - 1
                n -= 1
            elif other < cur_year:
                year = other.year
                n -= 1
            elif other < next_year:
                year = other.year + 1
                n -= 1
            else:
                assert False
            result = self.get_year_end(datetime(year + n, self.startingMonth, 1))
            # Re-attach the original time-of-day.
            result = datetime(result.year, result.month, result.day,
                              other.hour, other.minute, other.second, other.microsecond)
            return result
        else:
            # Mirror image of the n > 0 branch, stepping backwards.
            n = -n
            if other == prev_year:
                year = other.year - 1
            elif other == cur_year:
                year = other.year
            elif other == next_year:
                year = other.year + 1
            elif other > next_year:
                year = other.year + 1
                n -= 1
            elif other > cur_year:
                year = other.year
                n -= 1
            elif other > prev_year:
                year = other.year - 1
                n -= 1
            else:
                assert False
            result = self.get_year_end(datetime(year - n, self.startingMonth, 1))
            result = datetime(result.year, result.month, result.day,
                              other.hour, other.minute, other.second, other.microsecond)
            return result
    def get_year_end(self, dt):
        # Fiscal year-end of the calendar year containing ``dt``, per the
        # configured variation.
        if self.variation == "nearest":
            return self._get_year_end_nearest(dt)
        else:
            return self._get_year_end_last(dt)
    def get_target_month_end(self, dt):
        # Last calendar day of ``startingMonth`` in dt's year.
        target_month = datetime(dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
        next_month_first_of = target_month + relativedelta(months=+1)
        return next_month_first_of + relativedelta(days=-1)
    def _get_year_end_nearest(self, dt):
        # Anchor weekday closest to the month end; may land in the next
        # month when the following anchor weekday is nearer.
        target_date = self.get_target_month_end(dt)
        if target_date.weekday() == self.weekday:
            return target_date
        else:
            forward = target_date + self._rd_forward
            backward = target_date + self._rd_backward
            if forward - target_date < target_date - backward:
                return forward
            else:
                return backward
    def _get_year_end_last(self, dt):
        # Last anchor weekday falling inside ``startingMonth``.
        current_year = datetime(dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
        return current_year + self._offset_lwom
    @property
    def rule_code(self):
        suffix = self.get_rule_code_suffix()
        return "%s-%s" % (self._get_prefix(), suffix)
    def _get_prefix(self):
        return self._prefix
    def _get_suffix_prefix(self):
        if self.variation == "nearest":
            return self._suffix_prefix_nearest
        else:
            return self._suffix_prefix_last
    def get_rule_code_suffix(self):
        # e.g. 'N-MAR-TUE': variation, month and weekday codes.
        return '%s-%s-%s' % (self._get_suffix_prefix(), \
                             _int_to_month[self.startingMonth], \
                             _int_to_weekday[self.weekday])
    @classmethod
    def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):
        # Inverse of ``get_rule_code_suffix``; returns constructor kwargs.
        if varion_code == "N":
            variation = "nearest"
        elif varion_code == "L":
            variation = "last"
        else:
            raise ValueError(
                "Unable to parse varion_code: %s" % (varion_code,))
        startingMonth = _month_to_int[startingMonth_code]
        weekday = _weekday_to_int[weekday_code]
        return {
            "weekday": weekday,
            "startingMonth": startingMonth,
            "variation": variation,
        }
    @classmethod
    def _from_name(cls, *args):
        return cls(**cls._parse_suffix(*args))
class FY5253Quarter(DateOffset):
    """
    DateOffset increments between business quarter dates
    for 52-53 week fiscal year (also known as a 4-4-5 calendar).
    It is used by companies that desire that their
    fiscal year always end on the same day of the week.
    It is a method of managing accounting periods.
    It is a common calendar structure for some industries,
    such as retail, manufacturing and parking industry.
    For more information see:
    http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
    The year may either:
    - end on the last X day of the Y month.
    - end on the last X day closest to the last day of the Y month.
    X is a specific day of the week.
    Y is a certain month of the year
    startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
    startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
    startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
    Parameters
    ----------
    n : int
    weekday : {0, 1, ..., 6}
        0: Mondays
        1: Tuesdays
        2: Wednesdays
        3: Thursdays
        4: Fridays
        5: Saturdays
        6: Sundays
    startingMonth : The month in which fiscal years end. {1, 2, ... 12}
    qtr_with_extra_week : The quarter number that has the leap
        or 14 week when needed. {1,2,3,4}
    variation : str
        {"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
    """
    _prefix = 'REQ'
    _adjust_dst = True
    def __init__(self, n=1, normalize=False, **kwds):
        self.n = n
        self.normalize = normalize
        self.qtr_with_extra_week = kwds["qtr_with_extra_week"]
        self.kwds = kwds
        if self.n == 0:
            raise ValueError('N cannot be 0')
        # Year-end arithmetic is delegated to an embedded FY5253 offset.
        self._offset = FY5253( \
            startingMonth=kwds['startingMonth'], \
            weekday=kwds["weekday"],
            variation=kwds["variation"])
    def isAnchored(self):
        return self.n == 1 and self._offset.isAnchored()
    @apply_wraps
    def apply(self, other):
        # Step quarter-by-quarter: from the most recent fiscal year-end,
        # accumulate the 13/14-week quarter lengths until the running date
        # passes ``other``; repeat |n| times (backwards for n < 0).
        base = other
        n = self.n
        if n > 0:
            while n > 0:
                if not self._offset.onOffset(other):
                    qtr_lens = self.get_weeks(other)
                    start = other - self._offset
                else:
                    start = other
                    qtr_lens = self.get_weeks(other + self._offset)
                for weeks in qtr_lens:
                    start += relativedelta(weeks=weeks)
                    if start > other:
                        other = start
                        n -= 1
                        break
        else:
            n = -n
            while n > 0:
                if not self._offset.onOffset(other):
                    qtr_lens = self.get_weeks(other)
                    end = other + self._offset
                else:
                    end = other
                    qtr_lens = self.get_weeks(other)
                for weeks in reversed(qtr_lens):
                    end -= relativedelta(weeks=weeks)
                    if end < other:
                        other = end
                        n -= 1
                        break
        # relativedelta arithmetic works on whole days; restore the original
        # time-of-day from ``base``.
        other = datetime(other.year, other.month, other.day,
                         base.hour, base.minute, base.second, base.microsecond)
        return other
    def get_weeks(self, dt):
        # Quarter lengths (in weeks) of the fiscal year containing ``dt``:
        # all 13, except one 14-week quarter in 53-week years.
        ret = [13] * 4
        year_has_extra_week = self.year_has_extra_week(dt)
        if year_has_extra_week:
            ret[self.qtr_with_extra_week - 1] = 14
        return ret
    def year_has_extra_week(self, dt):
        # A fiscal year is "long" when its two bounding year-ends are 53
        # weeks apart.  The day span between two year-ends is always an
        # exact multiple of 7, so the float division below is exact.
        if self._offset.onOffset(dt):
            prev_year_end = dt - self._offset
            next_year_end = dt
        else:
            next_year_end = dt + self._offset
            prev_year_end = dt - self._offset
        week_in_year = (next_year_end - prev_year_end).days / 7
        return week_in_year == 53
    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        if self._offset.onOffset(dt):
            return True
        # NOTE(review): despite its name, ``dt - self._offset`` rolls *back*
        # to the previous fiscal year-end -- looks like a naming slip only;
        # confirm.
        next_year_end = dt - self._offset
        qtr_lens = self.get_weeks(dt)
        current = next_year_end
        # Walk the quarter ends of this fiscal year and test for a match.
        for qtr_len in qtr_lens[0:4]:
            current += relativedelta(weeks=qtr_len)
            if dt == current:
                return True
        return False
    @property
    def rule_code(self):
        suffix = self._offset.get_rule_code_suffix()
        return "%s-%s" % (self._prefix,
                          "%s-%d" % (suffix, self.qtr_with_extra_week))
    @classmethod
    def _from_name(cls, *args):
        # Last suffix component is the extra-week quarter number; the rest
        # is parsed by FY5253.
        return cls(**dict(FY5253._parse_suffix(*args[:-1]),
                          qtr_with_extra_week=int(args[-1])))
class Easter(DateOffset):
    '''
    DateOffset for the Easter holiday using
    logic defined in dateutil. Right now uses
    the revised method which is valid in years
    1583-4099.
    '''
    _adjust_dst = True
    def __init__(self, n=1, **kwds):
        super(Easter, self).__init__(n, **kwds)
    @apply_wraps
    def apply(self, other):
        # Locate this year's Easter, normalized to a tz-aware datetime so it
        # can be compared against ``other``.
        currentEaster = easter(other.year)
        currentEaster = datetime(currentEaster.year, currentEaster.month, currentEaster.day)
        currentEaster = tslib._localize_pydatetime(currentEaster, other.tzinfo)
        # NOTE: easter returns a datetime.date so we have to convert to type of other
        if self.n >= 0:
            # Moving forward: if ``other`` is already at/past this year's
            # Easter, this year's counts as step zero.
            if other >= currentEaster:
                new = easter(other.year + self.n)
            else:
                new = easter(other.year + self.n - 1)
        else:
            # Moving backward: strictly-after counts this year's Easter as
            # the first step back.
            if other > currentEaster:
                new = easter(other.year + self.n + 1)
            else:
                new = easter(other.year + self.n)
        # Re-attach the original time-of-day.
        new = datetime(new.year, new.month, new.day, other.hour,
                       other.minute, other.second, other.microsecond)
        return new
    def onOffset(self, dt):
        if self.normalize and not _is_normalized(dt):
            return False
        return date(dt.year, dt.month, dt.day) == easter(dt.year)
#----------------------------------------------------------------------
# Ticks
import operator
def _tick_comp(op):
def f(self, other):
return op(self.delta, other.delta)
return f
class Tick(SingleConstructorOffset):
    # Base class for fixed-frequency offsets (Day, Hour, ..., Nano below).
    # Subclasses override ``_inc`` with their unit Timedelta.
    _inc = Timedelta(microseconds=1000)
    # Ordering comparisons delegate to each tick's absolute ``delta``.
    __gt__ = _tick_comp(operator.gt)
    __ge__ = _tick_comp(operator.ge)
    __lt__ = _tick_comp(operator.lt)
    __le__ = _tick_comp(operator.le)
    __eq__ = _tick_comp(operator.eq)
    __ne__ = _tick_comp(operator.ne)
    def __add__(self, other):
        if isinstance(other, Tick):
            if type(self) == type(other):
                return type(self)(self.n + other.n)
            else:
                # Different units: collapse to the coarsest exact tick.
                return _delta_to_tick(self.delta + other.delta)
        try:
            return self.apply(other)
        except ApplyTypeError:
            return NotImplemented
    # NOTE: this def shadows the ``_tick_comp(operator.eq)`` assignment
    # above; the string-aware version below is the one in effect.
    def __eq__(self, other):
        if isinstance(other, compat.string_types):
            from pandas.tseries.frequencies import to_offset
            other = to_offset(other)
        if isinstance(other, Tick):
            return self.delta == other.delta
        else:
            return DateOffset.__eq__(self, other)
    # This is identical to DateOffset.__hash__, but has to be redefined here
    # for Python 3, because we've redefined __eq__.
    def __hash__(self):
        return hash(self._params())
    # Likewise shadows the ``_tick_comp(operator.ne)`` assignment above.
    def __ne__(self, other):
        if isinstance(other, compat.string_types):
            from pandas.tseries.frequencies import to_offset
            other = to_offset(other)
        if isinstance(other, Tick):
            return self.delta != other.delta
        else:
            return DateOffset.__ne__(self, other)
    @property
    def delta(self):
        # Total span of this offset: n copies of the unit increment.
        return self.n * self._inc
    @property
    def nanos(self):
        return _delta_to_nanoseconds(self.delta)
    def apply(self, other):
        # Timestamp can handle tz and nano sec, thus no need to use apply_wraps
        if isinstance(other, (datetime, np.datetime64, date)):
            return as_timestamp(other) + self
        if isinstance(other, timedelta):
            return other + self.delta
        elif isinstance(other, type(self)):
            return type(self)(self.n + other.n)
        else:
            raise ApplyTypeError('Unhandled type: %s' % type(other).__name__)
    # Overridden by each concrete subclass.
    _prefix = 'undefined'
    def isAnchored(self):
        return False
def _delta_to_tick(delta):
    """Convert a timedelta to the coarsest Tick subclass that represents it
    exactly (Day > Hour > Minute > Second > Milli > Micro > Nano)."""
    if delta.microseconds == 0:
        if delta.seconds == 0:
            return Day(delta.days)
        else:
            seconds = delta.days * 86400 + delta.seconds
            if seconds % 3600 == 0:
                # Floor division keeps ``n`` an int under Python 3; the
                # modulo check above guarantees the division is exact
                # (matches the // already used in the Milli/Micro branches).
                return Hour(seconds // 3600)
            elif seconds % 60 == 0:
                return Minute(seconds // 60)
            else:
                return Second(seconds)
    else:
        nanos = _delta_to_nanoseconds(delta)
        if nanos % 1000000 == 0:
            return Milli(nanos // 1000000)
        elif nanos % 1000 == 0:
            return Micro(nanos // 1000)
        else:  # pragma: no cover
            return Nano(nanos)
# Alias the cython helper at module level; used by Tick.nanos and
# _delta_to_tick above.
_delta_to_nanoseconds = tslib._delta_to_nanoseconds
# Concrete fixed-frequency ticks: each pairs a unit Timedelta with its
# frequency-string prefix.
class Day(Tick):
    _inc = Timedelta(days=1)
    _prefix = 'D'
class Hour(Tick):
    _inc = Timedelta(hours=1)
    _prefix = 'H'
class Minute(Tick):
    _inc = Timedelta(minutes=1)
    _prefix = 'T'
class Second(Tick):
    _inc = Timedelta(seconds=1)
    _prefix = 'S'
class Milli(Tick):
    _inc = Timedelta(milliseconds=1)
    _prefix = 'L'
class Micro(Tick):
    _inc = Timedelta(microseconds=1)
    _prefix = 'U'
class Nano(Tick):
    _inc = Timedelta(nanoseconds=1)
    _prefix = 'N'
# Short historical aliases kept for backwards compatibility.
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
def _get_firstbday(wkday):
"""
wkday is the result of monthrange(year, month)
If it's a saturday or sunday, increment first business day to reflect this
"""
first = 1
if wkday == 5: # on Saturday
first = 3
elif wkday == 6: # on Sunday
first = 2
return first
def generate_range(start=None, end=None, periods=None,
                   offset=BDay(), time_rule=None):
    """
    Generates a sequence of dates corresponding to the specified time
    offset. Similar to dateutil.rrule except uses pandas DateOffset
    objects to represent time increments
    Parameters
    ----------
    start : datetime (default None)
    end : datetime (default None)
    periods : int, optional
    time_rule : (legacy) name of DateOffset object to be used, optional
        Corresponds with names expected by tseries.frequencies.get_offset
    Notes
    -----
    * This method is faster for generating weekdays than dateutil.rrule
    * At least two of (start, end, periods) must be specified.
    * If both start and end are specified, the returned dates will
    satisfy start <= date <= end.
    * If both time_rule and offset are specified, time_rule supersedes offset.
    Returns
    -------
    dates : generator object
    """
    if time_rule is not None:
        from pandas.tseries.frequencies import get_offset
        offset = get_offset(time_rule)
    start = to_datetime(start)
    end = to_datetime(end)
    # Snap the endpoints onto the offset grid.
    # NOTE(review): because of the ``elif``, only ONE of start/end is ever
    # rolled per call (end is only snapped when start is falsy) -- confirm
    # this asymmetry is intended.
    if start and not offset.onOffset(start):
        start = offset.rollforward(start)
    elif end and not offset.onOffset(end):
        end = offset.rollback(end)
    # Empty range: when end precedes start and no explicit count was given.
    if periods is None and end < start:
        end = None
        periods = 0
    # Derive the missing endpoint from the period count.
    if end is None:
        end = start + (periods - 1) * offset
    if start is None:
        start = end - (periods - 1) * offset
    cur = start
    if offset.n >= 0:
        while cur <= end:
            yield cur
            # faster than cur + offset
            next_date = offset.apply(cur)
            # Guard against a degenerate offset that would loop forever.
            if next_date <= cur:
                raise ValueError('Offset %s did not increment date' % offset)
            cur = next_date
    else:
        while cur >= end:
            yield cur
            # faster than cur + offset
            next_date = offset.apply(cur)
            if next_date >= cur:
                raise ValueError('Offset %s did not decrement date' % offset)
            cur = next_date
# Map each offset class's frequency-string prefix (e.g. 'BQS') back to the
# class itself; consumed by _make_offset below to resolve frequency aliases.
prefix_mapping = dict((offset._prefix, offset) for offset in [
    YearBegin,                 # 'AS'
    YearEnd,                   # 'A'
    BYearBegin,                # 'BAS'
    BYearEnd,                  # 'BA'
    BusinessDay,               # 'B'
    BusinessMonthBegin,        # 'BMS'
    BusinessMonthEnd,          # 'BM'
    BQuarterEnd,               # 'BQ'
    BQuarterBegin,             # 'BQS'
    BusinessHour,              # 'BH'
    CustomBusinessDay,         # 'C'
    CustomBusinessMonthEnd,    # 'CBM'
    CustomBusinessMonthBegin,  # 'CBMS'
    MonthEnd,                  # 'M'
    MonthBegin,                # 'MS'
    Week,                      # 'W'
    Second,                    # 'S'
    Minute,                    # 'T'
    Micro,                     # 'U'
    QuarterEnd,                # 'Q'
    QuarterBegin,              # 'QS'
    Milli,                     # 'L'
    Hour,                      # 'H'
    Day,                       # 'D'
    WeekOfMonth,               # 'WOM'
    FY5253,
    FY5253Quarter,
])
# Nano ('N') is registered explicitly rather than in the list above.
prefix_mapping['N'] = Nano
def _make_offset(key):
    """Instantiate the DateOffset described by frequency alias *key*.

    Raises KeyError for an unknown prefix and ValueError for a bad suffix;
    both are handled by `get_offset` in tseries/frequencies. Not public.
    """
    if key is None:
        return None
    parts = key.split('-')
    klass = prefix_mapping[parts[0]]
    # No suffix is fine (empty *parts[1:]); too many '-' yields a TypeError.
    offset = klass._from_name(*parts[1:])
    offset._named = key
    return offset
| artistic-2.0 |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 153 | 6723 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
# Empty stand-ins mirroring sklearn.base.*Mixin: they carry no behavior and
# only mark an estimator's role when sklearn itself is unavailable.
# pylint: disable=old-style-class
class _ClassifierMixin():
  """Mixin class for all classifiers."""
  pass
class _RegressorMixin():
  """Mixin class for all regression estimators."""
  pass
class _TransformerMixin():
  """Mixin class for all transformer estimators."""
# Local stand-in for sklearn's NotFittedError (see the import fallback at
# the bottom of this module).  The class body is its docstring only.
class NotFittedError(ValueError, AttributeError):
  """Exception class to raise if estimator is used before fitting.
  This class inherits from both ValueError and AttributeError to help with
  exception handling and backward compatibility.
  Examples:
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.exceptions import NotFittedError
    >>> try:
    ...     LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
    ... except NotFittedError as e:
    ...     print(repr(e))
    ...                        # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    NotFittedError('This LinearSVC instance is not fitted yet',)
  Copied from
  https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
  """
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| mit |
luo66/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). LSHForest index
have sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
# NOTE: astype(int) replaces astype(np.int); the np.int alias was
# deprecated in NumPy 1.20 and removed in 1.24, and is equivalent to the
# builtin int.
n_samples_values = np.logspace(np.log10(n_samples_min),
                               np.log10(n_samples_max),
                               n_steps).astype(int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
                         n_features=n_features, centers=n_centers, shuffle=True,
                         random_state=0)
# The first n_queries rows are held out as query points; the rest are indexed.
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
    X = index_data[:n_samples]
    # Initialize LSHForest for queries of a single neighbor
    lshf = LSHForest(n_estimators=20, n_candidates=200,
                     n_neighbors=10).fit(X)
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
                            n_neighbors=10).fit(X)
    time_approx = []
    time_exact = []
    accuracy = []
    for i in range(n_iter):
        # pick one query at random to study query time variability in LSHForest
        query = queries[rng.randint(0, n_queries)]
        t0 = time.time()
        exact_neighbors = nbrs.kneighbors(query, return_distance=False)
        time_exact.append(time.time() - t0)
        t0 = time.time()
        approx_neighbors = lshf.kneighbors(query, return_distance=False)
        time_approx.append(time.time() - t0)
        # Precision of the approximate query vs the exact neighbor set.
        accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
    average_time_exact = np.mean(time_exact)
    average_time_approx = np.mean(time_approx)
    speedup = np.array(time_exact) / np.array(time_approx)
    average_speedup = np.mean(speedup)
    mean_accuracy = np.mean(accuracy)
    std_accuracy = np.std(accuracy)
    print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
          "accuracy: %0.2f +/-%0.2f" %
          (n_samples, average_time_exact, average_time_approx, average_speedup,
           mean_accuracy, std_accuracy))
    accuracies.append(mean_accuracy)
    std_accuracies.append(std_accuracy)
    average_times_exact.append(average_time_exact)
    average_times_approx.append(average_time_approx)
    std_times_approx.append(np.std(time_approx))
    average_speedups.append(average_speedup)
    std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
             fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
         label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
          "nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
             fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
nrhine1/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision trees <tree>` is
used to fit a sine curve with addition noisy observation. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset: 80 sorted points on [0, 5) with a sine target
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
# Perturb every 5th sample (80 / 5 = 16 values) with noise in (-1.5, 1.5]
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
# max_depth=2 underfits smoothly; max_depth=5 chases the injected noise
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
# Dense grid over the training interval; [:, np.newaxis] makes it a column
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
sniemi/SamPy | plot/colorbarExample2.py | 1 | 1675 | import pylab as P
import numpy
from mpl_toolkits.axes_grid import make_axes_locatable
import matplotlib.axes as maxes
from matplotlib import cm
if __name__ == '__main__':
    # Four demo panels, each attaching a colorbar a different way.
    fig = P.figure(figsize=(10,10))
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223)
    ax4 = fig.add_subplot(224)
    # Panel 1: colorbar axes carved out of ax1 with divider.new_horizontal.
    s1 = ax1.scatter(numpy.random.rand(10),
                     numpy.random.rand(10),
                     c=numpy.random.rand(10))
    divider = make_axes_locatable(ax1)
    cax1 = divider.new_horizontal('5%', pad=0.0, axes_class=maxes.Axes)
    fig.add_axes(cax1)
    # NOTE(review): the carved-out axes is a vertical strip but the colorbar
    # is drawn horizontal -- confirm this rendering is intended.
    c1 = fig.colorbar(s1, cax = cax1,orientation = 'horizontal')
    # Panel 2: divider.append_axes plus an explicit colormap.
    s2 = ax2.scatter(numpy.random.rand(10),
                     numpy.random.rand(10),
                     c=numpy.random.rand(10),
                     cmap = cm.get_cmap('jet'))
    divider = make_axes_locatable(ax2)
    cax2 = divider.append_axes('right', 0.1, pad=0.1)
    c2 = fig.colorbar(s2, cax = cax2)
    #p = matplotlib.patches.Patch(color=cm.get_cmap('jet'))
    #ax2.legend([p],['Test'])
    # Panel 3: manually placed horizontal colorbar with explicit ticks.
    s3 = ax3.scatter(numpy.random.rand(10),
                     numpy.random.rand(10),
                     c=numpy.random.rand(10))
    cax3 = fig.add_axes([0.2, 0.4, 0.1, 0.01]) #[left, bottom, width, height]
    c3 = fig.colorbar(s3, cax = cax3, orientation = 'horizontal',
                      ticks=[0.15, 0.5, 0.85])
    # Panel 4: manually placed vertical colorbar (the divider created here
    # is unused for cax4, which comes from fig.add_axes).
    s4 = ax4.scatter(numpy.random.rand(10),
                     numpy.random.rand(10),
                     c=numpy.random.rand(10))
    divider = make_axes_locatable(ax4)
    cax4 = fig.add_axes([0.55, 0.25, 0.05, 0.2])
    c4 = fig.colorbar(s4, cax = cax4)
    P.show()
| bsd-2-clause |
justincassidy/scikit-learn | sklearn/metrics/ranking.py | 79 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule.
    This is a general-purpose integral over points on a curve; for the area
    under an ROC curve specifically, see :func:`roc_auc_score`.
    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.
    y : array, shape = [n]
        y coordinates.
    reorder : boolean, optional (default=False)
        If True, sort the points by x (breaking ties with y), assuming the
        curve is ascending in the case of ties, as for an ROC curve. If the
        curve is non-ascending, the result will be wrong.
    Returns
    -------
    auc : float
    See also
    --------
    roc_auc_score : Computes the area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)
    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)
    sign = 1
    if reorder:
        # Sort the points by x, using y to break ties.
        sorted_idx = np.lexsort((y, x))
        x = x[sorted_idx]
        y = y[sorted_idx]
    else:
        # Without reordering, x must be monotonic: a strictly non-increasing
        # x flips the sign of the trapezoidal integral instead of failing.
        dx = np.diff(x)
        if np.any(dx < 0):
            if np.all(dx <= 0):
                sign = -1
            else:
                raise ValueError("Reordering is not turned on, and "
                                 "the x array is not increasing: %s" % x)
    return sign * np.trapz(y, x)
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    """Compute average precision (AP) from prediction scores.

    The score is the area under the precision-recall curve. This
    implementation supports binary and multilabel classification targets
    only.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        Averaging strategy over the labels. ``None`` returns one score per
        class; ``'micro'`` pools every element of the label indicator matrix;
        ``'macro'`` is the unweighted mean over labels; ``'weighted'``
        weights each label by its support; ``'samples'`` averages over
        instances.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    average_precision : float

    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
           <http://en.wikipedia.org/wiki/Average_precision>`_

    See also
    --------
    roc_auc_score : Area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import average_precision_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> average_precision_score(y_true, y_scores)  # doctest: +ELLIPSIS
    0.79...
    """
    def _binary_ap(y_true, y_score, sample_weight=None):
        # AP of one binary problem: the area under its PR curve.
        precision, recall, _ = precision_recall_curve(
            y_true, y_score, sample_weight=sample_weight)
        return auc(recall, precision)

    return _average_binary_score(_binary_ap, y_true, y_score,
                                 average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
    """Compute Area Under the Curve (AUC) from prediction scores.

    This implementation supports binary targets and multilabel targets in
    label indicator format.

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        Averaging strategy over the labels. ``None`` returns one score per
        class; ``'micro'`` pools every element of the label indicator matrix;
        ``'macro'`` is the unweighted mean over labels; ``'weighted'``
        weights each label by its support; ``'samples'`` averages over
        instances.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    auc : float

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    See also
    --------
    average_precision_score : Area under the precision-recall curve
    roc_curve : Compute Receiver operating characteristic (ROC)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import roc_auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc_score(y_true, y_scores)
    0.75
    """
    def _binary_auc(y_true, y_score, sample_weight=None):
        # ROC AUC is undefined unless both classes are present.
        if np.unique(y_true).size != 2:
            raise ValueError("Only one class present in y_true. ROC AUC score "
                             "is not defined in that case.")
        fpr, tpr, _ = roc_curve(y_true, y_score,
                                sample_weight=sample_weight)
        return auc(fpr, tpr, reorder=True)

    return _average_binary_score(_binary_auc, y_true, y_score, average,
                                 sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification
    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function
    pos_label : int, optional (default=None)
        The label of the positive class
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).
    tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).
    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    check_consistent_length(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
    # ensure binary classification if pos_label is not specified
    # (only the conventional label sets {0, 1}, {-1, 1} or a single one of
    # those labels are accepted when pos_label is left as None)
    classes = np.unique(y_true)
    if (pos_label is None and
        not (np.all(classes == [0, 1]) or
             np.all(classes == [-1, 1]) or
             np.all(classes == [0]) or
             np.all(classes == [-1]) or
             np.all(classes == [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.
    # make y_true a boolean vector
    y_true = (y_true == pos_label)
    # sort scores and corresponding truth values
    # (stable mergesort so that samples with equal scores keep their
    # original relative order, making the output deterministic)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        # scalar weight: every sample counts once
        weight = 1.
    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    # We need to use isclose to avoid spurious repeated thresholds
    # stemming from floating point roundoff errors.
    distinct_value_indices = np.where(np.logical_not(isclose(
        np.diff(y_score), 0)))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
    # accumulate the true positives with decreasing threshold
    tps = (y_true * weight).cumsum()[threshold_idxs]
    if sample_weight is not None:
        # false positives = total weight seen so far minus the true positives
        fps = weight.cumsum()[threshold_idxs] - tps
    else:
        # unweighted: (index + 1) samples have been seen at each retained
        # threshold, so fps is that running count minus the true positives
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Compute precision-recall pairs for different probability thresholds.

    Restricted to the binary classification task.

    Precision is ``tp / (tp + fp)`` (the ability of the classifier not to
    label a negative sample as positive) and recall is ``tp / (tp + fn)``
    (the ability of the classifier to find all the positive samples), where
    ``tp``, ``fp`` and ``fn`` are the counts of true positives, false
    positives and false negatives.

    The last precision and recall values are 1. and 0. respectively and do
    not have a corresponding threshold; this ensures that the graph starts
    on the x axis.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification in range {-1, 1} or {0, 1}.
    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.
    pos_label : int, optional (default=None)
        The label of the positive class
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Precision values such that element i is the precision of
        predictions with score >= thresholds[i] and the last element is 1.
    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values such that element i is the recall of
        predictions with score >= thresholds[i] and the last element is 0.
    thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
        Increasing thresholds on the decision function used to compute
        precision and recall.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import precision_recall_curve
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> precision, recall, thresholds = precision_recall_curve(
    ...     y_true, y_scores)
    >>> precision  # doctest: +ELLIPSIS
    array([ 0.66...,  0.5       ,  1.        ,  1.        ])
    >>> recall
    array([ 1. ,  0.5,  0.5,  0. ])
    >>> thresholds
    array([ 0.35,  0.4 ,  0.8 ])
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, probas_pred, pos_label=pos_label,
        sample_weight=sample_weight)

    precision = tps / (tps + fps)
    recall = tps / tps[-1]

    # Drop everything after full recall is first attained, then reverse the
    # arrays so that recall is decreasing.
    cut = slice(tps.searchsorted(tps[-1]), None, -1)
    final_precision = np.r_[precision[cut], 1]
    final_recall = np.r_[recall[cut], 0]
    return final_precision, final_recall, thresholds[cut]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Compute Receiver operating characteristic (ROC).

    Restricted to the binary classification task.

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels in range {0, 1} or {-1, 1}. If labels are not
        binary, pos_label should be explicitly given.
    y_score : array, shape = [n_samples]
        Target scores, can either be probability estimates of the positive
        class or confidence values.
    pos_label : int
        Label considered as positive and others are considered negative.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fpr : array, shape = [>2]
        Increasing false positive rates such that element i is the false
        positive rate of predictions with score >= thresholds[i].
    tpr : array, shape = [>2]
        Increasing true positive rates such that element i is the true
        positive rate of predictions with score >= thresholds[i].
    thresholds : array, shape = [n_thresholds]
        Decreasing thresholds on the decision function used to compute
        fpr and tpr. `thresholds[0]` represents no instances being predicted
        and is arbitrarily set to `max(y_score) + 1`.

    See also
    --------
    roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores

    Notes
    -----
    Since the thresholds are sorted from low to high values, they
    are reversed upon returning them to ensure they correspond to both ``fpr``
    and ``tpr``, which are sorted in reversed order during their calculation.

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
    >>> fpr
    array([ 0. ,  0.5,  0.5,  1. ])
    >>> tpr
    array([ 0.5,  0.5,  1. ,  1. ])
    >>> thresholds
    array([ 0.8 ,  0.4 ,  0.35,  0.1 ])
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)

    if tps.size == 0 or fps[0] != 0:
        # Prepend an extra (0, 0) point so the curve starts at the origin.
        fps = np.r_[0, fps]
        tps = np.r_[0, tps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]

    def _safe_rate(counts, message):
        # Normalize cumulative counts to rates; warn and return NaNs when
        # the corresponding class is absent from y_true.
        if counts[-1] <= 0:
            warnings.warn(message, UndefinedMetricWarning)
            return np.repeat(np.nan, counts.shape)
        return counts / counts[-1]

    fpr = _safe_rate(fps, "No negative samples in y_true, "
                          "false positive value should be meaningless")
    tpr = _safe_rate(tps, "No positive samples in y_true, "
                          "true positive value should be meaningless")
    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
    """Compute ranking-based average precision.

    Label ranking average precision (LRAP) averages, over each ground-truth
    label of each sample, the fraction of labels ranked at or above it that
    are themselves relevant.

    It is used in multilabel ranking problems where the goal is to rank the
    labels associated with each sample higher than the others. The score is
    always strictly greater than 0 and the best value is 1.

    Read more in the :ref:`User Guide <label_ranking_average_precision>`.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    Returns
    -------
    score : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score) \
        # doctest: +ELLIPSIS
    0.416...
    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Handle badly formated array and the degenerate case with one label
    y_type = type_of_target(y_true)
    is_2d_binary = (y_type == "binary" and y_true.ndim == 2)
    if y_type != "multilabel-indicator" and not is_2d_binary:
        raise ValueError("{0} format is not supported".format(y_type))

    # CSR layout exposes each row's relevant-label indices via indptr/indices.
    y_true = csr_matrix(y_true)
    # Negate so that better scores come first under 'max' ranking.
    y_score = -y_score

    n_samples, n_labels = y_true.shape
    total = 0.
    for sample_idx, (start, stop) in enumerate(zip(y_true.indptr,
                                                   y_true.indptr[1:])):
        relevant = y_true.indices[start:stop]
        if relevant.size == 0 or relevant.size == n_labels:
            # All labels relevant or none relevant: the ranking is
            # meaningless and the score is defined as 1.
            total += 1.
            continue
        scores = y_score[sample_idx]
        rank_among_all = rankdata(scores, 'max')[relevant]
        rank_among_relevant = rankdata(scores[relevant], 'max')
        total += (rank_among_relevant / rank_among_all).mean()
    return total / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
    """Coverage error measure.

    Compute how far we need to go through the ranked scores to cover all
    true labels. The best value equals the average number of labels in
    ``y_true`` per sample.

    Ties in ``y_scores`` are broken by giving maximal rank that would have
    been assigned to all tied values.

    Read more in the :ref:`User Guide <coverage_error>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    coverage_error : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Mask out the irrelevant labels, then take each sample's lowest score
    # among its relevant labels.
    masked_scores = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
    min_relevant = masked_scores.min(axis=1).reshape((-1, 1))
    # Number of labels ranked at or above the worst-ranked relevant label.
    coverage = (y_score >= min_relevant).sum(axis=1)
    # Samples without any relevant label produce masked entries: count 0.
    coverage = coverage.filled(0)
    return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
    """Compute Ranking loss measure
    Compute the average number of label pairs that are incorrectly ordered
    given y_score weighted by the size of the label set and the number of
    labels not in the label set.
    This is similar to the error set size, but weighted by the number of
    relevant and irrelevant labels. The best performance is achieved with
    a ranking loss of zero.
    Read more in the :ref:`User Guide <label_ranking_loss>`.
    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    loss : float
    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)
    y_type = type_of_target(y_true)
    if y_type not in ("multilabel-indicator",):
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    n_samples, n_labels = y_true.shape
    # CSR layout gives direct access to each row's relevant-label indices
    # through indptr/indices below.
    y_true = csr_matrix(y_true)
    loss = np.zeros(n_samples)
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        # Sort and bin the label scores: unique_inverse maps each label to
        # the (ascending) rank bin of its score.
        unique_scores, unique_inverse = np.unique(y_score[i],
                                                  return_inverse=True)
        # Histogram of relevant labels per score bin.
        true_at_reversed_rank = bincount(
            unique_inverse[y_true.indices[start:stop]],
            minlength=len(unique_scores))
        all_at_reversed_rank = bincount(unique_inverse,
                                        minlength=len(unique_scores))
        false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
        # if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
        # how many false labels of a given score have a score higher than the
        # accumulated true labels with lower score.
        loss[i] = np.dot(true_at_reversed_rank.cumsum(),
                         false_at_reversed_rank)
    n_positives = count_nonzero(y_true, axis=1)
    # Normalize by the number of (relevant, irrelevant) pairs per sample;
    # division by zero is silenced and fixed up just below.
    with np.errstate(divide="ignore", invalid="ignore"):
        loss /= ((n_labels - n_positives) * n_positives)
    # When there is no positive or no negative labels, those values should
    # be consider as correct, i.e. the ranking doesn't matter.
    loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
    return np.average(loss, weights=sample_weight)
| bsd-3-clause |
dongjoon-hyun/spark | python/setup.py | 14 | 13273 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import glob
import os
import sys
from setuptools import setup
from setuptools.command.install import install
from shutil import copyfile, copytree, rmtree
# Load the version string: pyspark/version.py defines __version__, pulled
# into this namespace via exec.
try:
    exec(open('pyspark/version.py').read())
except IOError:
    print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
          file=sys.stderr)
    sys.exit(-1)
# Load pyspark/install.py as a standalone module so its helpers (version
# checks, Spark distribution download) are available at install time.
try:
    spec = importlib.util.spec_from_file_location("install", "pyspark/install.py")
    install_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(install_module)
except IOError:
    print("Failed to load the installing module (pyspark/install.py) which had to be "
          "packaged together.",
          file=sys.stderr)
    sys.exit(-1)
VERSION = __version__  # noqa
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
    ./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
    cd python
    python setup.py sdist
    pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
    JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
    # Release mode puts the jars in a jars directory
    JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
    print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
        JARS_PATH), file=sys.stderr)
    sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
    print(incorrect_invocation_message, file=sys.stderr)
    sys.exit(-1)
# Source locations inside SPARK_HOME ...
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
USER_SCRIPTS_PATH = os.path.join(SPARK_HOME, "sbin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
# ... and their mirror locations under the temporary symlink farm.
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
USER_SCRIPTS_TARGET = os.path.join(TEMP_PATH, "sbin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm exists under while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
            (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
    # Construct links for setup
    try:
        os.mkdir(TEMP_PATH)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # consider narrowing to OSError.
    except:
        print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
              file=sys.stderr)
        sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/pandas/utils.py
# For Arrow, you should also check ./pom.xml and ensure there are no breaking changes in the
# binary format protocol with the Java version, see ARROW_HOME/format/* for specifications.
# Also don't forget to update python/docs/source/getting_started/install.rst.
_minimum_pandas_version = "0.23.2"
_minimum_pyarrow_version = "1.0.0"
class InstallCommand(install):
    """``install`` command that can also fetch a Spark distribution.

    When the PYSPARK_HADOOP_VERSION or PYSPARK_HIVE_VERSION environment
    variables are set, a matching Spark distribution is downloaded into the
    installed package (under ``pyspark/spark-distribution``) after the
    regular install step.
    """
    # TODO(SPARK-32837) leverage pip's custom options
    def run(self):
        install.run(self)
        # Make sure the destination is always clean.
        spark_dist = os.path.join(self.install_lib, "pyspark", "spark-distribution")
        rmtree(spark_dist, ignore_errors=True)
        if ("PYSPARK_HADOOP_VERSION" in os.environ) or ("PYSPARK_HIVE_VERSION" in os.environ):
            # Note that PYSPARK_VERSION environment is just a testing purpose.
            # PYSPARK_HIVE_VERSION environment variable is also internal for now in case
            # we support another version of Hive in the future.
            spark_version, hadoop_version, hive_version = install_module.checked_versions(
                os.environ.get("PYSPARK_VERSION", VERSION).lower(),
                os.environ.get("PYSPARK_HADOOP_VERSION", install_module.DEFAULT_HADOOP).lower(),
                os.environ.get("PYSPARK_HIVE_VERSION", install_module.DEFAULT_HIVE).lower())
            if ("PYSPARK_VERSION" not in os.environ and
                ((install_module.DEFAULT_HADOOP, install_module.DEFAULT_HIVE) ==
                 (hadoop_version, hive_version))):
                # Do not download and install if they are same as default.
                return
            install_module.install_spark(
                dest=spark_dist,
                spark_version=spark_version,
                hadoop_version=hadoop_version,
                hive_version=hive_version)
try:
    # We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
    # find it where expected. The rest of the files aren't copied because they are accessed
    # using Python imports instead which will be resolved correctly.
    try:
        os.makedirs("pyspark/python/pyspark")
    except OSError:
        # Don't worry if the directory already exists.
        pass
    copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
    if (in_spark):
        # Construct the symlink farm - this is necessary since we can't refer to the path above the
        # package root and we need to copy the jars and scripts which are up above the python root.
        if _supports_symlinks():
            os.symlink(JARS_PATH, JARS_TARGET)
            os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
            os.symlink(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
            os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
            os.symlink(DATA_PATH, DATA_TARGET)
            os.symlink(LICENSES_PATH, LICENSES_TARGET)
        else:
            # For windows fall back to the slower copytree
            copytree(JARS_PATH, JARS_TARGET)
            copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
            copytree(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
            copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
            copytree(DATA_PATH, DATA_TARGET)
            copytree(LICENSES_PATH, LICENSES_TARGET)
    else:
        # If we are not inside of SPARK_HOME verify we have the required symlink farm
        if not os.path.exists(JARS_TARGET):
            print("To build packaging must be in the python directory under the SPARK_HOME.",
                  file=sys.stderr)
        if not os.path.isdir(SCRIPTS_TARGET):
            print(incorrect_invocation_message, file=sys.stderr)
            sys.exit(-1)
    # Scripts directive requires a list of each script path and does not take wild cards.
    script_names = os.listdir(SCRIPTS_TARGET)
    scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
    # We add find_spark_home.py to the bin directory we install so that pip installed PySpark
    # will search for SPARK_HOME with Python.
    scripts.append("pyspark/find_spark_home.py")
    # The top-level README doubles as the long description shown on PyPI.
    with open('README.md') as f:
        long_description = f.read()
    setup(
        name='pyspark',
        version=VERSION,
        description='Apache Spark Python API',
        long_description=long_description,
        long_description_content_type="text/markdown",
        author='Spark Developers',
        author_email='dev@spark.apache.org',
        url='https://github.com/apache/spark/tree/master/python',
        packages=['pyspark',
                  'pyspark.cloudpickle',
                  'pyspark.mllib',
                  'pyspark.mllib.linalg',
                  'pyspark.mllib.stat',
                  'pyspark.ml',
                  'pyspark.ml.linalg',
                  'pyspark.ml.param',
                  'pyspark.sql',
                  'pyspark.sql.avro',
                  'pyspark.sql.pandas',
                  'pyspark.streaming',
                  'pyspark.bin',
                  'pyspark.sbin',
                  'pyspark.jars',
                  'pyspark.pandas',
                  'pyspark.pandas.data_type_ops',
                  'pyspark.pandas.indexes',
                  'pyspark.pandas.missing',
                  'pyspark.pandas.plot',
                  'pyspark.pandas.spark',
                  'pyspark.pandas.typedef',
                  'pyspark.pandas.usage_logging',
                  'pyspark.python.pyspark',
                  'pyspark.python.lib',
                  'pyspark.data',
                  'pyspark.licenses',
                  'pyspark.resource',
                  'pyspark.examples.src.main.python'],
        include_package_data=True,
        package_dir={
            'pyspark.jars': 'deps/jars',
            'pyspark.bin': 'deps/bin',
            'pyspark.sbin': 'deps/sbin',
            'pyspark.python.lib': 'lib',
            'pyspark.data': 'deps/data',
            'pyspark.licenses': 'deps/licenses',
            'pyspark.examples.src.main.python': 'deps/examples',
        },
        package_data={
            'pyspark.jars': ['*.jar'],
            'pyspark.bin': ['*'],
            'pyspark.sbin': ['spark-config.sh', 'spark-daemon.sh',
                             'start-history-server.sh',
                             'stop-history-server.sh', ],
            'pyspark.python.lib': ['*.zip'],
            'pyspark.data': ['*.txt', '*.data'],
            'pyspark.licenses': ['*.txt'],
            'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
        scripts=scripts,
        license='http://www.apache.org/licenses/LICENSE-2.0',
        # Don't forget to update python/docs/source/getting_started/install.rst
        # if you're updating the versions or dependencies.
        install_requires=['py4j==0.10.9.2'],
        extras_require={
            'ml': ['numpy>=1.7'],
            'mllib': ['numpy>=1.7'],
            'sql': [
                'pandas>=%s' % _minimum_pandas_version,
                'pyarrow>=%s' % _minimum_pyarrow_version,
            ],
            'pandas_on_spark': [
                'pandas>=%s' % _minimum_pandas_version,
                'pyarrow>=%s' % _minimum_pyarrow_version,
                'numpy>=1.14',
            ],
        },
        python_requires='>=3.6',
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python :: Implementation :: PyPy',
            'Typing :: Typed'],
        cmdclass={
            'install': InstallCommand,
        },
    )
finally:
    # We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
    # packaging.
    if (in_spark):
        # Depending on cleaning up the symlink farm or copied version
        if _supports_symlinks():
            os.remove(os.path.join(TEMP_PATH, "jars"))
            os.remove(os.path.join(TEMP_PATH, "bin"))
            os.remove(os.path.join(TEMP_PATH, "sbin"))
            os.remove(os.path.join(TEMP_PATH, "examples"))
            os.remove(os.path.join(TEMP_PATH, "data"))
            os.remove(os.path.join(TEMP_PATH, "licenses"))
        else:
            rmtree(os.path.join(TEMP_PATH, "jars"))
            rmtree(os.path.join(TEMP_PATH, "bin"))
            rmtree(os.path.join(TEMP_PATH, "sbin"))
            rmtree(os.path.join(TEMP_PATH, "examples"))
            rmtree(os.path.join(TEMP_PATH, "data"))
            rmtree(os.path.join(TEMP_PATH, "licenses"))
        os.rmdir(TEMP_PATH)
| apache-2.0 |
mfjb/scikit-learn | sklearn/ensemble/__init__.py | 217 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
# Public API of sklearn.ensemble; keep in sync with the imports above.
__all__ = ["BaseEnsemble",
           "RandomForestClassifier", "RandomForestRegressor",
           "RandomTreesEmbedding", "ExtraTreesClassifier",
           "ExtraTreesRegressor", "BaggingClassifier",
           "BaggingRegressor", "GradientBoostingClassifier",
           "GradientBoostingRegressor", "AdaBoostClassifier",
           "AdaBoostRegressor", "VotingClassifier",
           "bagging", "forest", "gradient_boosting",
           "partial_dependence", "weight_boosting"]
| bsd-3-clause |
rvraghav93/scikit-learn | sklearn/linear_model/sag.py | 30 | 12959 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import make_dataset
from .sag_fast import sag
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
                       n_samples=None,
                       is_saga=False):
    """Return the automatic step size used by the SAG/SAGA solvers.

    The step is the inverse of an upper bound ``L`` on the Lipschitz
    constant of the loss gradient; for SAGA it is further reduced to
    ``1 / (2 * L + min(2 * n_samples * alpha_scaled, L))``.

    Parameters
    ----------
    max_squared_sum : float
        Maximum squared sum of X over samples.
    alpha_scaled : float
        Regularization constant, already scaled by 1. / n_samples.
    loss : string, in {"log", "multinomial", "squared"}
        The loss function used in the solver.
    fit_intercept : bool
        Whether a constant (intercept) column is added to the decision
        function.
    n_samples : int, optional
        Number of rows in X; only used when ``is_saga`` is True.
    is_saga : boolean, optional
        Whether to return the SAGA step size rather than the SAG one.

    Returns
    -------
    step_size : float
        Step size used in the SAG/SAGA solver.

    Raises
    ------
    ValueError
        If ``loss`` is not one of the supported losses.

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document
    Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives
    https://arxiv.org/abs/1407.0202
    """
    if loss == 'squared':
        # Lipschitz constant of the squared-loss gradient.
        lipschitz = max_squared_sum + int(fit_intercept) + alpha_scaled
    elif loss in ('log', 'multinomial'):
        # 0.25 bounds the curvature of the logistic sigmoid.
        lipschitz = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled
    else:
        raise ValueError("Unknown loss function for SAG solver, got %s "
                         "instead of 'log' or 'squared'" % loss)
    if not is_saga:
        # SAG theoretical step size is 1/16L, but 1/L is recommended in
        # practice (Schmidt et al., slide 65).
        return 1. / lipschitz
    # SAGA: 1/3L, or 1 / (2 * (L + mu * n)) -- see Defazio et al. 2014.
    mun = min(2 * n_samples * alpha_scaled, lipschitz)
    return 1. / (2 * lipschitz + mun)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
               max_iter=1000, tol=0.001, verbose=0, random_state=None,
               check_input=True, max_squared_sum=None,
               warm_start_mem=None,
               is_saga=False):
    """SAG solver for Ridge and LogisticRegression
    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a constant learning rate.
    IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
    same scale. You can normalize the data by using
    sklearn.preprocessing.StandardScaler on your data before passing it to the
    fit method.
    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values for the features. It will
    fit the data according to squared loss or log loss.
    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using the squared euclidean norm L2.
    .. versionadded:: 0.17
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data
    y : numpy array, shape (n_samples,)
        Target values. With loss='multinomial', y must be label encoded
        (see preprocessing.LabelEncoder).
    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples (1. for unweighted).
    loss : 'log' | 'squared' | 'multinomial'
        Loss function that will be optimized:
        -'log' is the binary logistic loss, as used in LogisticRegression.
        -'squared' is the squared loss, as used in Ridge.
        -'multinomial' is the multinomial logistic loss, as used in
         LogisticRegression.
        .. versionadded:: 0.18
           *loss='multinomial'*
    alpha : float, optional
        Constant that multiplies the regularization term. Defaults to 1.
    max_iter : int, optional
        The max number of passes over the training data if the stopping
        criteria is not reached. Defaults to 1000.
    tol : double, optional
        The stopping criteria for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol. Defaults to .001
    verbose : integer, optional
        The verbosity level.
    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`.
    check_input : bool, default True
        If False, the input arrays X and y will not be checked.
    max_squared_sum : float, default None
        Maximum squared sum of X over samples. If None, it will be computed,
        going through all the samples. The value should be precomputed
        to speed up cross validation.
    warm_start_mem : dict, optional
        The initialization parameters used for warm starting. Warm starting is
        currently used in LogisticRegression but not in Ridge.
        It contains:
            - 'coef': the weight vector, with the intercept in last line
                if the intercept is fitted.
            - 'gradient_memory': the scalar gradient for all seen samples.
            - 'sum_gradient': the sum of gradient over all seen samples,
                for each feature.
            - 'intercept_sum_gradient': the sum of gradient over all seen
                samples, for the intercept.
            - 'seen': array of boolean describing the seen samples.
            - 'num_seen': the number of seen samples.
    is_saga : boolean, optional
        Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
        better in the first epochs, and allow for l1 regularisation.
    Returns
    -------
    coef_ : array, shape (n_features)
        Weight vector.
    n_iter_ : int
        The number of full pass on all samples.
    warm_start_mem : dict
        Contains a 'coef' key with the fitted result, and possibly the
        fitted intercept at the end of the array. Contains also other keys
        used for warm starting.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> X = np.random.randn(n_samples, n_features)
    >>> y = np.random.randn(n_samples)
    >>> clf = linear_model.Ridge(solver='sag')
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
       normalize=False, random_state=None, solver='sag', tol=0.001)
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.LogisticRegression(solver='sag')
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    LogisticRegression(C=1.0, class_weight=None, dual=False,
        fit_intercept=True, intercept_scaling=1, max_iter=100,
        multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
        solver='sag', tol=0.0001, verbose=0, warm_start=False)
    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/document
    Defazio, A., Bach F. & Lacoste-Julien S. (2014).
    SAGA: A Fast Incremental Gradient Method With Support
    for Non-Strongly Convex Composite Objectives
    https://arxiv.org/abs/1407.0202
    See also
    --------
    Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
    LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    if warm_start_mem is None:
        warm_start_mem = {}
    # Ridge default max_iter is None
    if max_iter is None:
        max_iter = 1000
    if check_input:
        # The solver requires C-ordered float64 data (CSR for sparse input).
        X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
    n_samples, n_features = X.shape[0], X.shape[1]
    # As in SGD, the alpha is scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples
    beta_scaled = float(beta) / n_samples
    # if loss == 'multinomial', y should be label encoded.
    n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
    # initialization
    if sample_weight is None:
        sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
    # Pull running statistics from warm_start_mem where available, falling
    # back to zero-initialized buffers on a cold start.
    if 'coef' in warm_start_mem.keys():
        coef_init = warm_start_mem['coef']
    else:
        # assume fit_intercept is False
        coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
                             order='C')
    # coef_init contains possibly the intercept_init at the end.
    # Note that Ridge centers the data before fitting, so fit_intercept=False.
    fit_intercept = coef_init.shape[0] == (n_features + 1)
    if fit_intercept:
        intercept_init = coef_init[-1, :]
        coef_init = coef_init[:-1, :]
    else:
        intercept_init = np.zeros(n_classes, dtype=np.float64)
    if 'intercept_sum_gradient' in warm_start_mem.keys():
        intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
    else:
        intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
    if 'gradient_memory' in warm_start_mem.keys():
        gradient_memory_init = warm_start_mem['gradient_memory']
    else:
        gradient_memory_init = np.zeros((n_samples, n_classes),
                                        dtype=np.float64, order='C')
    if 'sum_gradient' in warm_start_mem.keys():
        sum_gradient_init = warm_start_mem['sum_gradient']
    else:
        sum_gradient_init = np.zeros((n_features, n_classes),
                                     dtype=np.float64, order='C')
    if 'seen' in warm_start_mem.keys():
        seen_init = warm_start_mem['seen']
    else:
        seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
    if 'num_seen' in warm_start_mem.keys():
        num_seen_init = warm_start_mem['num_seen']
    else:
        num_seen_init = 0
    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
    if max_squared_sum is None:
        max_squared_sum = row_norms(X, squared=True).max()
    step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
                                   fit_intercept, n_samples=n_samples,
                                   is_saga=is_saga)
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError("Current sag implementation does not handle "
                                "the case step_size * alpha_scaled == 1")
    # Main optimization loop, implemented in the sag_fast extension; it
    # updates the warm-start buffers in place.
    num_seen, n_iter_ = sag(dataset, coef_init,
                            intercept_init, n_samples,
                            n_features, n_classes, tol,
                            max_iter,
                            loss,
                            step_size, alpha_scaled,
                            beta_scaled,
                            sum_gradient_init,
                            gradient_memory_init,
                            seen_init,
                            num_seen_init,
                            fit_intercept,
                            intercept_sum_gradient,
                            intercept_decay,
                            is_saga,
                            verbose)
    if n_iter_ == max_iter:
        warnings.warn("The max_iter was reached which means "
                      "the coef_ did not converge", ConvergenceWarning)
    if fit_intercept:
        # Re-append the intercept as the last row so warm starts round-trip.
        coef_init = np.vstack((coef_init, intercept_init))
    warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
                      'intercept_sum_gradient': intercept_sum_gradient,
                      'gradient_memory': gradient_memory_init,
                      'seen': seen_init, 'num_seen': num_seen}
    if loss == 'multinomial':
        coef_ = coef_init.T
    else:
        coef_ = coef_init[:, 0]
    return coef_, n_iter_, warm_start_mem
| bsd-3-clause |
schets/scikit-learn | sklearn/mixture/gmm.py | 7 | 31031 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute the log probability under a multivariate Gaussian distribution.

    Dispatches to the specialised helper for the requested covariance
    parameterisation.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        Data points, one n_features-dimensional point per row.
    means : array_like, shape (n_components, n_features)
        Mean vector of each Gaussian, one per row.
    covars : array_like
        Covariance parameters for each Gaussian; the shape depends on
        `covariance_type`:
        (n_components, n_features) if 'spherical',
        (n_features, n_features) if 'tied',
        (n_components, n_features) if 'diag',
        (n_components, n_features, n_features) if 'full'
    covariance_type : string
        One of 'spherical', 'tied', 'diag' (default), 'full'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Log probability of each point in X under each of the
        n_components Gaussians.
    """
    if covariance_type == 'spherical':
        return _log_multivariate_normal_density_spherical(X, means, covars)
    if covariance_type == 'tied':
        return _log_multivariate_normal_density_tied(X, means, covars)
    if covariance_type == 'diag':
        return _log_multivariate_normal_density_diag(X, means, covars)
    if covariance_type == 'full':
        return _log_multivariate_normal_density_full(X, means, covars)
    # The historical implementation used a dict lookup; preserve its
    # KeyError on unknown covariance types.
    raise KeyError(covariance_type)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
                    random_state=None):
    """Draw random samples from a single Gaussian component.

    Parameters
    ----------
    mean : array_like, shape (n_features,)
        Mean of the distribution.
    covar : array_like, optional
        Covariance of the distribution; the shape depends on
        `covariance_type`:
        scalar if 'spherical',
        (n_features) if 'diag',
        (n_features, n_features) if 'tied', or 'full'
    covariance_type : string, optional
        One of 'spherical', 'tied', 'diag' (default), 'full'.
    n_samples : int, optional
        Number of samples to generate. Defaults to 1.
    random_state : int, RandomState instance or None, optional
        Source of randomness.

    Returns
    -------
    X : array, shape (n_features, n_samples)
        Randomly generated sample (1-D when ``n_samples == 1``).
    """
    rng = check_random_state(random_state)
    n_dim = len(mean)
    # Start from standard-normal noise, then colour it with the covariance.
    noise = rng.randn(n_dim, n_samples)
    if n_samples == 1:
        noise.shape = (n_dim,)
    if covariance_type == 'spherical':
        noise *= np.sqrt(covar)
    elif covariance_type == 'diag':
        noise = np.dot(np.diag(np.sqrt(covar)), noise)
    else:
        # 'tied' / 'full': multiply by a symmetric square root of covar
        # built from its eigendecomposition.
        eigvals, basis = linalg.eigh(covar)
        eigvals.clip(0, out=eigvals)  # round-off can give tiny negatives
        np.sqrt(eigvals, out=eigvals)
        basis *= eigvals
        noise = np.dot(basis, noise)
    return (noise.T + mean).T
class GMM(BaseEstimator):
    """Gaussian Mixture Model
    Representation of a Gaussian mixture model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a GMM distribution.
    Initializes parameters such that every mixture component has zero
    mean and identity covariance.
    Parameters
    ----------
    n_components : int, optional
        Number of mixture components. Defaults to 1.
    covariance_type : string, optional
        String describing the type of covariance parameters to
        use. Must be one of 'spherical', 'tied', 'diag', 'full'.
        Defaults to 'diag'.
    random_state: RandomState or an int seed (None by default)
        A random number generator instance
    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting. Defaults to 1e-3.
    tol : float, optional
        Convergence threshold. EM iterations will stop when average
        gain in log-likelihood is below this threshold. Defaults to 1e-3.
    n_iter : int, optional
        Number of EM iterations to perform.
    n_init : int, optional
        Number of initializations to perform. the best results is kept
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    init_params : string, optional
        Controls which parameters are updated in the initialization
        process. Can contain any combination of 'w' for weights,
        'm' for means, and 'c' for covars. Defaults to 'wmc'.
    verbose : int, default: 0
        Enable verbose output. If 1 then it always prints the current
        initialization and iteration step. If greater than 1 then
        it prints additionally the change and time needed for each step.
    Attributes
    ----------
    weights_ : array, shape (`n_components`,)
        This attribute stores the mixing weights for each mixture component.
    means_ : array, shape (`n_components`, `n_features`)
        Mean parameters for each mixture component.
    covars_ : array
        Covariance parameters for each mixture component. The shape
        depends on `covariance_type`::
            (n_components, n_features) if 'spherical',
            (n_features, n_features) if 'tied',
            (n_components, n_features) if 'diag',
            (n_components, n_features, n_features) if 'full'
    converged_ : bool
        True when convergence was reached in fit(), False otherwise.
    See Also
    --------
    DPGMM : Infinite gaussian mixture model, using the dirichlet
        process, fit with a variational algorithm
    VBGMM : Finite gaussian mixture model fit with a variational
        algorithm, better for situations where there might be too little
        data to get a good estimate of the covariance matrix.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import mixture
    >>> np.random.seed(1)
    >>> g = mixture.GMM(n_components=2)
    >>> # Generate random observations with two modes centered on 0
    >>> # and 10 to use for training.
    >>> obs = np.concatenate((np.random.randn(100, 1),
    ...                       10 + np.random.randn(300, 1)))
    >>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
            n_components=2, n_init=1, n_iter=100, params='wmc',
            random_state=None, thresh=None, tol=0.001, verbose=0)
    >>> np.round(g.weights_, 2)
    array([ 0.75, 0.25])
    >>> np.round(g.means_, 2)
    array([[ 10.05],
           [ 0.06]])
    >>> np.round(g.covars_, 2) #doctest: +SKIP
    array([[[ 1.02]],
           [[ 0.96]]])
    >>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
    array([1, 1, 0, 0]...)
    >>> np.round(g.score([[0], [2], [9], [10]]), 2)
    array([-2.19, -4.58, -1.75, -1.21])
    >>> # Refit the model on new data (initial parameters remain the
    >>> # same), this time with an even split between the two modes.
    >>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
    GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
            n_components=2, n_init=1, n_iter=100, params='wmc',
            random_state=None, thresh=None, tol=0.001, verbose=0)
    >>> np.round(g.weights_, 2)
    array([ 0.5, 0.5])
    """
    def __init__(self, n_components=1, covariance_type='diag',
                 random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
                 n_iter=100, n_init=1, params='wmc', init_params='wmc',
                 verbose=0):
        if thresh is not None:
            warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
                          " and will be removed in 0.18.",
                          DeprecationWarning)
        self.n_components = n_components
        self.covariance_type = covariance_type
        self.thresh = thresh
        self.tol = tol
        self.min_covar = min_covar
        self.random_state = random_state
        self.n_iter = n_iter
        self.n_init = n_init
        self.params = params
        self.init_params = init_params
        self.verbose = verbose
        # Validate constructor arguments eagerly so misconfiguration
        # fails at construction time rather than during fit().
        if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError('Invalid value for covariance_type: %s' %
                             covariance_type)
        if n_init < 1:
            raise ValueError('GMM estimation requires at least one run')
        self.weights_ = np.ones(self.n_components) / self.n_components
        # flag to indicate exit status of fit() method: converged (True) or
        # n_iter reached (False)
        self.converged_ = False
    def _get_covars(self):
        """Covariance parameters for each mixture component.
        The shape depends on `cvtype`::
            (`n_states`, 'n_features') if 'spherical',
            (`n_features`, `n_features`) if 'tied',
            (`n_states`, `n_features`) if 'diag',
            (`n_states`, `n_features`, `n_features`) if 'full'
        """
        if self.covariance_type == 'full':
            return self.covars_
        elif self.covariance_type == 'diag':
            return [np.diag(cov) for cov in self.covars_]
        elif self.covariance_type == 'tied':
            return [self.covars_] * self.n_components
        elif self.covariance_type == 'spherical':
            return [np.diag(cov) for cov in self.covars_]
    def _set_covars(self, covars):
        """Provide values for covariance"""
        covars = np.asarray(covars)
        _validate_covars(covars, self.covariance_type, self.n_components)
        self.covars_ = covars
    def score_samples(self, X):
        """Return the per-sample likelihood of the data under the model.
        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.
        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X.
        responsibilities : array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        check_is_fitted(self, 'means_')
        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('The shape of X is not compatible with self')
        # Joint log-probability: per-component log density plus log
        # mixing weight; the per-sample logprob is its logsumexp.
        lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
                                               self.covariance_type)
               + np.log(self.weights_))
        logprob = logsumexp(lpr, axis=1)
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities
    def score(self, X, y=None):
        """Compute the log probability under the model.
        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X
        """
        logprob, _ = self.score_samples(X)
        return logprob
    def predict(self, X):
        """Predict label for data.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = (n_samples,) component memberships
        """
        logprob, responsibilities = self.score_samples(X)
        return responsibilities.argmax(axis=1)
    def predict_proba(self, X):
        """Predict posterior probability of data under each Gaussian
        in the model.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        responsibilities : array-like, shape = (n_samples, n_components)
            Returns the probability of the sample for each Gaussian
            (state) in the model.
        """
        logprob, responsibilities = self.score_samples(X)
        return responsibilities
    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.
        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.
        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples
        """
        check_is_fitted(self, 'means_')
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        # Inverse-CDF sampling of the component index: cumulative mixing
        # weights searched with uniform draws.
        weight_cdf = np.cumsum(self.weights_)
        X = np.empty((n_samples, self.means_.shape[1]))
        rand = random_state.rand(n_samples)
        # decide which component to use for each sample
        comps = weight_cdf.searchsorted(rand)
        # for each component, generate all needed samples
        for comp in range(self.n_components):
            # occurrences of current component in X
            comp_in_X = (comp == comps)
            # number of those occurrences
            num_comp_in_X = comp_in_X.sum()
            if num_comp_in_X > 0:
                if self.covariance_type == 'tied':
                    cv = self.covars_
                elif self.covariance_type == 'spherical':
                    cv = self.covars_[comp][0]
                else:
                    cv = self.covars_[comp]
                X[comp_in_X] = sample_gaussian(
                    self.means_[comp], cv, self.covariance_type,
                    num_comp_in_X, random_state=random_state).T
        return X
    def fit_predict(self, X, y=None):
        """
        Fit and then predict labels for data.
        Warning: due to the final maximization step in the EM algorithm,
        with low iterations the prediction may not be 100% accurate
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = (n_samples,) component memberships
        """
        return self._fit(X, y).argmax(axis=1)
    def _fit(self, X, y=None, do_prediction=False):
        """Estimate model parameters with the EM algorithm.
        A initialization step is performed before entering the
        expectation-maximization (EM) algorithm. If you want to avoid
        this step, set the keyword argument init_params to the empty
        string '' when creating the GMM object. Likewise, if you would
        like just to do an initialization, set n_iter=0.
        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        responsibilities : array, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation.
        """
        # NOTE(review): do_prediction is unused in this implementation.
        # initialization step
        X = check_array(X, dtype=np.float64)
        if X.shape[0] < self.n_components:
            raise ValueError(
                'GMM estimation with %s components, but got only %s samples' %
                (self.n_components, X.shape[0]))
        max_log_prob = -np.infty
        if self.verbose > 0:
            print('Expectation-maximization algorithm started.')
        # Run EM n_init times from fresh initializations; keep the run
        # with the highest mean log-likelihood.
        for init in range(self.n_init):
            if self.verbose > 0:
                print('Initialization '+str(init+1))
                start_init_time = time()
            if 'm' in self.init_params or not hasattr(self, 'means_'):
                self.means_ = cluster.KMeans(
                    n_clusters=self.n_components,
                    random_state=self.random_state).fit(X).cluster_centers_
                if self.verbose > 1:
                    print('\tMeans have been initialized.')
            if 'w' in self.init_params or not hasattr(self, 'weights_'):
                self.weights_ = np.tile(1.0 / self.n_components,
                                        self.n_components)
                if self.verbose > 1:
                    print('\tWeights have been initialized.')
            if 'c' in self.init_params or not hasattr(self, 'covars_'):
                cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
                if not cv.shape:
                    cv.shape = (1, 1)
                self.covars_ = \
                    distribute_covar_matrix_to_match_covariance_type(
                        cv, self.covariance_type, self.n_components)
                if self.verbose > 1:
                    print('\tCovariance matrices have been initialized.')
            # EM algorithms
            current_log_likelihood = None
            # reset self.converged_ to False
            self.converged_ = False
            # this line should be removed when 'thresh' is removed in v0.18
            tol = (self.tol if self.thresh is None
                   else self.thresh / float(X.shape[0]))
            for i in range(self.n_iter):
                if self.verbose > 0:
                    print('\tEM iteration '+str(i+1))
                    start_iter_time = time()
                prev_log_likelihood = current_log_likelihood
                # Expectation step
                log_likelihoods, responsibilities = self.score_samples(X)
                current_log_likelihood = log_likelihoods.mean()
                # Check for convergence.
                # (should compare to self.tol when deprecated 'thresh' is
                # removed in v0.18)
                if prev_log_likelihood is not None:
                    change = abs(current_log_likelihood - prev_log_likelihood)
                    if self.verbose > 1:
                        print('\t\tChange: '+str(change))
                    if change < tol:
                        self.converged_ = True
                        if self.verbose > 0:
                            print('\t\tEM algorithm converged.')
                        break
                # Maximization step
                self._do_mstep(X, responsibilities, self.params,
                               self.min_covar)
                if self.verbose > 1:
                    print('\t\tEM iteration '+str(i+1)+' took {0:.5f}s'.format(
                        time()-start_iter_time))
            # if the results are better, keep it
            if self.n_iter:
                if current_log_likelihood > max_log_prob:
                    max_log_prob = current_log_likelihood
                    best_params = {'weights': self.weights_,
                                   'means': self.means_,
                                   'covars': self.covars_}
                    if self.verbose > 1:
                        print('\tBetter parameters were found.')
            if self.verbose > 1:
                print('\tInitialization '+str(init+1)+' took {0:.5f}s'.format(
                    time()-start_init_time))
        # check the existence of an init param that was not subject to
        # likelihood computation issue.
        if np.isneginf(max_log_prob) and self.n_iter:
            raise RuntimeError(
                "EM algorithm was never able to compute a valid likelihood " +
                "given initial parameters. Try different init parameters " +
                "(or increasing n_init) or check for degenerate data.")
        if self.n_iter:
            self.covars_ = best_params['covars']
            self.means_ = best_params['means']
            self.weights_ = best_params['weights']
        else:  # self.n_iter == 0 occurs when using GMM within HMM
            # Need to make sure that there are responsibilities to output
            # Output zeros because it was just a quick initialization
            responsibilities = np.zeros((X.shape[0], self.n_components))
        return responsibilities
    def fit(self, X, y=None):
        """Estimate model parameters with the EM algorithm.
        A initialization step is performed before entering the
        expectation-maximization (EM) algorithm. If you want to avoid
        this step, set the keyword argument init_params to the empty
        string '' when creating the GMM object. Likewise, if you would
        like just to do an initialization, set n_iter=0.
        Parameters
        ----------
        X : array_like, shape (n, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        self
        """
        self._fit(X, y)
        return self
    def _do_mstep(self, X, responsibilities, params, min_covar=0):
        """ Perform the Mstep of the EM algorithm and return the class weights
        """
        # Responsibility-weighted sufficient statistics shared by the
        # weight, mean and covariance updates below.
        weights = responsibilities.sum(axis=0)
        weighted_X_sum = np.dot(responsibilities.T, X)
        inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
        if 'w' in params:
            self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
        if 'm' in params:
            self.means_ = weighted_X_sum * inverse_weights
        if 'c' in params:
            covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
            self.covars_ = covar_mstep_func(
                self, X, responsibilities, weighted_X_sum, inverse_weights,
                min_covar)
        return weights
    def _n_parameters(self):
        """Return the number of free parameters in the model."""
        # Free parameters = covariance params + mean params +
        # (n_components - 1) independent mixing weights.
        ndim = self.means_.shape[1]
        if self.covariance_type == 'full':
            cov_params = self.n_components * ndim * (ndim + 1) / 2.
        elif self.covariance_type == 'diag':
            cov_params = self.n_components * ndim
        elif self.covariance_type == 'tied':
            cov_params = ndim * (ndim + 1) / 2.
        elif self.covariance_type == 'spherical':
            cov_params = self.n_components
        mean_params = ndim * self.n_components
        return int(cov_params + mean_params + self.n_components - 1)
    def bic(self, X):
        """Bayesian information criterion for the current model fit
        and the proposed data
        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)
        Returns
        -------
        bic: float (the lower the better)
        """
        return (-2 * self.score(X).sum() +
                self._n_parameters() * np.log(X.shape[0]))
    def aic(self, X):
        """Akaike information criterion for the current model fit
        and the proposed data
        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)
        Returns
        -------
        aic: float (the lower the better)
        """
        return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
    """Gaussian log-density of each row of X under each diagonal model.

    Expands -0.5 * sum((x - mu)**2 / var) into dot products so the full
    (n_samples, n_components) matrix is obtained without an explicit loop.
    """
    _, n_dim = X.shape
    # Per-component constants: normalization plus the mean-dependent part.
    const = n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
    mean_term = np.sum((means ** 2) / covars, 1)
    # Sample-by-component cross and quadratic terms.
    cross_term = 2 * np.dot(X, (means / covars).T)
    quad_term = np.dot(X ** 2, (1.0 / covars).T)
    return -0.5 * (const + mean_term - cross_term + quad_term)
def _log_multivariate_normal_density_spherical(X, means, covars):
    """Compute Gaussian log-density at X for a spherical model.

    ``covars`` may be 1-D ``(n_components,)`` or 2-D with a single column;
    in both cases the per-component variance is broadcast across all
    features before delegating to the diagonal implementation.
    """
    cv = covars.copy()
    if cv.ndim == 1:
        cv = cv[:, np.newaxis]
    # Bug fix: inspect the (possibly reshaped) working copy, not the
    # original array -- ``covars.shape[1]`` raises IndexError when the
    # 1-D branch above was taken.
    if cv.shape[1] == 1:
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
    """Gaussian log-density at X for a tied model (one shared covariance).

    Replicates the single covariance matrix once per component and
    delegates to the full-covariance implementation.
    """
    n_components = means.shape[0]
    replicated = np.tile(covars, (n_components, 1, 1))
    return _log_multivariate_normal_density_full(X, means, replicated)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
    """Log probability of X under Gaussians with full covariance matrices.

    Each component's density is evaluated through a Cholesky factorization
    of its covariance; a singular covariance is retried with ``min_covar``
    added to the diagonal before giving up.
    """
    n_samples, n_dim = X.shape
    n_components = len(means)
    log_prob = np.empty((n_samples, n_components))
    for c, (mu, cv) in enumerate(zip(means, covars)):
        try:
            chol = linalg.cholesky(cv, lower=True)
        except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; retry with a jittered diagonal.
            try:
                chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
                                       lower=True)
            except linalg.LinAlgError:
                raise ValueError("'covars' must be symmetric, "
                                 "positive-definite")
        log_det = 2 * np.sum(np.log(np.diagonal(chol)))
        # Whiten the centered samples: solve L z = (x - mu).
        whitened = linalg.solve_triangular(chol, (X - mu).T, lower=True).T
        log_prob[:, c] = - .5 * (np.sum(whitened ** 2, axis=1) +
                                 n_dim * np.log(2 * np.pi) + log_det)
    return log_prob
def _validate_covars(covars, covariance_type, n_components):
    """Sanity-check covariance shapes and positive-definiteness.

    Raises ValueError on the first violated constraint for the given
    ``covariance_type``; returns None when everything checks out.
    """
    from scipy import linalg
    if covariance_type == 'spherical':
        if len(covars) != n_components:
            raise ValueError("'spherical' covars have length n_components")
        if np.any(covars <= 0):
            raise ValueError("'spherical' covars must be non-negative")
    elif covariance_type == 'tied':
        if covars.shape[0] != covars.shape[1]:
            raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
        symmetric = np.allclose(covars, covars.T)
        if not symmetric or np.any(linalg.eigvalsh(covars) <= 0):
            raise ValueError("'tied' covars must be symmetric, "
                             "positive-definite")
    elif covariance_type == 'diag':
        if covars.ndim != 2:
            raise ValueError("'diag' covars must have shape "
                             "(n_components, n_dim)")
        if np.any(covars <= 0):
            raise ValueError("'diag' covars must be non-negative")
    elif covariance_type == 'full':
        if covars.ndim != 3:
            raise ValueError("'full' covars must have shape "
                             "(n_components, n_dim, n_dim)")
        if covars.shape[1] != covars.shape[2]:
            raise ValueError("'full' covars must have shape "
                             "(n_components, n_dim, n_dim)")
        for n, cv in enumerate(covars):
            symmetric = np.allclose(cv, cv.T)
            if not symmetric or np.any(linalg.eigvalsh(cv) <= 0):
                raise ValueError("component %d of 'full' covars must be "
                                 "symmetric, positive-definite" % n)
    else:
        raise ValueError("covariance_type must be one of " +
                         "'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Create all the covariance matrices from a given template.

    ``tied_cv`` is one (n_dim, n_dim) covariance matrix; it is expanded
    into the representation required by ``covariance_type``:

    * 'spherical' : (n_components, n_dim), every entry the mean variance
    * 'tied'      : the template itself, unchanged
    * 'diag'      : (n_components, n_dim), the template's diagonal
    * 'full'      : (n_components, n_dim, n_dim), the template repeated
    """
    if covariance_type == 'spherical':
        return np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
                       (n_components, 1))
    if covariance_type == 'tied':
        return tied_cv
    if covariance_type == 'diag':
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'full':
        return np.tile(tied_cv, (n_components, 1, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
    """Performing the covariance M step for spherical cases."""
    # Average the per-dimension variances of the diagonal solution, then
    # broadcast the single value back across every dimension.
    diag_cv = _covar_mstep_diag(*args)
    per_component = diag_cv.mean(axis=1)[:, np.newaxis]
    return np.tile(per_component, (1, diag_cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for full cases."""
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    cv = np.empty((gmm.n_components, n_features, n_features))
    for comp in range(gmm.n_components):
        post = responsibilities[:, comp]
        diff = X - gmm.means_[comp]
        with np.errstate(under='ignore'):
            # Underflow Errors in doing post * X.T are not important
            avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
        cv[comp] = avg_cv + min_covar * np.eye(n_features)
    return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
# Dispatch table mapping each covariance type to its M-step routine, so the
# fitting loop can update covariances without a per-type branch.
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
                      'diag': _covar_mstep_diag,
                      'tied': _covar_mstep_tied,
                      'full': _covar_mstep_full,
                      }
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tests/types/test_dtypes.py | 1 | 6899 | # -*- coding: utf-8 -*-
from itertools import product
import nose
import numpy as np
from pandas import Series, Categorical, date_range
import pandas.core.common as com
from pandas.types.api import CategoricalDtype
from pandas.core.common import (is_categorical_dtype,
is_categorical, DatetimeTZDtype,
is_datetime64tz_dtype, is_datetimetz,
is_dtype_equal, is_datetime64_ns_dtype,
is_datetime64_dtype)
import pandas.util.testing as tm
_multiprocess_can_split_ = True
class Base(object):
    """Shared dtype checks; subclasses assign ``self.dtype`` in ``setUp``."""

    def test_hash(self):
        # The dtype must be hashable.
        hash(self.dtype)

    def test_equality_invalid(self):
        # Bug fix: the original called ``self.assertRaises(self.dtype ==
        # 'foo')``, handing a bool to assertRaises -- which returns an
        # unused context manager and asserts nothing.  Comparing a dtype
        # with an unrelated string must simply evaluate to False.
        self.assertFalse(self.dtype == 'foo')

    def test_numpy_informed(self):
        # np.dtype doesn't know about our new dtype
        def f():
            np.dtype(self.dtype)
        self.assertRaises(TypeError, f)
        self.assertNotEqual(self.dtype, np.str_)
        self.assertNotEqual(np.str_, self.dtype)

    def test_pickle(self):
        # round_trip_pickle is supplied by the tm.TestCase mixin of the
        # concrete subclasses.
        result = self.round_trip_pickle(self.dtype)
        self.assertEqual(result, self.dtype)
class TestCategoricalDtype(Base, tm.TestCase):
    """Tests for the pandas ``category`` extension dtype."""
    def setUp(self):
        # ``Base`` test methods read ``self.dtype``.
        self.dtype = CategoricalDtype()
    def test_equality(self):
        # Equal to its string alias and to a fresh instance; not to an
        # unrelated string.
        self.assertTrue(is_dtype_equal(self.dtype, 'category'))
        self.assertTrue(is_dtype_equal(self.dtype, CategoricalDtype()))
        self.assertFalse(is_dtype_equal(self.dtype, 'foo'))
    def test_construction_from_string(self):
        # Only the exact alias 'category' parses; anything else raises
        # TypeError so dtype inference can fall through.
        result = CategoricalDtype.construct_from_string('category')
        self.assertTrue(is_dtype_equal(self.dtype, result))
        self.assertRaises(
            TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
    def test_is_dtype(self):
        # is_dtype accepts the instance, the alias and a fresh instance,
        # and rejects unrelated strings and numpy dtypes.
        self.assertTrue(CategoricalDtype.is_dtype(self.dtype))
        self.assertTrue(CategoricalDtype.is_dtype('category'))
        self.assertTrue(CategoricalDtype.is_dtype(CategoricalDtype()))
        self.assertFalse(CategoricalDtype.is_dtype('foo'))
        self.assertFalse(CategoricalDtype.is_dtype(np.float64))
    def test_basic(self):
        self.assertTrue(is_categorical_dtype(self.dtype))
        # NOTE(review): Categorical.from_array is this pandas version's
        # construction path; later versions deprecate it in favour of the
        # Categorical constructor.
        factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c', 'c',
                                         'c'])
        s = Series(factor, name='A')
        # dtypes
        # Both the Series and its .dtype are recognised; a plain numpy
        # dtype and a scalar are not.
        self.assertTrue(is_categorical_dtype(s.dtype))
        self.assertTrue(is_categorical_dtype(s))
        self.assertFalse(is_categorical_dtype(np.dtype('float64')))
        self.assertTrue(is_categorical(s.dtype))
        self.assertTrue(is_categorical(s))
        self.assertFalse(is_categorical(np.dtype('float64')))
        self.assertFalse(is_categorical(1.0))
class TestDatetimeTZDtype(Base, tm.TestCase):
    """Tests for the timezone-aware ``datetime64[ns, tz]`` dtype."""
    def setUp(self):
        # ``Base`` test methods read ``self.dtype``.
        self.dtype = DatetimeTZDtype('ns', 'US/Eastern')
    def test_construction(self):
        # Only nanosecond resolution is supported.
        self.assertRaises(ValueError,
                          lambda: DatetimeTZDtype('ms', 'US/Eastern'))
    def test_subclass(self):
        # Instances with different zones still share the same type.
        a = DatetimeTZDtype('datetime64[ns, US/Eastern]')
        b = DatetimeTZDtype('datetime64[ns, CET]')
        self.assertTrue(issubclass(type(a), type(a)))
        self.assertTrue(issubclass(type(a), type(b)))
    def test_coerce_to_dtype(self):
        # String specs coerce to equivalent DatetimeTZDtype instances.
        self.assertEqual(com._coerce_to_dtype('datetime64[ns, US/Eastern]'),
                         DatetimeTZDtype('ns', 'US/Eastern'))
        self.assertEqual(com._coerce_to_dtype('datetime64[ns, Asia/Tokyo]'),
                         DatetimeTZDtype('ns', 'Asia/Tokyo'))
    def test_compat(self):
        # A tz-aware dtype is *not* the plain datetime64[ns] dtype.
        self.assertFalse(is_datetime64_ns_dtype(self.dtype))
        self.assertFalse(is_datetime64_ns_dtype('datetime64[ns, US/Eastern]'))
        self.assertFalse(is_datetime64_dtype(self.dtype))
        self.assertFalse(is_datetime64_dtype('datetime64[ns, US/Eastern]'))
    def test_construction_from_string(self):
        result = DatetimeTZDtype('datetime64[ns, US/Eastern]')
        self.assertTrue(is_dtype_equal(self.dtype, result))
        result = DatetimeTZDtype.construct_from_string(
            'datetime64[ns, US/Eastern]')
        self.assertTrue(is_dtype_equal(self.dtype, result))
        # Unparseable specs raise TypeError so inference can move on.
        self.assertRaises(TypeError,
                          lambda: DatetimeTZDtype.construct_from_string('foo'))
    def test_is_dtype(self):
        self.assertTrue(DatetimeTZDtype.is_dtype(self.dtype))
        self.assertTrue(DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]'))
        self.assertFalse(DatetimeTZDtype.is_dtype('foo'))
        self.assertTrue(DatetimeTZDtype.is_dtype(DatetimeTZDtype(
            'ns', 'US/Pacific')))
        self.assertFalse(DatetimeTZDtype.is_dtype(np.float64))
    def test_equality(self):
        # Equality is sensitive to the timezone, not just the unit.
        self.assertTrue(is_dtype_equal(self.dtype,
                                       'datetime64[ns, US/Eastern]'))
        self.assertTrue(is_dtype_equal(self.dtype, DatetimeTZDtype(
            'ns', 'US/Eastern')))
        self.assertFalse(is_dtype_equal(self.dtype, 'foo'))
        self.assertFalse(is_dtype_equal(self.dtype, DatetimeTZDtype('ns',
                                                                    'CET')))
        self.assertFalse(is_dtype_equal(
            DatetimeTZDtype('ns', 'US/Eastern'), DatetimeTZDtype(
                'ns', 'US/Pacific')))
        # numpy compat
        self.assertTrue(is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]"))
    def test_basic(self):
        self.assertTrue(is_datetime64tz_dtype(self.dtype))
        dr = date_range('20130101', periods=3, tz='US/Eastern')
        s = Series(dr, name='A')
        # dtypes
        # Both the Series and its .dtype are recognised as tz-aware; a
        # plain numpy dtype and a scalar are not.
        self.assertTrue(is_datetime64tz_dtype(s.dtype))
        self.assertTrue(is_datetime64tz_dtype(s))
        self.assertFalse(is_datetime64tz_dtype(np.dtype('float64')))
        self.assertFalse(is_datetime64tz_dtype(1.0))
        self.assertTrue(is_datetimetz(s))
        self.assertTrue(is_datetimetz(s.dtype))
        self.assertFalse(is_datetimetz(np.dtype('float64')))
        self.assertFalse(is_datetimetz(1.0))
    def test_dst(self):
        # Ranges on either side of a DST transition share the same dtype.
        dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
        s1 = Series(dr1, name='A')
        self.assertTrue(is_datetimetz(s1))
        dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
        s2 = Series(dr2, name='A')
        self.assertTrue(is_datetimetz(s2))
        self.assertEqual(s1.dtype, s2.dtype)
    def test_parser(self):
        # pr #11245
        # Both 'M8' and 'datetime64' spellings parse to the same dtype.
        for tz, constructor in product(('UTC', 'US/Eastern'),
                                       ('M8', 'datetime64')):
            self.assertEqual(
                DatetimeTZDtype('%s[ns, %s]' % (constructor, tz)),
                DatetimeTZDtype('ns', tz),
            )
# Allow running this module directly; -x stops on the first failure and the
# pdb flags drop into the debugger on errors and failures.
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/tests/frame/test_indexing.py | 7 | 104529 | # -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings
from datetime import datetime, date, timedelta, time
from pandas.compat import map, zip, range, lrange, lzip, long
from pandas import compat
from numpy import nan
from numpy.random import randn
import pytest
import numpy as np
import pandas.core.common as com
from pandas import (DataFrame, Index, Series, notnull, isnull,
MultiIndex, DatetimeIndex, Timestamp,
date_range)
import pandas as pd
from pandas._libs.tslib import iNaT
from pandas.tseries.offsets import BDay
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer,
is_scalar)
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
from pandas.core.indexing import IndexingError
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameIndexing(TestData):
    def test_getitem(self):
        """Basic __getitem__: slices, column access, and missing keys."""
        # Slicing
        sl = self.frame[:20]
        assert len(sl.index) == 20
        # Column access
        for _, series in compat.iteritems(sl):
            assert len(series.index) == 20
            assert tm.equalContents(series.index, sl.index)
        for key, _ in compat.iteritems(self.frame._series):
            assert self.frame[key] is not None
        # Missing labels raise KeyError naming the offending key.
        assert 'random' not in self.frame
        with tm.assert_raises_regex(KeyError, 'random'):
            self.frame['random']
        df = self.frame.copy()
        # Column names with special characters must round-trip intact.
        df['$10'] = randn(len(df))
        ad = randn(len(df))
        df['@awesome_domain'] = ad
        with pytest.raises(KeyError):
            df.__getitem__('df["$10"]')
        res = df['@awesome_domain']
        tm.assert_numpy_array_equal(ad, res.values)
def test_getitem_dupe_cols(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
try:
df[['baf']]
except KeyError:
pass
else:
self.fail("Dataframe failed to raise KeyError")
    def test_get(self):
        """DataFrame.get returns the column, or the supplied default."""
        b = self.frame.get('B')
        assert_series_equal(b, self.frame['B'])
        # Missing key: None without a default, the default otherwise.
        assert self.frame.get('foo') is None
        assert_series_equal(self.frame.get('foo', self.frame['B']),
                            self.frame['B'])
        # None
        # GH 5652
        # get(None) is None for empty and non-empty frames alike.
        for df in [DataFrame(), DataFrame(columns=list('AB')),
                   DataFrame(columns=list('AB'), index=range(3))]:
            result = df.get(None)
            assert result is None
    def test_getitem_iterator(self):
        """.loc accepts an exhaustible iterator of column labels."""
        idx = iter(['A', 'B', 'C'])
        result = self.frame.loc[:, idx]
        expected = self.frame.loc[:, ['A', 'B', 'C']]
        assert_frame_equal(result, expected)
        # NOTE(review): this second half repeats the first verbatim; it
        # may originally have exercised a different indexer -- worth
        # confirming against the upstream history.
        idx = iter(['A', 'B', 'C'])
        result = self.frame.loc[:, idx]
        expected = self.frame.loc[:, ['A', 'B', 'C']]
        assert_frame_equal(result, expected)
    def test_getitem_list(self):
        """List / Index selection preserves order and column metadata."""
        self.frame.columns.name = 'foo'
        result = self.frame[['B', 'A']]
        result2 = self.frame[Index(['B', 'A'])]
        expected = self.frame.loc[:, ['B', 'A']]
        expected.columns.name = 'foo'
        assert_frame_equal(result, expected)
        assert_frame_equal(result2, expected)
        # The columns' name attribute must survive the selection.
        assert result.columns.name == 'foo'
        # Any absent label in the list makes the whole lookup fail.
        with tm.assert_raises_regex(KeyError, 'not in index'):
            self.frame[['B', 'A', 'food']]
        with tm.assert_raises_regex(KeyError, 'not in index'):
            self.frame[Index(['B', 'A', 'foo'])]
        # tuples
        # Tuple labels (flat MultiIndex columns) select columns and keep
        # the level names.
        df = DataFrame(randn(8, 3),
                       columns=Index([('foo', 'bar'), ('baz', 'qux'),
                                      ('peek', 'aboo')], name=['sth', 'sth2']))
        result = df[[('foo', 'bar'), ('baz', 'qux')]]
        expected = df.iloc[:, :2]
        assert_frame_equal(result, expected)
        assert result.columns.names == ['sth', 'sth2']
def test_getitem_callable(self):
# GH 12533
result = self.frame[lambda x: 'A']
tm.assert_series_equal(result, self.frame.loc[:, 'A'])
result = self.frame[lambda x: ['A', 'B']]
tm.assert_frame_equal(result, self.frame.loc[:, ['A', 'B']])
df = self.frame[:3]
result = df[lambda x: [True, False, True]]
tm.assert_frame_equal(result, self.frame.iloc[[0, 2], :])
    def test_setitem_list(self):
        """Assigning to a list of columns aligns by position, not label."""
        self.frame['E'] = 'foo'
        data = self.frame[['A', 'B']]
        # Swapped targets receive the source columns positionally.
        self.frame[['B', 'A']] = data
        assert_series_equal(self.frame['B'], data['A'], check_names=False)
        assert_series_equal(self.frame['A'], data['B'], check_names=False)
        # Mismatched key/value widths and lengths are rejected.
        with tm.assert_raises_regex(ValueError,
                                    'Columns must be same length as key'):
            data[['A']] = self.frame[['A', 'B']]
        with tm.assert_raises_regex(ValueError, 'Length of values '
                                    'does not match '
                                    'length of index'):
            data['A'] = range(len(data.index) - 1)
        # Row-wise list assignment keeps the existing integer dtype.
        df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)
        df.loc[1, ['tt1', 'tt2']] = [1, 2]
        result = df.loc[df.index[1], ['tt1', 'tt2']]
        expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
        assert_series_equal(result, expected)
        # Same pattern with object (string) columns.
        df['tt1'] = df['tt2'] = '0'
        df.loc[df.index[1], ['tt1', 'tt2']] = ['1', '2']
        result = df.loc[df.index[1], ['tt1', 'tt2']]
        expected = Series(['1', '2'], df.columns, name=1)
        assert_series_equal(result, expected)
def test_setitem_list_not_dataframe(self):
data = np.random.randn(len(self.frame), 2)
self.frame[['A', 'B']] = data
assert_almost_equal(self.frame[['A', 'B']].values, data)
def test_setitem_list_of_tuples(self):
tuples = lzip(self.frame['A'], self.frame['B'])
self.frame['tuples'] = tuples
result = self.frame['tuples']
expected = Series(tuples, index=self.frame.index, name='tuples')
assert_series_equal(result, expected)
def test_setitem_mulit_index(self):
# GH7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ['jim', 'joe', 'jolie'], ['first', 'last'], \
['left', 'center', 'right']
cols = MultiIndex.from_product(it)
index = pd.date_range('20141006', periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = pd.DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df['jim'] = df['jolie'].loc[i, ::-1]
assert_frame_equal(df['jim'], df['jolie'])
np.random.shuffle(j)
df[('joe', 'first')] = df[('jolie', 'last')].loc[i, j]
assert_frame_equal(df[('joe', 'first')], df[('jolie', 'last')])
np.random.shuffle(j)
df[('joe', 'last')] = df[('jolie', 'first')].loc[i, j]
assert_frame_equal(df[('joe', 'last')], df[('jolie', 'first')])
def test_setitem_callable(self):
# GH 12533
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [5, 6, 7, 8]})
df[lambda x: 'A'] = [11, 12, 13, 14]
exp = pd.DataFrame({'A': [11, 12, 13, 14], 'B': [5, 6, 7, 8]})
tm.assert_frame_equal(df, exp)
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
df = pd.DataFrame([[-1, 1], [1, -1]])
df[df > 0] = inc
expected = pd.DataFrame([[-1, inc], [inc, -1]])
tm.assert_frame_equal(df, expected)
def test_getitem_boolean(self):
# boolean indexing
d = self.tsframe.index[10]
indexer = self.tsframe.index > d
indexer_obj = indexer.astype(object)
subindex = self.tsframe.index[indexer]
subframe = self.tsframe[indexer]
tm.assert_index_equal(subindex, subframe.index)
with tm.assert_raises_regex(ValueError, 'Item wrong length'):
self.tsframe[indexer[:-1]]
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
with tm.assert_raises_regex(ValueError, 'boolean values only'):
self.tsframe[self.tsframe]
# test that Series work
indexer_obj = Series(indexer_obj, self.tsframe.index)
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [self.tsframe, self.mixed_frame,
self.mixed_float, self.mixed_int]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(dict([(c, np.where(data[c] > 0, data[c], np.nan))
for c in data.columns]),
index=data.index, columns=data.columns)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self):
# don't upcast if we don't need to
df = self.tsframe.copy()
df['E'] = 1
df['E'] = df['E'].astype('int32')
df['E1'] = df['E'].copy()
df['F'] = 1
df['F'] = df['F'].astype('int64')
df['F1'] = df['F'].copy()
casted = df[df > 0]
result = casted.get_dtype_counts()
expected = Series({'float64': 4, 'int32': 2, 'int64': 2})
assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ['E1', 'F1']] = 0
casted = df[df > 0]
result = casted.get_dtype_counts()
expected = Series({'float64': 6, 'int32': 1, 'int64': 1})
assert_series_equal(result, expected)
# where dtype conversions
# GH 3733
df = DataFrame(data=np.random.randn(100, 50))
df = df.where(df > 0) # create nans
bools = df > 0
mask = isnull(df)
expected = bools.astype(float).mask(mask)
result = bools.mask(mask)
assert_frame_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = randn(5, 5)
df = DataFrame(arr.copy(), columns=['A', 'B', 'C', 'D', 'E'])
df[df < 0] += 1
arr[arr < 0] += 1
assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=['A'],
index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(np.random.randn(4, 3),
index=[1, 10, 'C', 'E'], columns=[1, 2, 3])
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
assert_frame_equal(result, expected)
with catch_warnings(record=True):
result = df.ix[[1, 10]]
expected = df.ix[Index([1, 10], dtype=object)]
assert_frame_equal(result, expected)
# 11320
df = pd.DataFrame({"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30]},
columns=['rna', -1000, 0, 1000])
result = df[[1000]]
expected = df.iloc[:, [3]]
assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
assert_frame_equal(result, expected)
def test_getitem_setitem_ix_negative_integers(self):
with catch_warnings(record=True):
result = self.frame.ix[:, -1]
assert_series_equal(result, self.frame['D'])
with catch_warnings(record=True):
result = self.frame.ix[:, [-1]]
assert_frame_equal(result, self.frame[['D']])
with catch_warnings(record=True):
result = self.frame.ix[:, [-1, -2]]
assert_frame_equal(result, self.frame[['D', 'C']])
with catch_warnings(record=True):
self.frame.ix[:, [-1]] = 0
assert (self.frame['D'] == 0).all()
df = DataFrame(np.random.randn(8, 4))
with catch_warnings(record=True):
assert isnull(df.ix[:, [-1]].values).all()
# #1942
a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])
with catch_warnings(record=True):
a.ix[-1] = a.ix[-2]
with catch_warnings(record=True):
assert_series_equal(a.ix[-1], a.ix[-2], check_names=False)
assert a.ix[-1].name == 'T'
assert a.ix[-2].name == 'S'
def test_getattr(self):
assert_series_equal(self.frame.A, self.frame['A'])
pytest.raises(AttributeError, getattr, self.frame,
'NONEXISTENT_NAME')
def test_setattr_column(self):
df = DataFrame({'foobar': 1}, index=lrange(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self):
# not sure what else to do here
series = self.frame['A'][::2]
self.frame['col5'] = series
assert 'col5' in self.frame
assert len(series) == 15
assert len(self.frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=self.frame.index, name='col5')
tm.assert_series_equal(self.frame['col5'], exp)
series = self.frame['A']
self.frame['col6'] = series
tm.assert_series_equal(series, self.frame['col6'], check_names=False)
with pytest.raises(KeyError):
self.frame[randn(len(self.frame) + 1)] = 1
# set ndarray
arr = randn(len(self.frame))
self.frame['col9'] = arr
assert (self.frame['col9'] == arr).all()
self.frame['col7'] = 5
assert((self.frame['col7'] == 5).all())
self.frame['col0'] = 3.14
assert((self.frame['col0'] == 3.14).all())
self.frame['col8'] = 'foo'
assert((self.frame['col8'] == 'foo').all())
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = self.frame[:2]
def f():
smaller['col10'] = ['1', '2']
pytest.raises(com.SettingWithCopyError, f)
assert smaller['col10'].dtype == np.object_
assert (smaller['col10'] == ['1', '2']).all()
# with a dtype
for dtype in ['int32', 'int64', 'float32', 'float64']:
self.frame[dtype] = np.array(arr, dtype=dtype)
assert self.frame[dtype].dtype.name == dtype
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
assert_frame_equal(df, expected)
def test_setitem_tuple(self):
self.frame['A', 'B'] = self.frame['A']
assert_series_equal(self.frame['A', 'B'], self.frame[
'A'], check_names=False)
def test_setitem_always_copy(self):
s = self.frame['A'].copy()
self.frame['E'] = s
self.frame['E'][5:10] = nan
assert notnull(s[5:10]).all()
def test_setitem_boolean(self):
df = self.frame.copy()
values = self.frame.values
df[df['A'] > 0] = 4
values[values[:, 0] > 0] = 4
assert_almost_equal(df.values, values)
# test that column reindexing works
series = df['A'] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
assert_almost_equal(df.values, values)
with tm.assert_raises_regex(TypeError, 'Must pass '
'DataFrame with '
'boolean values only'):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = nan
expected.values[mask.values] = nan
assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
assert_frame_equal(df, expected)
def test_setitem_cast(self):
self.frame['D'] = self.frame['D'].astype('i8')
assert self.frame['D'].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
self.frame['B'] = 0
assert self.frame['B'].dtype == np.int64
# cast if pass array of course
self.frame['B'] = np.arange(len(self.frame))
assert issubclass(self.frame['B'].dtype.type, np.integer)
self.frame['foo'] = 'bar'
self.frame['foo'] = 0
assert self.frame['foo'].dtype == np.int64
self.frame['foo'] = 'bar'
self.frame['foo'] = 2.5
assert self.frame['foo'].dtype == np.float64
self.frame['something'] = 0
assert self.frame['something'].dtype == np.int64
self.frame['something'] = 2
assert self.frame['something'].dtype == np.int64
self.frame['something'] = 2.5
assert self.frame['something'].dtype == np.float64
# GH 7704
# dtype conversion on setting
df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))
df['event'] = np.nan
df.loc[10, 'event'] = 'foo'
result = df.get_dtype_counts().sort_values()
expected = Series({'float64': 3, 'object': 1}).sort_values()
assert_series_equal(result, expected)
# Test that data type is preserved . #5782
df = DataFrame({'one': np.arange(6, dtype=np.int8)})
df.loc[1, 'one'] = 6
assert df.dtypes.one == np.dtype(np.int8)
df.one = np.int8(7)
assert df.dtypes.one == np.dtype(np.int8)
def test_setitem_boolean_column(self):
expected = self.frame.copy()
mask = self.frame['A'] > 0
self.frame.loc[mask, 'B'] = 0
expected.values[mask.values, 1] = 0
assert_frame_equal(self.frame, expected)
def test_setitem_corner(self):
# corner case
df = DataFrame({'B': [1., 2., 3.],
'C': ['a', 'b', 'c']},
index=np.arange(3))
del df['B']
df['B'] = [1., 2., 3.]
assert 'B' in df
assert len(df.columns) == 2
df['A'] = 'beginning'
df['E'] = 'foo'
df['D'] = 'bar'
df[datetime.now()] = 'date'
df[datetime.now()] = 5.
# what to do when empty frame with index
dm = DataFrame(index=self.frame.index)
dm['A'] = 'foo'
dm['B'] = 'bar'
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm['C'] = 1
assert dm['C'].dtype == np.int64
dm['E'] = 1.
assert dm['E'].dtype == np.float64
# set existing column
dm['A'] = 'bar'
assert 'bar' == dm['A'][0]
dm = DataFrame(index=np.arange(3))
dm['A'] = 1
dm['foo'] = 'bar'
del dm['foo']
dm['foo'] = 'bar'
assert dm['foo'].dtype == np.object_
dm['coercable'] = ['1', '2', '3']
assert dm['coercable'].dtype == np.object_
def test_setitem_corner2(self):
data = {"title": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,
"cruft": np.random.random(20)}
df = DataFrame(data)
ix = df[df['title'] == 'bar'].index
df.loc[ix, ['title']] = 'foobar'
df.loc[ix, ['cruft']] = 0
assert df.loc[1, 'title'] == 'foobar'
assert df.loc[1, 'cruft'] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=lrange(3), columns=lrange(3))
coercable_series = Series([Decimal(1) for _ in range(3)],
index=lrange(3))
uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_clear_caches(self):
# see gh-304
df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},
index=[0, 1, 2, 3])
df.insert(2, 'z', np.nan)
# cache it
foo = df['z']
df.loc[df.index[2:], 'z'] = 42
expected = Series([np.nan, np.nan, 42, 42], index=df.index, name='z')
assert df['z'] is not foo
tm.assert_series_equal(df['z'], expected)
def test_setitem_None(self):
# GH #766
self.frame[None] = self.frame['A']
assert_series_equal(
self.frame.iloc[:, -1], self.frame['A'], check_names=False)
assert_series_equal(self.frame.loc[:, None], self.frame[
'A'], check_names=False)
assert_series_equal(self.frame[None], self.frame[
'A'], check_names=False)
repr(self.frame)
def test_setitem_empty(self):
# GH 9596
df = pd.DataFrame({'a': ['1', '2', '3'],
'b': ['11', '22', '33'],
'c': ['111', '222', '333']})
result = df.copy()
result.loc[result.b.isnull(), 'a'] = result.a
assert_frame_equal(result, df)
def test_setitem_empty_frame_with_boolean(self):
# Test for issue #10126
for dtype in ('float', 'int64'):
for df in [
pd.DataFrame(dtype=dtype),
pd.DataFrame(dtype=dtype, index=[1]),
pd.DataFrame(dtype=dtype, columns=['A']),
]:
df2 = df.copy()
df[df > df2] = 47
assert_frame_equal(df, df2)
def test_getitem_empty_frame_with_boolean(self):
# Test for issue #11859
df = pd.DataFrame()
df2 = df[df > 0]
assert_frame_equal(df, df2)
def test_delitem_corner(self):
f = self.frame.copy()
del f['D']
assert len(f.columns) == 3
pytest.raises(KeyError, f.__delitem__, 'D')
del f['B']
assert len(f.columns) == 2
def test_getitem_fancy_2d(self):
f = self.frame
with catch_warnings(record=True):
assert_frame_equal(f.ix[:, ['B', 'A']],
f.reindex(columns=['B', 'A']))
subidx = self.frame.index[[5, 4, 1]]
with catch_warnings(record=True):
assert_frame_equal(f.ix[subidx, ['B', 'A']],
f.reindex(index=subidx, columns=['B', 'A']))
# slicing rows, etc.
with catch_warnings(record=True):
assert_frame_equal(f.ix[5:10], f[5:10])
assert_frame_equal(f.ix[5:10, :], f[5:10])
assert_frame_equal(f.ix[:5, ['A', 'B']],
f.reindex(index=f.index[:5],
columns=['A', 'B']))
# slice rows with labels, inclusive!
with catch_warnings(record=True):
expected = f.ix[5:11]
result = f.ix[f.index[5]:f.index[10]]
assert_frame_equal(expected, result)
# slice columns
with catch_warnings(record=True):
assert_frame_equal(f.ix[:, :2], f.reindex(columns=['A', 'B']))
# get view
with catch_warnings(record=True):
exp = f.copy()
f.ix[5:10].values[:] = 5
exp.values[5:10] = 5
assert_frame_equal(f, exp)
with catch_warnings(record=True):
pytest.raises(ValueError, f.ix.__getitem__, f > 0.5)
def test_slice_floats(self):
index = [52195.504153, 52196.303147, 52198.369883]
df = DataFrame(np.random.rand(3, 2), index=index)
s1 = df.loc[52195.1:52196.5]
assert len(s1) == 2
s1 = df.loc[52195.1:52196.6]
assert len(s1) == 2
s1 = df.loc[52195.1:52198.9]
assert len(s1) == 3
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isnull(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]
pytest.raises(KeyError, df2.loc.__getitem__, slice(3, 11))
pytest.raises(KeyError, df2.loc.__setitem__, slice(3, 11), 0)
def test_setitem_fancy_2d(self):
# case 1
frame = self.frame.copy()
expected = frame.copy()
with catch_warnings(record=True):
frame.ix[:, ['B', 'A']] = 1
expected['B'] = 1.
expected['A'] = 1.
assert_frame_equal(frame, expected)
# case 2
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = frame.copy()
subidx = self.frame.index[[5, 4, 1]]
values = randn(3, 2)
with catch_warnings(record=True):
frame.ix[subidx, ['B', 'A']] = values
frame2.ix[[5, 4, 1], ['B', 'A']] = values
expected['B'].ix[subidx] = values[:, 0]
expected['A'].ix[subidx] = values[:, 1]
assert_frame_equal(frame, expected)
assert_frame_equal(frame2, expected)
# case 3: slicing rows, etc.
frame = self.frame.copy()
with catch_warnings(record=True):
expected1 = self.frame.copy()
frame.ix[5:10] = 1.
expected1.values[5:10] = 1.
assert_frame_equal(frame, expected1)
with catch_warnings(record=True):
expected2 = self.frame.copy()
arr = randn(5, len(frame.columns))
frame.ix[5:10] = arr
expected2.values[5:10] = arr
assert_frame_equal(frame, expected2)
# case 4
with catch_warnings(record=True):
frame = self.frame.copy()
frame.ix[5:10, :] = 1.
assert_frame_equal(frame, expected1)
frame.ix[5:10, :] = arr
assert_frame_equal(frame, expected2)
# case 5
with catch_warnings(record=True):
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
values = randn(5, 2)
frame.ix[:5, ['A', 'B']] = values
expected['A'][:5] = values[:, 0]
expected['B'][:5] = values[:, 1]
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
frame2.ix[:5, [0, 1]] = values
assert_frame_equal(frame2, expected)
# case 6: slice rows with labels, inclusive!
with catch_warnings(record=True):
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[frame.index[5]:frame.index[10]] = 5.
expected.values[5:11] = 5
assert_frame_equal(frame, expected)
# case 7: slice columns
with catch_warnings(record=True):
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
# slice indices
frame.ix[:, 1:3] = 4.
expected.values[:, 1:3] = 4.
assert_frame_equal(frame, expected)
# slice with labels
frame.ix[:, 'B':'C'] = 4.
assert_frame_equal(frame, expected)
# new corner case of boolean slicing / setting
frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),
columns=['a', 'b'])
lst = [100]
lst.extend([np.nan] * 4)
expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),
columns=['a', 'b'])
frame[frame['a'] == 2] = 100
assert_frame_equal(frame, expected)
def test_fancy_getitem_slice_mixed(self):
    """Negative .iloc column slices keep dtypes; assigning into a slice of
    self.frame raises SettingWithCopyError yet the write still lands."""
    sliced = self.mixed_frame.iloc[:, -3:]
    assert sliced['D'].dtype == np.float64

    # get view with single block
    # setting it triggers setting with copy
    sliced = self.frame.iloc[:, -3:]

    def f():
        sliced['C'] = 4.
    pytest.raises(com.SettingWithCopyError, f)
    # the chained assignment above did modify the parent frame
    assert (self.frame['C'] == 4).all()
def test_fancy_setitem_int_labels(self):
    """Assignment through deprecated .ix on an integer-labelled frame is
    label-based, not positional (warnings suppressed via catch_warnings)."""
    # integer index defers to label-based indexing

    df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))

    with catch_warnings(record=True):
        tmp = df.copy()
        exp = df.copy()
        tmp.ix[[0, 2, 4]] = 5
        exp.values[:3] = 5
        assert_frame_equal(tmp, exp)

    with catch_warnings(record=True):
        tmp = df.copy()
        exp = df.copy()
        tmp.ix[6] = 5
        exp.values[3] = 5
        assert_frame_equal(tmp, exp)

    with catch_warnings(record=True):
        tmp = df.copy()
        exp = df.copy()
        tmp.ix[:, 2] = 5

        # tmp correctly sets the dtype
        # so match the exp way
        exp[2] = 5
        assert_frame_equal(tmp, exp)
def test_fancy_getitem_int_labels(self):
    """Deprecated .ix lookups with integer labels select by label and agree
    with reindex / xs / plain column access."""
    df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))

    with catch_warnings(record=True):
        result = df.ix[[4, 2, 0], [2, 0]]
        expected = df.reindex(index=[4, 2, 0], columns=[2, 0])
        assert_frame_equal(result, expected)

    with catch_warnings(record=True):
        result = df.ix[[4, 2, 0]]
        expected = df.reindex(index=[4, 2, 0])
        assert_frame_equal(result, expected)

    with catch_warnings(record=True):
        result = df.ix[4]
        expected = df.xs(4)
        assert_series_equal(result, expected)

    with catch_warnings(record=True):
        result = df.ix[:, 3]
        expected = df[3]
        assert_series_equal(result, expected)
def test_fancy_index_int_labels_exceptions(self):
    """Setting through .ix with labels missing from the axis raises
    KeyError (partial-setting exception documented in GH 2578)."""
    df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))

    with catch_warnings(record=True):

        # labels that aren't contained
        pytest.raises(KeyError, df.ix.__setitem__,
                      ([0, 1, 2], [2, 3, 4]), 5)

        # try to set indices not contained in frame
        pytest.raises(KeyError, self.frame.ix.__setitem__,
                      ['foo', 'bar', 'baz'], 1)
        pytest.raises(KeyError, self.frame.ix.__setitem__,
                      (slice(None, None), ['E']), 1)

        # partial setting now allows this GH2578
        # pytest.raises(KeyError, self.frame.ix.__setitem__,
        #               (slice(None, None), 'E'), 1)
def test_setitem_fancy_mixed_2d(self):
    """2-D .ix assignment on a mixed-dtype frame: scalar blocks, NaN rows,
    row-to-row copies, and list assignment into a mixed frame (GH 1432)."""
    with catch_warnings(record=True):
        self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
        result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
        assert (result.values == 5).all()

        self.mixed_frame.ix[5] = np.nan
        assert isnull(self.mixed_frame.ix[5]).all()

        self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
        assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6],
                            check_names=False)

    # #1432
    with catch_warnings(record=True):
        df = DataFrame({1: [1., 2., 3.],
                        2: [3, 4, 5]})
        assert df._is_mixed_type

        df.ix[1] = [5, 10]

        expected = DataFrame({1: [1., 5., 3.],
                              2: [3, 10, 5]})
        assert_frame_equal(df, expected)
def test_ix_align(self):
    """A Series assigned through .ix aligns on the index, for whole
    columns/rows and for slice or integer-list sub-selections."""
    b = Series(randn(10), name=0).sort_values()
    df_orig = DataFrame(randn(10, 4))
    df = df_orig.copy()

    with catch_warnings(record=True):
        df.ix[:, 0] = b
        assert_series_equal(df.ix[:, 0].reindex(b.index), b)

    with catch_warnings(record=True):
        dft = df_orig.T
        dft.ix[0, :] = b
        assert_series_equal(dft.ix[0, :].reindex(b.index), b)

    with catch_warnings(record=True):
        df = df_orig.copy()
        df.ix[:5, 0] = b
        s = df.ix[:5, 0]
        assert_series_equal(s, b.reindex(s.index))

    with catch_warnings(record=True):
        dft = df_orig.T
        dft.ix[0, :5] = b
        s = dft.ix[0, :5]
        assert_series_equal(s, b.reindex(s.index))

    with catch_warnings(record=True):
        df = df_orig.copy()
        idx = [0, 1, 3, 5]
        df.ix[idx, 0] = b
        s = df.ix[idx, 0]
        assert_series_equal(s, b.reindex(s.index))

    with catch_warnings(record=True):
        dft = df_orig.T
        dft.ix[0, idx] = b
        s = dft.ix[0, idx]
        assert_series_equal(s, b.reindex(s.index))
def test_ix_frame_align(self):
    """A DataFrame assigned through .ix aligns on the index before
    setting, for slice and integer-list row indexers."""
    b = DataFrame(np.random.randn(3, 4))
    df_orig = DataFrame(randn(10, 4))
    df = df_orig.copy()

    with catch_warnings(record=True):
        df.ix[:3] = b
        out = b.ix[:3]
        assert_frame_equal(out, b)

    b.sort_index(inplace=True)

    with catch_warnings(record=True):
        df = df_orig.copy()
        df.ix[[0, 1, 2]] = b
        out = df.ix[[0, 1, 2]].reindex(b.index)
        assert_frame_equal(out, b)

    with catch_warnings(record=True):
        df = df_orig.copy()
        df.ix[:3] = b
        out = df.ix[:3]
        assert_frame_equal(out, b.reindex(out.index))
def test_getitem_setitem_non_ix_labels(self):
    """Label slicing with [] and .loc is endpoint-inclusive, for both
    reading and writing."""
    df = tm.makeTimeDataFrame()

    start, end = df.index[[5, 10]]

    result = df.loc[start:end]
    result2 = df[start:end]
    expected = df[5:11]
    assert_frame_equal(result, expected)
    assert_frame_equal(result2, expected)

    result = df.copy()
    result.loc[start:end] = 0
    result2 = df.copy()
    result2[start:end] = 0
    expected = df.copy()
    expected[5:11] = 0
    assert_frame_equal(result, expected)
    assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
    """A boolean row mask through .loc matches reindex on the selected
    labels; the trailing string is a disabled case from GH 1321."""
    df = DataFrame(np.random.randn(3, 2))
    rs = df.loc[df.index == 0, :]
    xp = df.reindex([0])
    assert_frame_equal(rs, xp)

    """ #1321
    df = DataFrame(np.random.randn(3, 2))
    rs = df.loc[df.index==0, df.columns==1]
    xp = df.reindex([0], [1])
    assert_frame_equal(rs, xp)
    """
def test_ix_multi_take_nonint_index(self):
    """.ix with integer lists on string-labelled axes takes positionally."""
    df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
                   columns=['a', 'b'])
    with catch_warnings(record=True):
        rs = df.ix[[0], [0]]
        xp = df.reindex(['x'], columns=['a'])
        assert_frame_equal(rs, xp)
def test_ix_multi_take_multiindex(self):
    """.ix positional takes work against MultiIndex columns too."""
    df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
                   columns=[['a', 'b'], ['1', '2']])
    with catch_warnings(record=True):
        rs = df.ix[[0], [0]]
        xp = df.reindex(['x'], columns=[('a', '1')])
        assert_frame_equal(rs, xp)
def test_ix_dup(self):
    """.ix label slicing on an index with duplicate labels includes every
    duplicate position in the result."""
    idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
    df = DataFrame(np.random.randn(len(idx), 3), idx)

    with catch_warnings(record=True):
        sub = df.ix[:'d']
        assert_frame_equal(sub, df)

    with catch_warnings(record=True):
        sub = df.ix['a':'c']
        assert_frame_equal(sub, df.ix[0:4])

    with catch_warnings(record=True):
        sub = df.ix['b':'d']
        assert_frame_equal(sub, df.ix[2:])
def test_getitem_fancy_1d(self):
    """Low-dimensional .ix getitem: identity on full slices, label/positional
    cross-sections, single columns, and that row/column slices are views
    (mutating them mutates the parent frame)."""
    f = self.frame

    # return self if no slicing...for now
    with catch_warnings(record=True):
        assert f.ix[:, :] is f

    # low dimensional slice
    with catch_warnings(record=True):
        xs1 = f.ix[2, ['C', 'B', 'A']]
        xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])
        tm.assert_series_equal(xs1, xs2)

    with catch_warnings(record=True):
        ts1 = f.ix[5:10, 2]
        ts2 = f[f.columns[2]][5:10]
        tm.assert_series_equal(ts1, ts2)

    # positional xs
    with catch_warnings(record=True):
        xs1 = f.ix[0]
        xs2 = f.xs(f.index[0])
        tm.assert_series_equal(xs1, xs2)

    with catch_warnings(record=True):
        xs1 = f.ix[f.index[5]]
        xs2 = f.xs(f.index[5])
        tm.assert_series_equal(xs1, xs2)

    # single column
    with catch_warnings(record=True):
        assert_series_equal(f.ix[:, 'A'], f['A'])

    # return view
    with catch_warnings(record=True):
        exp = f.copy()
        exp.values[5] = 4
        f.ix[5][:] = 4
        tm.assert_frame_equal(exp, f)

    with catch_warnings(record=True):
        exp.values[:, 1] = 6
        f.ix[:, 1][:] = 6
        tm.assert_frame_equal(exp, f)

    # slice of mixed-frame
    with catch_warnings(record=True):
        xs = self.mixed_frame.ix[5]
        exp = self.mixed_frame.xs(self.mixed_frame.index[5])
        tm.assert_series_equal(xs, exp)
def test_setitem_fancy_1d(self):
    """Low-dimensional .ix setitem: label and positional cross-sections,
    column sections, full rows, and whole columns all agree with the
    equivalent direct value writes."""
    # case 1: set cross-section for indices
    frame = self.frame.copy()
    expected = self.frame.copy()

    with catch_warnings(record=True):
        frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]
        expected['C'][2] = 1.
        expected['B'][2] = 2.
        expected['A'][2] = 3.
        assert_frame_equal(frame, expected)

    with catch_warnings(record=True):
        frame2 = self.frame.copy()
        frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
        assert_frame_equal(frame, expected)

    # case 2, set a section of a column
    frame = self.frame.copy()
    expected = self.frame.copy()

    with catch_warnings(record=True):
        vals = randn(5)
        expected.values[5:10, 2] = vals
        frame.ix[5:10, 2] = vals
        assert_frame_equal(frame, expected)

    with catch_warnings(record=True):
        frame2 = self.frame.copy()
        frame2.ix[5:10, 'B'] = vals
        assert_frame_equal(frame, expected)

    # case 3: full xs
    frame = self.frame.copy()
    expected = self.frame.copy()

    with catch_warnings(record=True):
        frame.ix[4] = 5.
        expected.values[4] = 5.
        assert_frame_equal(frame, expected)

    with catch_warnings(record=True):
        frame.ix[frame.index[4]] = 6.
        expected.values[4] = 6.
        assert_frame_equal(frame, expected)

    # single column
    frame = self.frame.copy()
    expected = self.frame.copy()

    with catch_warnings(record=True):
        frame.ix[:, 'A'] = 7.
        expected['A'] = 7.
        assert_frame_equal(frame, expected)
def test_getitem_fancy_scalar(self):
    """Scalar .loc[idx, col] access matches column-then-row indexing."""
    f = self.frame
    ix = f.loc

    # individual value
    for col in f.columns:
        ts = f[col]
        for idx in f.index[::5]:
            assert ix[idx, col] == ts[idx]
def test_setitem_fancy_scalar(self):
    """Scalar .loc[idx, col] assignment matches writing into .values."""
    f = self.frame
    expected = self.frame.copy()
    ix = f.loc

    # individual value
    for j, col in enumerate(f.columns):
        ts = f[col]  # noqa
        for idx in f.index[::5]:
            i = f.index.get_loc(idx)
            val = randn()
            expected.values[i, j] = val
            ix[idx, col] = val
            assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self):
    """Boolean masks through .loc: column masks, row masks, and both at
    once all match the equivalent reindex."""
    f = self.frame
    ix = f.loc

    expected = f.reindex(columns=['B', 'D'])
    result = ix[:, [False, True, False, True]]
    assert_frame_equal(result, expected)

    expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])
    result = ix[f.index[5:10], [False, True, False, True]]
    assert_frame_equal(result, expected)

    boolvec = f.index > f.index[7]
    expected = f.reindex(index=f.index[boolvec])

    result = ix[boolvec]
    assert_frame_equal(result, expected)
    result = ix[boolvec, :]
    assert_frame_equal(result, expected)

    result = ix[boolvec, f.columns[2:]]
    expected = f.reindex(index=f.index[boolvec],
                         columns=['C', 'D'])
    assert_frame_equal(result, expected)
def test_setitem_fancy_boolean(self):
    """Boolean row masks through .loc set values like the equivalent
    .values mask write, with or without a column subset."""
    # from 2d, set with booleans
    frame = self.frame.copy()
    expected = self.frame.copy()

    mask = frame['A'] > 0
    frame.loc[mask] = 0.
    expected.values[mask.values] = 0.
    assert_frame_equal(frame, expected)

    frame = self.frame.copy()
    expected = self.frame.copy()
    frame.loc[mask, ['A', 'B']] = 0.
    expected.values[mask.values, :2] = 0.
    assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self):
    """Integer-list .iloc selection equals label selection via .loc on the
    corresponding index/column labels."""
    result = self.frame.iloc[[1, 4, 7]]
    expected = self.frame.loc[self.frame.index[[1, 4, 7]]]
    assert_frame_equal(result, expected)

    result = self.frame.iloc[:, [2, 0, 1]]
    expected = self.frame.loc[:, self.frame.columns[[2, 0, 1]]]
    assert_frame_equal(result, expected)
def test_getitem_setitem_fancy_exceptions(self):
    """Supplying more indexers than the frame has axes raises
    IndexingError for both getitem and setitem."""
    ix = self.frame.iloc
    with tm.assert_raises_regex(IndexingError, 'Too many indexers'):
        ix[:, :, :]

    with pytest.raises(IndexingError):
        ix[:, :, :] = 1
def test_getitem_setitem_boolean_misaligned(self):
    """A boolean Series indexer with reversed label order is realigned
    before selection/assignment."""
    # boolean index misaligned labels
    mask = self.frame['A'][::-1] > 1

    result = self.frame.loc[mask]
    expected = self.frame.loc[mask[::-1]]
    assert_frame_equal(result, expected)

    cp = self.frame.copy()
    expected = self.frame.copy()
    cp.loc[mask] = 0
    expected.loc[mask] = 0
    assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
    """Simultaneous boolean row and column masks in .loc select/assign the
    intersection of True positions."""
    df = DataFrame(np.random.randn(3, 2))

    # get
    k1 = np.array([True, False, True])
    k2 = np.array([False, True])
    result = df.loc[k1, k2]
    expected = df.loc[[0, 2], [1]]
    assert_frame_equal(result, expected)

    expected = df.copy()
    df.loc[np.array([True, False, True]),
           np.array([False, True])] = 5
    expected.loc[[0, 2], [1]] = 5
    assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
    """Slicing on float-labelled indexes: .loc slices by label (inclusive
    of both float endpoints) while .iloc rejects float positional slices
    with TypeError (GH 2727)."""
    index = Index([1.5, 2, 3, 4, 5])
    df = DataFrame(np.random.randn(5, 5), index=index)

    result = df.loc[1.5:4]
    expected = df.reindex([1.5, 2, 3, 4])
    assert_frame_equal(result, expected)
    assert len(result) == 4

    result = df.loc[4:5]
    expected = df.reindex([4, 5])  # reindex with int
    assert_frame_equal(result, expected, check_index_type=False)
    assert len(result) == 2

    result = df.loc[4:5]
    expected = df.reindex([4.0, 5.0])  # reindex with float
    assert_frame_equal(result, expected)
    assert len(result) == 2

    # loc_float changes this to work properly
    result = df.loc[1:2]
    expected = df.iloc[0:2]
    assert_frame_equal(result, expected)

    df.loc[1:2] = 0
    result = df[1:2]
    assert (result == 0).all().all()

    # #2727
    index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
    df = DataFrame(np.random.randn(5, 5), index=index)

    # positional slicing only via iloc!
    pytest.raises(TypeError, lambda: df.iloc[1.0:5])

    result = df.iloc[4:5]
    expected = df.reindex([5.0])
    assert_frame_equal(result, expected)
    assert len(result) == 1

    cp = df.copy()

    def f():
        cp.iloc[1.0:5] = 0
    pytest.raises(TypeError, f)

    def f():
        result = cp.iloc[1.0:5] == 0  # noqa
    pytest.raises(TypeError, f)

    # NOTE(review): `result` here is still the df.iloc[4:5] frame from
    # above -- the assignment inside f never ran. Kept as-is.
    assert result.values.all()
    assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()

    cp = df.copy()
    cp.iloc[4:5] = 0
    assert (cp.iloc[4:5] == 0).values.all()
    assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()

    # float slicing
    result = df.loc[1.0:5]
    expected = df
    assert_frame_equal(result, expected)
    assert len(result) == 5

    result = df.loc[1.1:5]
    expected = df.reindex([2.5, 3.5, 4.5, 5.0])
    assert_frame_equal(result, expected)
    assert len(result) == 4

    result = df.loc[4.51:5]
    expected = df.reindex([5.0])
    assert_frame_equal(result, expected)
    assert len(result) == 1

    result = df.loc[1.0:5.0]
    expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
    assert_frame_equal(result, expected)
    assert len(result) == 5

    cp = df.copy()
    cp.loc[1.0:5.0] = 0
    result = cp.loc[1.0:5.0]
    assert (result == 0).values.all()
def test_setitem_single_column_mixed(self):
    """Assigning nan into an object (string) column via .loc keeps the
    column as an object array with nans at the set positions."""
    df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
                   columns=['foo', 'bar', 'baz'])
    df['str'] = 'qux'
    df.loc[df.index[::2], 'str'] = nan
    expected = np.array([nan, 'qux', nan, 'qux', nan], dtype=object)
    assert_almost_equal(df['str'].values, expected)
def test_setitem_single_column_mixed_datetime(self):
    """Setting iNaT / nan into a datetime64 column keeps the dtype and the
    affected entries read back as null."""
    df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
                   columns=['foo', 'bar', 'baz'])

    df['timestamp'] = Timestamp('20010102')

    # check our dtypes
    result = df.get_dtype_counts()
    expected = Series({'float64': 3, 'datetime64[ns]': 1})
    assert_series_equal(result, expected)

    # set an allowable datetime64 type
    df.loc['b', 'timestamp'] = iNaT
    assert isnull(df.loc['b', 'timestamp'])

    # allow this syntax
    df.loc['c', 'timestamp'] = nan
    assert isnull(df.loc['c', 'timestamp'])

    # allow this syntax
    df.loc['d', :] = nan
    assert not isnull(df.loc['c', :]).all()

    # as of GH 3216 this will now work!
    # try to set with a list like item
    # pytest.raises(
    #     Exception, df.loc.__setitem__, ('d', 'timestamp'), [nan])
def test_setitem_frame(self):
    """Frame-valued .loc assignment: ndarray payloads, index-aligned and
    unaligned frames (GH 3216), and int-to-float upcasting."""
    piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
    self.frame.loc[self.frame.index[-2]:, ['A', 'B']] = piece.values
    result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
    expected = piece.values
    assert_almost_equal(result, expected)

    # GH 3216

    # already aligned
    f = self.mixed_frame.copy()
    piece = DataFrame([[1., 2.], [3., 4.]],
                      index=f.index[0:2], columns=['A', 'B'])
    key = (slice(None, 2), ['A', 'B'])
    f.loc[key] = piece
    assert_almost_equal(f.loc[f.index[0:2], ['A', 'B']].values,
                        piece.values)

    # rows unaligned
    f = self.mixed_frame.copy()
    piece = DataFrame([[1., 2.], [3., 4.], [5., 6.], [7., 8.]],
                      index=list(f.index[0:2]) + ['foo', 'bar'],
                      columns=['A', 'B'])
    key = (slice(None, 2), ['A', 'B'])
    f.loc[key] = piece
    assert_almost_equal(f.loc[f.index[0:2:], ['A', 'B']].values,
                        piece.values[0:2])

    # key is unaligned with values
    f = self.mixed_frame.copy()
    piece = f.loc[f.index[:2], ['A']]
    piece.index = f.index[-2:]
    key = (slice(-2, None), ['A', 'B'])
    f.loc[key] = piece
    piece['B'] = np.nan
    assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
                        piece.values)

    # ndarray
    f = self.mixed_frame.copy()
    piece = self.mixed_frame.loc[f.index[:2], ['A', 'B']]
    key = (slice(-2, None), ['A', 'B'])
    f.loc[key] = piece.values
    assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
                        piece.values)

    # needs upcasting
    df = DataFrame([[1, 2, 'foo'], [3, 4, 'bar']], columns=['A', 'B', 'C'])
    df2 = df.copy()
    df2.loc[:, ['A', 'B']] = df.loc[:, ['A', 'B']] + 0.5
    expected = df.reindex(columns=['A', 'B'])
    expected += 0.5
    expected['C'] = df['C']
    assert_frame_equal(df2, expected)
def test_setitem_frame_align(self):
    """A frame assigned through .loc aligns on index/columns before the
    values are written."""
    piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
    piece.index = self.frame.index[-2:]
    piece.columns = ['A', 'B']
    self.frame.loc[self.frame.index[-2:], ['A', 'B']] = piece
    result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
    expected = piece.values
    assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
    """.loc on a duplicate-labelled index returns all matching rows as a
    frame, or a Series when the label is unique (#1201)."""
    # #1201
    df = DataFrame(np.random.randn(5, 3),
                   index=['foo', 'foo', 'bar', 'baz', 'bar'])

    result = df.loc['foo']
    expected = df[:2]
    assert_frame_equal(result, expected)

    result = df.loc['bar']
    expected = df.iloc[[2, 4]]
    assert_frame_equal(result, expected)

    result = df.loc['baz']
    expected = df.iloc[3]
    assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
    """List and boolean .loc selection works on duplicate-labelled
    indexes (#1201)."""
    # #1201
    df = DataFrame(np.random.randn(5, 3),
                   index=['foo', 'foo', 'bar', 'baz', 'bar'])

    result = df.loc[['bar']]
    exp = df.iloc[[2, 4]]
    assert_frame_equal(result, exp)

    result = df.loc[df[1] > 0]
    exp = df[df[1] > 0]
    assert_frame_equal(result, exp)

    result = df.loc[df[0] > 0]
    exp = df[df[0] > 0]
    assert_frame_equal(result, exp)
def test_getitem_setitem_ix_bool_keyerror(self):
    """Scalar True/False are not valid .loc labels: KeyError for both
    getitem and setitem (#2199)."""
    # #2199
    df = DataFrame({'a': [1, 2, 3]})

    pytest.raises(KeyError, df.loc.__getitem__, False)
    pytest.raises(KeyError, df.loc.__getitem__, True)

    pytest.raises(KeyError, df.loc.__setitem__, False, 0)
    pytest.raises(KeyError, df.loc.__setitem__, True, 0)
def test_getitem_list_duplicates(self):
# #1943
df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))
df.columns.name = 'foo'
result = df[['B', 'C']]
assert result.columns.name == 'foo'
expected = df.iloc[:, 2:]
assert_frame_equal(result, expected)
def test_get_value(self):
    """get_value(row, col) matches plain column-then-row indexing for
    every cell of self.frame."""
    for idx in self.frame.index:
        for col in self.frame.columns:
            result = self.frame.get_value(idx, col)
            expected = self.frame[col][idx]
            assert result == expected
def test_lookup(self):
    """DataFrame.lookup matches per-cell get_value, preserves bool dtype,
    and raises KeyError/ValueError for bad labels or mismatched lengths."""
    def alt(df, rows, cols, dtype):
        # reference implementation: one get_value call per (row, col) pair
        result = []
        for r, c in zip(rows, cols):
            result.append(df.get_value(r, c))
        return np.array(result, dtype=dtype)

    def testit(df):
        rows = list(df.index) * len(df.columns)
        cols = list(df.columns) * len(df.index)

        result = df.lookup(rows, cols)
        expected = alt(df, rows, cols, dtype=np.object_)
        tm.assert_almost_equal(result, expected, check_dtype=False)

    testit(self.mixed_frame)
    testit(self.frame)

    df = DataFrame({'label': ['a', 'b', 'a', 'c'],
                    'mask_a': [True, True, False, True],
                    'mask_b': [True, False, False, False],
                    'mask_c': [False, True, False, True]})
    df['mask'] = df.lookup(df.index, 'mask_' + df['label'])
    exp_mask = alt(df, df.index, 'mask_' + df['label'], dtype=np.bool_)
    tm.assert_series_equal(df['mask'], pd.Series(exp_mask, name='mask'))
    assert df['mask'].dtype == np.bool_

    with pytest.raises(KeyError):
        self.frame.lookup(['xyz'], ['A'])

    with pytest.raises(KeyError):
        self.frame.lookup([self.frame.index[0]], ['xyz'])

    with tm.assert_raises_regex(ValueError, 'same size'):
        self.frame.lookup(['a', 'b', 'c'], ['a'])
def test_set_value(self):
    """set_value writes a scalar that reads back via plain indexing."""
    for idx in self.frame.index:
        for col in self.frame.columns:
            self.frame.set_value(idx, col, 1)
            assert self.frame[col][idx] == 1
def test_set_value_resize(self):
    """set_value with a new row/column label enlarges the frame in place
    and upcasts the column dtype to fit the assigned value."""
    res = self.frame.set_value('foobar', 'B', 0)
    assert res is self.frame
    assert res.index[-1] == 'foobar'
    assert res.get_value('foobar', 'B') == 0

    self.frame.loc['foobar', 'qux'] = 0
    assert self.frame.get_value('foobar', 'qux') == 0

    res = self.frame.copy()
    res3 = res.set_value('foobar', 'baz', 'sam')
    assert res3['baz'].dtype == np.object_

    res = self.frame.copy()
    res3 = res.set_value('foobar', 'baz', True)
    assert res3['baz'].dtype == np.object_

    res = self.frame.copy()
    res3 = res.set_value('foobar', 'baz', 5)
    assert is_float_dtype(res3['baz'])
    assert isnull(res3['baz'].drop(['foobar'])).all()
    pytest.raises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')
def test_set_value_with_index_dtype_change(self):
    """Enlarging set_value/.loc with new labels appends to the index, and
    creates a new column only when the column key is unambiguous."""
    df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC'))

    # this is actually ambiguous as the 2 is interpreted as a positional
    # so column is not created
    df = df_orig.copy()
    df.set_value('C', 2, 1.0)
    assert list(df.index) == list(df_orig.index) + ['C']
    # assert list(df.columns) == list(df_orig.columns) + [2]

    df = df_orig.copy()
    df.loc['C', 2] = 1.0
    assert list(df.index) == list(df_orig.index) + ['C']
    # assert list(df.columns) == list(df_orig.columns) + [2]

    # create both new
    df = df_orig.copy()
    df.set_value('C', 'D', 1.0)
    assert list(df.index) == list(df_orig.index) + ['C']
    assert list(df.columns) == list(df_orig.columns) + ['D']

    df = df_orig.copy()
    df.loc['C', 'D'] = 1.0
    assert list(df.index) == list(df_orig.index) + ['C']
    assert list(df.columns) == list(df_orig.columns) + ['D']
def test_get_set_value_no_partial_indexing(self):
    """get_value with a partial MultiIndex key raises KeyError."""
    # partial w/ MultiIndex raise exception
    index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])
    df = DataFrame(index=index, columns=lrange(4))
    pytest.raises(KeyError, df.get_value, 0, 1)
    # pytest.raises(KeyError, df.set_value, 0, 1, 0)
def test_single_element_ix_dont_upcast(self):
    """Scalar lookups on an integer column stay integer rather than being
    upcast to float (GH 11617)."""
    self.frame['E'] = 1
    assert issubclass(self.frame['E'].dtype.type, (int, np.integer))

    with catch_warnings(record=True):
        result = self.frame.ix[self.frame.index[5], 'E']
        assert is_integer(result)

    result = self.frame.loc[self.frame.index[5], 'E']
    assert is_integer(result)

    # GH 11617
    df = pd.DataFrame(dict(a=[1.23]))
    df["b"] = 666

    with catch_warnings(record=True):
        result = df.ix[0, "b"]
        assert is_integer(result)
    result = df.loc[0, "b"]
    assert is_integer(result)

    expected = Series([666], [0], name='b')
    with catch_warnings(record=True):
        result = df.ix[[0], "b"]
        assert_series_equal(result, expected)
    result = df.loc[[0], "b"]
    assert_series_equal(result, expected)
def test_iloc_row(self):
    """.iloc row selection is positional; a row slice is a view whose
    mutation raises SettingWithCopyError but still writes through."""
    df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))

    result = df.iloc[1]
    exp = df.loc[2]
    assert_series_equal(result, exp)

    result = df.iloc[2]
    exp = df.loc[4]
    assert_series_equal(result, exp)

    # slice
    result = df.iloc[slice(4, 8)]
    expected = df.loc[8:14]
    assert_frame_equal(result, expected)

    # verify slice is view
    # setting it makes it raise/warn
    def f():
        result[2] = 0.
    pytest.raises(com.SettingWithCopyError, f)
    exp_col = df[2].copy()
    exp_col[4:8] = 0.
    assert_series_equal(df[2], exp_col)

    # list of integers
    result = df.iloc[[1, 2, 4, 6]]
    expected = df.reindex(df.index[[1, 2, 4, 6]])
    assert_frame_equal(result, expected)
def test_iloc_col(self):
    """.iloc column selection is positional; a column slice is a view and
    assigning into it raises SettingWithCopyError but writes through."""
    df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))

    result = df.iloc[:, 1]
    exp = df.loc[:, 2]
    assert_series_equal(result, exp)

    result = df.iloc[:, 2]
    exp = df.loc[:, 4]
    assert_series_equal(result, exp)

    # slice
    result = df.iloc[:, slice(4, 8)]
    expected = df.loc[:, 8:14]
    assert_frame_equal(result, expected)

    # verify slice is view
    # and that we are setting a copy
    def f():
        result[8] = 0.
    pytest.raises(com.SettingWithCopyError, f)
    assert (df[8] == 0).all()

    # list of integers
    result = df.iloc[:, [1, 2, 4, 6]]
    expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
    assert_frame_equal(result, expected)
def test_iloc_duplicates(self):
    """.iloc matches .ix positional behavior on duplicate labels, on
    MultiIndex axes, and on duplicate integer column labels (#2259)."""
    df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),
                   index=list('aab'))

    result = df.iloc[0]
    with catch_warnings(record=True):
        result2 = df.ix[0]
        assert isinstance(result, Series)
        assert_almost_equal(result.values, df.values[0])
        assert_series_equal(result, result2)

    with catch_warnings(record=True):
        result = df.T.iloc[:, 0]
        result2 = df.T.ix[:, 0]
        assert isinstance(result, Series)
        assert_almost_equal(result.values, df.values[0])
        assert_series_equal(result, result2)

    # multiindex
    df = DataFrame(np.random.randn(3, 3),
                   columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
                   index=[['i', 'i', 'j'], ['X', 'X', 'Y']])

    with catch_warnings(record=True):
        rs = df.iloc[0]
        xp = df.ix[0]
        assert_series_equal(rs, xp)

    with catch_warnings(record=True):
        rs = df.iloc[:, 0]
        xp = df.T.ix[0]
        assert_series_equal(rs, xp)

    with catch_warnings(record=True):
        rs = df.iloc[:, [0]]
        xp = df.ix[:, [0]]
        assert_frame_equal(rs, xp)

    # #2259
    df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
    result = df.iloc[:, [0]]
    expected = df.take([0], axis=1)
    assert_frame_equal(result, expected)
def test_iloc_sparse_propegate_fill_value(self):
    """iloc column selection on a SparseDataFrame keeps the sparse fill
    value, so sp_values lengths match.

    NOTE(review): 'propegate' is a typo for 'propagate'; left unchanged
    so the pytest test id stays stable.
    """
    from pandas.core.sparse.api import SparseDataFrame
    df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
    assert len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values)
def test_iat(self):
    """Positional .iat access agrees with label-based .at for every cell."""
    for i, row in enumerate(self.frame.index):
        for j, col in enumerate(self.frame.columns):
            result = self.frame.iat[i, j]
            expected = self.frame.at[row, col]
            assert result == expected
def test_nested_exception(self):
# Ignore the strange way of triggering the problem
# (which may get fixed), it's just a way to trigger
# the issue or reraising an outer exception without
# a named argument
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ["a", "b"]
df.index = l
try:
repr(df)
except Exception as e:
assert type(e) != UnboundLocalError
def test_reindex_methods(self):
    """reindex fill methods (nearest/pad/backfill) with tolerance,
    reversed targets, reordered targets, and the method-switch symmetry
    for a reversed frame."""
    df = pd.DataFrame({'x': list(range(5))})
    target = np.array([-0.1, 0.9, 1.1, 1.5])

    for method, expected_values in [('nearest', [0, 1, 1, 2]),
                                    ('pad', [np.nan, 0, 1, 1]),
                                    ('backfill', [0, 1, 2, 2])]:
        expected = pd.DataFrame({'x': expected_values}, index=target)
        actual = df.reindex(target, method=method)
        assert_frame_equal(expected, actual)

        actual = df.reindex_like(df, method=method, tolerance=0)
        assert_frame_equal(df, actual)

        actual = df.reindex(target, method=method, tolerance=1)
        assert_frame_equal(expected, actual)

        e2 = expected[::-1]
        actual = df.reindex(target[::-1], method=method)
        assert_frame_equal(e2, actual)

        new_order = [3, 0, 2, 1]
        e2 = expected.iloc[new_order]
        actual = df.reindex(target[new_order], method=method)
        assert_frame_equal(e2, actual)

        # reversing the frame flips pad <-> backfill for the same target
        switched_method = ('pad' if method == 'backfill'
                           else 'backfill' if method == 'pad'
                           else method)
        actual = df[::-1].reindex(target, method=switched_method)
        assert_frame_equal(expected, actual)

    expected = pd.DataFrame({'x': [0, 1, 1, np.nan]}, index=target)
    actual = df.reindex(target, method='nearest', tolerance=0.2)
    assert_frame_equal(expected, actual)
def test_reindex_frame_add_nat(self):
    """Reindexing beyond a datetime column's length keeps M8[ns] dtype and
    fills the new positions with NaT."""
    rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
    df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})

    result = df.reindex(lrange(15))
    assert np.issubdtype(result['B'].dtype, np.dtype('M8[ns]'))

    mask = com.isnull(result)['B']
    assert mask[-5:].all()
    assert not mask[:-5].any()
def test_set_dataframe_column_ns_dtype(self):
    """datetime.datetime values are inferred as a M8[ns] column."""
    x = DataFrame([datetime.now(), datetime.now()])
    assert x[0].dtype == np.dtype('M8[ns]')
def test_non_monotonic_reindex_methods(self):
    """Fill-method reindexing requires a monotonic index; every fill
    method raises ValueError on a shuffled index."""
    dr = pd.date_range('2013-08-01', periods=6, freq='B')
    data = np.random.randn(6, 1)
    df = pd.DataFrame(data, index=dr, columns=list('A'))
    df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]],
                          columns=list('A'))

    # index is not monotonic increasing or decreasing
    pytest.raises(ValueError, df_rev.reindex, df.index, method='pad')
    pytest.raises(ValueError, df_rev.reindex, df.index, method='ffill')
    pytest.raises(ValueError, df_rev.reindex, df.index, method='bfill')
    pytest.raises(ValueError, df_rev.reindex, df.index, method='nearest')
def test_reindex_level(self):
    """reindex(level=...) on a MultiIndex matches an explicit positional
    take, for first-level and second-level targets (including labels
    missing from the level)."""
    from itertools import permutations
    icol = ['jim', 'joe', 'jolie']

    def verify_first_level(df, level, idx, check_index_type=True):
        # expected row order: positions of each target value, in target order
        f = lambda val: np.nonzero(df[level] == val)[0]
        i = np.concatenate(list(map(f, idx)))
        left = df.set_index(icol).reindex(idx, level=level)
        right = df.iloc[i].set_index(icol)
        assert_frame_equal(left, right, check_index_type=check_index_type)

    def verify(df, level, idx, indexer, check_index_type=True):
        # compare against a hand-computed positional indexer
        left = df.set_index(icol).reindex(idx, level=level)
        right = df.iloc[indexer].set_index(icol)
        assert_frame_equal(left, right, check_index_type=check_index_type)

    df = pd.DataFrame({'jim': list('B' * 4 + 'A' * 2 + 'C' * 3),
                       'joe': list('abcdeabcd')[::-1],
                       'jolie': [10, 20, 30] * 3,
                       'joline': np.random.randint(0, 1000, 9)})

    target = [['C', 'B', 'A'], ['F', 'C', 'A', 'D'], ['A'],
              ['A', 'B', 'C'], ['C', 'A', 'B'], ['C', 'B'], ['C', 'A'],
              ['A', 'B'], ['B', 'A', 'C']]

    for idx in target:
        verify_first_level(df, 'jim', idx)

    # reindex by these causes different MultiIndex levels
    for idx in [['D', 'F'], ['A', 'C', 'B']]:
        verify_first_level(df, 'jim', idx, check_index_type=False)

    verify(df, 'joe', list('abcde'), [3, 2, 1, 0, 5, 4, 8, 7, 6])
    verify(df, 'joe', list('abcd'), [3, 2, 1, 0, 5, 8, 7, 6])
    verify(df, 'joe', list('abc'), [3, 2, 1, 8, 7, 6])
    verify(df, 'joe', list('eca'), [1, 3, 4, 6, 8])
    verify(df, 'joe', list('edc'), [0, 1, 4, 5, 6])
    verify(df, 'joe', list('eadbc'), [3, 0, 2, 1, 4, 5, 8, 7, 6])
    verify(df, 'joe', list('edwq'), [0, 4, 5])
    verify(df, 'joe', list('wq'), [], check_index_type=False)

    df = DataFrame({'jim': ['mid'] * 5 + ['btm'] * 8 + ['top'] * 7,
                    'joe': ['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 +
                    ['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 +
                    ['3rd'] * 3 + ['2nd'] * 2,
                    # this needs to be jointly unique with jim and joe or
                    # reindexing will fail ~1.5% of the time, this works
                    # out to needing unique groups of same size as joe
                    'jolie': np.concatenate([
                        np.random.choice(1000, x, replace=False)
                        for x in [2, 3, 3, 2, 3, 2, 3, 2]]),
                    'joline': np.random.randn(20).round(3) * 10})

    for idx in permutations(df['jim'].unique()):
        for i in range(3):
            verify_first_level(df, 'jim', idx[:i + 1])

    i = [2, 3, 4, 0, 1, 8, 9, 5, 6, 7, 10,
         11, 12, 13, 14, 18, 19, 15, 16, 17]
    verify(df, 'joe', ['1st', '2nd', '3rd'], i)

    i = [0, 1, 2, 3, 4, 10, 11, 12, 5, 6,
         7, 8, 9, 15, 16, 17, 18, 19, 13, 14]
    verify(df, 'joe', ['3rd', '2nd', '1st'], i)

    i = [0, 1, 5, 6, 7, 10, 11, 12, 18, 19, 15, 16, 17]
    verify(df, 'joe', ['2nd', '3rd'], i)

    i = [0, 1, 2, 3, 4, 10, 11, 12, 8, 9, 15, 16, 17, 13, 14]
    verify(df, 'joe', ['3rd', '1st'], i)
def test_getitem_ix_float_duplicates(self):
    """.loc (and deprecated .ix) on float indexes with duplicate labels
    return every matching row, regardless of where duplicates sit."""
    df = pd.DataFrame(np.random.randn(3, 3),
                      index=[0.1, 0.2, 0.2], columns=list('abc'))
    expect = df.iloc[1:]
    assert_frame_equal(df.loc[0.2], expect)
    with catch_warnings(record=True):
        assert_frame_equal(df.ix[0.2], expect)

    expect = df.iloc[1:, 0]
    assert_series_equal(df.loc[0.2, 'a'], expect)

    df.index = [1, 0.2, 0.2]
    expect = df.iloc[1:]
    assert_frame_equal(df.loc[0.2], expect)
    with catch_warnings(record=True):
        assert_frame_equal(df.ix[0.2], expect)

    expect = df.iloc[1:, 0]
    assert_series_equal(df.loc[0.2, 'a'], expect)

    df = pd.DataFrame(np.random.randn(4, 3),
                      index=[1, 0.2, 0.2, 1], columns=list('abc'))
    expect = df.iloc[1:-1]
    assert_frame_equal(df.loc[0.2], expect)
    with catch_warnings(record=True):
        assert_frame_equal(df.ix[0.2], expect)

    expect = df.iloc[1:-1, 0]
    assert_series_equal(df.loc[0.2, 'a'], expect)

    df.index = [0.1, 0.2, 2, 0.2]
    expect = df.iloc[[1, -1]]
    assert_frame_equal(df.loc[0.2], expect)
    with catch_warnings(record=True):
        assert_frame_equal(df.ix[0.2], expect)

    expect = df.iloc[[1, -1], 0]
    assert_series_equal(df.loc[0.2, 'a'], expect)
def test_setitem_with_sparse_value(self):
    """Assigning an aligned sparse Series as a new column keeps its
    values (GH 8131)."""
    # GH8131
    df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
    sp_series = pd.Series([0, 0, 1]).to_sparse(fill_value=0)
    df['new_column'] = sp_series
    assert_series_equal(df['new_column'], sp_series, check_names=False)
def test_setitem_with_unaligned_sparse_value(self):
    """A sparse Series with a shuffled index is realigned on assignment."""
    df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
    sp_series = (pd.Series([0, 0, 1], index=[2, 1, 0])
                 .to_sparse(fill_value=0))
    df['new_column'] = sp_series
    exp = pd.Series([1, 0, 0], name='new_column')
    assert_series_equal(df['new_column'], exp)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = pd.Series(pd.date_range('2015-01-01', periods=3, tz='utc'),
name='dates')
df = pd.DataFrame({'dates': column})
df['dates'] = column[[1, 0, 2]]
assert_series_equal(df['dates'], column)
df = pd.DataFrame({'dates': column})
df.loc[[0, 1, 2], 'dates'] = column[[1, 0, 2]]
assert_series_equal(df['dates'], column)
def test_setitem_datetime_coercion(self):
# gh-1048
df = pd.DataFrame({'c': [pd.Timestamp('2010-10-01')] * 3})
df.loc[0:1, 'c'] = np.datetime64('2008-08-08')
assert pd.Timestamp('2008-08-08') == df.loc[0, 'c']
assert pd.Timestamp('2008-08-08') == df.loc[1, 'c']
df.loc[2, 'c'] = date(2005, 5, 5)
assert pd.Timestamp('2005-05-05') == df.loc[2, 'c']
def test_setitem_datetimelike_with_inference(self):
    # GH 7592
    # assignment of timedeltas with NaT

    one_hour = timedelta(hours=1)
    df = DataFrame(index=date_range('20130101', periods=4))

    # various full-column and partial (sliced / label-based) assignments
    # of m8[ns] arrays; partial assignments leave NaT in the gaps
    df['A'] = np.array([1 * one_hour] * 4, dtype='m8[ns]')
    df.loc[:, 'B'] = np.array([2 * one_hour] * 4, dtype='m8[ns]')
    df.loc[:3, 'C'] = np.array([3 * one_hour] * 3, dtype='m8[ns]')
    df.loc[:, 'D'] = np.array([4 * one_hour] * 4, dtype='m8[ns]')
    df.loc[df.index[:3], 'E'] = np.array([5 * one_hour] * 3,
                                         dtype='m8[ns]')
    df['F'] = np.timedelta64('NaT')
    df.loc[df.index[:-1], 'F'] = np.array([6 * one_hour] * 3,
                                          dtype='m8[ns]')
    df.loc[df.index[-3]:, 'G'] = date_range('20130101', periods=3)
    df['H'] = np.datetime64('NaT')

    # inferred dtypes: A-F timedelta64[ns], G-H datetime64[ns]
    result = df.dtypes
    expected = Series([np.dtype('timedelta64[ns]')] * 6 +
                      [np.dtype('datetime64[ns]')] * 2,
                      index=list('ABCDEFGH'))
    assert_series_equal(result, expected)
def test_at_time_between_time_datetimeindex(self):
    """at_time/between_time must agree with label-based (.loc with a
    time key) and position-based (.iloc) selection, for both getting
    and setting."""
    index = date_range("2012-01-01", "2012-01-05", freq='30min')
    df = DataFrame(randn(len(index), 5), index=index)
    akey = time(12, 0, 0)
    bkey = slice(time(13, 0, 0), time(14, 0, 0))
    # positional counterparts of the time-based selections above
    ainds = [24, 72, 120, 168]
    binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]

    result = df.at_time(akey)
    expected = df.loc[akey]
    expected2 = df.iloc[ainds]
    assert_frame_equal(result, expected)
    assert_frame_equal(result, expected2)
    assert len(result) == 4

    result = df.between_time(bkey.start, bkey.stop)
    expected = df.loc[bkey]
    expected2 = df.iloc[binds]
    assert_frame_equal(result, expected)
    assert_frame_equal(result, expected2)
    assert len(result) == 12

    # setting via a time key
    result = df.copy()
    result.loc[akey] = 0
    result = result.loc[akey]
    expected = df.loc[akey].copy()
    expected.loc[:] = 0
    assert_frame_equal(result, expected)

    # round-trip: zero out the rows then restore the originals
    result = df.copy()
    result.loc[akey] = 0
    result.loc[akey] = df.iloc[ainds]
    assert_frame_equal(result, df)

    # setting via a time slice
    result = df.copy()
    result.loc[bkey] = 0
    result = result.loc[bkey]
    expected = df.loc[bkey].copy()
    expected.loc[:] = 0
    assert_frame_equal(result, expected)

    result = df.copy()
    result.loc[bkey] = 0
    result.loc[bkey] = df.iloc[binds]
    assert_frame_equal(result, df)
def test_xs(self):
    """Cross-section by row label, mixed-dtype rows, missing labels,
    and column-axis xs (including its view semantics)."""
    idx = self.frame.index[5]
    xs = self.frame.xs(idx)
    # row values must match element-wise lookups (NaN-aware)
    for item, value in compat.iteritems(xs):
        if np.isnan(value):
            assert np.isnan(self.frame[item][idx])
        else:
            assert value == self.frame[item][idx]

    # mixed-type xs: result dtype falls back to object
    test_data = {
        'A': {'1': 1, '2': 2},
        'B': {'1': '1', '2': '2', '3': '3'},
    }
    frame = DataFrame(test_data)
    xs = frame.xs('1')
    assert xs.dtype == np.object_
    assert xs['A'] == 1
    assert xs['B'] == '1'

    # missing label raises KeyError
    with pytest.raises(KeyError):
        self.tsframe.xs(self.tsframe.index[0] - BDay())

    # xs get column
    series = self.frame.xs('A', axis=1)
    expected = self.frame['A']
    assert_series_equal(series, expected)

    # view is returned if possible: mutating it reflects in the parent
    series = self.frame.xs('A', axis=1)
    series[:] = 5
    assert (expected == 5).all()
def test_xs_corner(self):
    # pathological mixed-type reordering case: columns added one at a
    # time so the frame holds interleaved float/object blocks
    df = DataFrame(index=[0])
    df['A'] = 1.
    df['B'] = 'foo'
    df['C'] = 2.
    df['D'] = 'bar'
    df['E'] = 3.

    # xs must return the row in the original column order
    xs = df.xs(0)
    exp = pd.Series([1., 'foo', 2., 'bar', 3.],
                    index=list('ABCDE'), name=0)
    tm.assert_series_equal(xs, exp)

    # no columns but Index(dtype=object): xs yields an empty Series
    df = DataFrame(index=['a', 'b', 'c'])
    result = df.xs('a')
    expected = Series([], name='a', index=pd.Index([], dtype=object))
    assert_series_equal(result, expected)
def test_xs_duplicates(self):
df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])
cross = df.xs('c')
exp = df.iloc[2]
assert_series_equal(cross, exp)
def test_xs_keep_level(self):
    # drop_level=False must keep the full MultiIndex on the result,
    # both for a single level and for a list of levels
    df = (DataFrame({'day': {0: 'sat', 1: 'sun'},
                     'flavour': {0: 'strawberry', 1: 'strawberry'},
                     'sales': {0: 10, 1: 12},
                     'year': {0: 2008, 1: 2008}})
          .set_index(['year', 'flavour', 'day']))
    result = df.xs('sat', level='day', drop_level=False)
    expected = df[:1]
    assert_frame_equal(result, expected)

    result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)
    assert_frame_equal(result, expected)
def test_xs_view(self):
    # in 0.14 this will return a view if possible a copy otherwise, but
    # this is numpy dependent
    dm = DataFrame(np.arange(20.).reshape(4, 5),
                   index=lrange(4), columns=lrange(5))

    # writing through the xs result must mutate the parent frame
    dm.xs(2)[:] = 10
    assert (dm.xs(2) == 10).all()
def test_index_namedtuple(self):
    # namedtuple index labels must be treated as scalar keys rather than
    # being expanded into MultiIndex levels (tupleize_cols=False)
    from collections import namedtuple
    IndexType = namedtuple("IndexType", ["a", "b"])
    idx1 = IndexType("foo", "bar")
    idx2 = IndexType("baz", "bof")
    index = Index([idx1, idx2],
                  name="composite_index", tupleize_cols=False)
    df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])

    # .ix is deprecated; warning suppressed
    with catch_warnings(record=True):
        result = df.ix[IndexType("foo", "bar")]["A"]
    assert result == 1

    result = df.loc[IndexType("foo", "bar")]["A"]
    assert result == 1
def test_boolean_indexing(self):
    """Boolean-frame setitem masks matching cells; a mis-sized boolean
    key must raise ValueError."""
    idx = lrange(3)
    cols = ['A', 'B', 'C']
    df1 = DataFrame(index=idx, columns=cols,
                    data=np.array([[0.0, 0.5, 1.0],
                                   [1.5, 2.0, 2.5],
                                   [3.0, 3.5, 4.0]],
                                  dtype=float))
    df2 = DataFrame(index=idx, columns=cols,
                    data=np.ones((len(idx), len(cols))))
    # cells where df1 > 2*df2 (i.e. > 2.0) are replaced by -1
    expected = DataFrame(index=idx, columns=cols,
                         data=np.array([[0.0, 0.5, 1.0],
                                        [1.5, 2.0, -1],
                                        [-1, -1, -1]], dtype=float))

    df1[df1 > 2.0 * df2] = -1
    assert_frame_equal(df1, expected)
    with tm.assert_raises_regex(ValueError, 'Item wrong length'):
        # boolean key one shorter than the index is rejected
        df1[df1.index[:-1] > 2] = -1
def test_boolean_indexing_mixed(self):
    """Boolean setitem works across mixed int/float columns but must
    raise TypeError once an object column is present."""
    df = DataFrame({
        long(0): {35: np.nan, 40: np.nan, 43: np.nan,
                  49: np.nan, 50: np.nan},
        long(1): {35: np.nan,
                  40: 0.32632316859446198,
                  43: np.nan,
                  49: 0.32632316859446198,
                  50: 0.39114724480578139},
        long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987,
                  49: np.nan, 50: np.nan},
        long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan,
                  50: np.nan},
        long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan,
                  49: np.nan, 50: np.nan},
        'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})

    # mixed int/float ok
    df2 = df.copy()
    df2[df2 > 0.3] = 1
    expected = df.copy()
    expected.loc[40, 1] = 1
    expected.loc[49, 1] = 1
    expected.loc[50, 1] = 1
    expected.loc[35, 4] = 1
    assert_frame_equal(df2, expected)

    # object column present -> boolean setting must raise
    df['foo'] = 'test'
    with tm.assert_raises_regex(TypeError, 'boolean setting '
                                'on mixed-type'):
        df[df > 0.3] = 1
def test_where(self):
    """Core DataFrame.where tests: element-wise getting, alignment of
    the condition/other, dtype preservation and upcasting, invalid
    conditions, and inplace setting."""
    default_frame = DataFrame(np.random.randn(5, 3),
                              columns=['A', 'B', 'C'])

    def _safe_add(df):
        # only add to the numeric items
        def is_ok(s):
            return (issubclass(s.dtype.type, (np.integer, np.floating)) and
                    s.dtype != 'uint8')
        return DataFrame(dict([(c, s + 1) if is_ok(s) else (c, s)
                               for c, s in compat.iteritems(df)]))

    def _check_get(df, cond, check_dtypes=True):
        # df.where(cond, other) must equal the column-wise np.where
        other1 = _safe_add(df)
        rs = df.where(cond, other1)
        rs2 = df.where(cond.values, other1)
        for k, v in rs.iteritems():
            exp = Series(
                np.where(cond[k], df[k], other1[k]), index=v.index)
            assert_series_equal(v, exp, check_names=False)
        assert_frame_equal(rs, rs2)

        # dtypes
        if check_dtypes:
            assert (rs.dtypes == df.dtypes).all()

    # check getting
    for df in [default_frame, self.mixed_frame,
               self.mixed_float, self.mixed_int]:
        cond = df > 0
        _check_get(df, cond)

    # upcasting case (GH # 2794)
    df = DataFrame(dict([(c, Series([1] * 3, dtype=c))
                         for c in ['int64', 'int32',
                                   'float32', 'float64']]))
    df.iloc[1, :] = 0
    result = df.where(df >= 0).get_dtype_counts()

    # when we don't preserve boolean casts
    #
    # expected = Series({ 'float32' : 1, 'float64' : 3 })

    expected = Series({'float32': 1, 'float64': 1, 'int32': 1, 'int64': 1})
    assert_series_equal(result, expected)

    # aligning
    def _check_align(df, cond, other, check_dtypes=True):
        rs = df.where(cond, other)
        for i, k in enumerate(rs.columns):
            result = rs[k]
            d = df[k].values
            # missing condition rows are treated as False (masked)
            c = cond[k].reindex(df[k].index).fillna(False).values

            if is_scalar(other):
                o = other
            else:
                if isinstance(other, np.ndarray):
                    o = Series(other[:, i], index=result.index).values
                else:
                    o = other[k].values

            new_values = d if c.all() else np.where(c, d, o)
            expected = Series(new_values, index=result.index, name=k)

            # since we can't always have the correct numpy dtype
            # as numpy doesn't know how to downcast, don't check
            assert_series_equal(result, expected, check_dtype=False)

        # dtypes
        # can't check dtype when other is an ndarray
        if check_dtypes and not isinstance(other, np.ndarray):
            assert (rs.dtypes == df.dtypes).all()

    for df in [self.mixed_frame, self.mixed_float, self.mixed_int]:
        # other is a frame
        cond = (df > 0)[1:]
        _check_align(df, cond, _safe_add(df))

        # check other is ndarray
        cond = df > 0
        _check_align(df, cond, (_safe_add(df).values))

        # integers are upcast, so don't check the dtypes
        cond = df > 0
        check_dtypes = all([not issubclass(s.type, np.integer)
                            for s in df.dtypes])
        _check_align(df, cond, np.nan, check_dtypes=check_dtypes)

    # invalid conditions
    df = default_frame
    err1 = (df + 1).values[0:2, :]
    pytest.raises(ValueError, df.where, cond, err1)

    err2 = cond.iloc[:2, :].values
    other1 = _safe_add(df)
    pytest.raises(ValueError, df.where, err2, other1)
    pytest.raises(ValueError, df.mask, True)
    pytest.raises(ValueError, df.mask, 0)

    # where inplace
    def _check_set(df, cond, check_dtypes=True):
        dfi = df.copy()
        econd = cond.reindex_like(df).fillna(True)
        expected = dfi.mask(~econd)

        dfi.where(cond, np.nan, inplace=True)
        assert_frame_equal(dfi, expected)

        # dtypes (and confirm upcasts)
        if check_dtypes:
            for k, v in compat.iteritems(df.dtypes):
                if issubclass(v.type, np.integer) and not cond[k].all():
                    v = np.dtype('float64')
                assert dfi[k].dtype == v

    for df in [default_frame, self.mixed_frame, self.mixed_float,
               self.mixed_int]:
        cond = df > 0
        _check_set(df, cond)

        cond = df >= 0
        _check_set(df, cond)

        # aligning
        cond = (df >= 0)[1:]
        _check_set(df, cond)

    # GH 10218
    # test DataFrame.where with Series slicing
    df = DataFrame({'a': range(3), 'b': range(4, 7)})
    result = df.where(df['a'] == 1)
    expected = df[df['a'] == 1].reindex(df.index)
    assert_frame_equal(result, expected)
def test_where_array_like(self):
# see gh-15414
klasses = [list, tuple, np.array]
df = DataFrame({'a': [1, 2, 3]})
cond = [[False], [True], [True]]
expected = DataFrame({'a': [np.nan, 2, 3]})
for klass in klasses:
result = df.where(klass(cond))
assert_frame_equal(result, expected)
df['b'] = 2
expected['b'] = [2, np.nan, 2]
cond = [[False, True], [True, False], [True, True]]
for klass in klasses:
result = df.where(klass(cond))
assert_frame_equal(result, expected)
def test_where_invalid_input(self):
    # see gh-15414: only boolean arrays accepted as the condition
    df = DataFrame({'a': [1, 2, 3]})
    msg = "Boolean array expected for the condition"

    # single-column non-boolean conditions (ints, strings, timestamps)
    conds = [
        [[1], [0], [1]],
        Series([[2], [5], [7]]),
        DataFrame({'a': [2, 5, 7]}),
        [["True"], ["False"], ["True"]],
        [[Timestamp("2017-01-01")],
         [pd.NaT], [Timestamp("2017-01-02")]]
    ]
    for cond in conds:
        with tm.assert_raises_regex(ValueError, msg):
            df.where(cond)

    # same shapes but with a second column
    df['b'] = 2
    conds = [
        [[0, 1], [1, 0], [1, 1]],
        Series([[0, 2], [5, 0], [4, 7]]),
        [["False", "True"], ["True", "False"],
         ["True", "True"]],
        DataFrame({'a': [2, 5, 7], 'b': [4, 8, 9]}),
        [[pd.NaT, Timestamp("2017-01-01")],
         [Timestamp("2017-01-02"), pd.NaT],
         [Timestamp("2017-01-03"), Timestamp("2017-01-03")]]
    ]
    for cond in conds:
        with tm.assert_raises_regex(ValueError, msg):
            df.where(cond)
def test_where_dataframe_col_match(self):
    # a DataFrame condition aligns on columns; once the condition's
    # columns no longer match, where must raise
    df = DataFrame([[1, 2, 3], [4, 5, 6]])
    cond = DataFrame([[True, False, True], [False, False, True]])
    out = df.where(cond)
    expected = DataFrame([[1.0, np.nan, 3], [np.nan, np.nan, 6]])
    tm.assert_frame_equal(out, expected)

    cond.columns = ["a", "b", "c"]  # Columns no longer match.
    msg = "Boolean array expected for the condition"
    with tm.assert_raises_regex(ValueError, msg):
        df.where(cond)
def test_where_ndframe_align(self):
    """Raw list/array conditions must match the frame's shape exactly;
    a Series condition is aligned instead (missing rows -> masked)."""
    msg = "Array conditional must be same shape as self"
    df = DataFrame([[1, 2, 3], [4, 5, 6]])

    cond = [True]
    with tm.assert_raises_regex(ValueError, msg):
        df.where(cond)

    # Series of length 1 aligns: row 1 has no condition -> masked
    expected = DataFrame([[1, 2, 3], [np.nan, np.nan, np.nan]])
    out = df.where(Series(cond))
    tm.assert_frame_equal(out, expected)

    cond = np.array([False, True, False, True])
    with tm.assert_raises_regex(ValueError, msg):
        df.where(cond)

    # extra Series rows are ignored after alignment
    expected = DataFrame([[np.nan, np.nan, np.nan], [4, 5, 6]])
    out = df.where(Series(cond))
    tm.assert_frame_equal(out, expected)
def test_where_bug(self):
    # GH 2793: where on float frames, integer frames (upcast to float),
    # and frame-vs-frame replacement (GH 7506)
    df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [
        4.0, 3.0, 2.0, 1.0]}, dtype='float64')
    expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [
        4.0, 3.0, np.nan, np.nan]}, dtype='float64')
    result = df.where(df > 2, np.nan)
    assert_frame_equal(result, expected)

    result = df.copy()
    result.where(result > 2, np.nan, inplace=True)
    assert_frame_equal(result, expected)

    # mixed: integer columns are upcast to float when NaN is introduced
    for dtype in ['int16', 'int8', 'int32', 'int64']:
        df = DataFrame({'a': np.array([1, 2, 3, 4], dtype=dtype),
                        'b': np.array([4.0, 3.0, 2.0, 1.0],
                                      dtype='float64')})
        expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0],
                              'b': [4.0, 3.0, np.nan, np.nan]},
                             dtype='float64')

        result = df.where(df > 2, np.nan)
        assert_frame_equal(result, expected)

        result = df.copy()
        result.where(result > 2, np.nan, inplace=True)
        assert_frame_equal(result, expected)

    # transpositional issue
    # GH7506
    a = DataFrame({0: [1, 2], 1: [3, 4], 2: [5, 6]})
    b = DataFrame({0: [np.nan, 8], 1: [9, np.nan], 2: [np.nan, np.nan]})
    do_not_replace = b.isnull() | (a > b)

    expected = a.copy()
    expected[~do_not_replace] = b

    result = a.where(do_not_replace, b)
    assert_frame_equal(result, expected)

    a = DataFrame({0: [4, 6], 1: [1, 0]})
    b = DataFrame({0: [np.nan, 3], 1: [3, np.nan]})
    do_not_replace = b.isnull() | (a > b)

    expected = a.copy()
    expected[~do_not_replace] = b

    result = a.where(do_not_replace, b)
    assert_frame_equal(result, expected)
def test_where_datetime(self):
    # GH 3311: boolean indexing with a datetime comparison masks the
    # datetime entries that do not exceed the stamp
    df = DataFrame(dict(A=date_range('20130102', periods=5),
                        B=date_range('20130104', periods=5),
                        C=np.random.randn(5)))
    stamp = datetime(2013, 1, 3)
    result = df[df > stamp]
    # only A's first two rows compare <= stamp; B starts after the stamp
    # NOTE(review): the float column C is expected to pass through
    # unmasked here — confirm the comparison semantics for
    # non-datetime columns in the targeted pandas version.
    expected = df.copy()
    expected.loc[[0, 1], 'A'] = np.nan
    assert_frame_equal(result, expected)
def test_where_none(self):
    # GH 4667
    # setting with None changes dtype
    df = DataFrame({'series': Series(range(10))}).astype(float)
    df[df > 7] = None
    expected = DataFrame(
        {'series': Series([0, 1, 2, 3, 4, 5, 6, 7, np.nan, np.nan])})
    assert_frame_equal(df, expected)

    # GH 7656
    # inplace where with None on a mixed-dtype frame must raise
    # NOTE(review): `expected` below is computed but never asserted
    # against — looks like a leftover; confirm intent.
    df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {
        'A': np.nan, 'B': 'Test', 'C': np.nan}])
    expected = df.where(~isnull(df), None)
    with tm.assert_raises_regex(TypeError, 'boolean setting '
                                'on mixed-type'):
        df.where(~isnull(df), None, inplace=True)
def test_where_align(self):
    """where with an aligned `other` must match the equivalent fillna
    for Series, axis-aligned Series, and DataFrame replacements."""
    def create():
        # frame with staggered NaN runs, one per column
        df = DataFrame(np.random.randn(10, 3))
        df.iloc[3:5, 0] = np.nan
        df.iloc[4:6, 1] = np.nan
        df.iloc[5:8, 2] = np.nan
        return df

    # series: filling NaNs with column means via where(..., axis='columns')
    df = create()
    expected = df.fillna(df.mean())
    result = df.where(pd.notnull(df), df.mean(), axis='columns')
    assert_frame_equal(result, expected)

    df.where(pd.notnull(df), df.mean(), inplace=True, axis='columns')
    assert_frame_equal(df, expected)

    # replacing along the index axis ('rows' is an alias of 'index')
    df = create().fillna(0)
    expected = df.apply(lambda x, y: x.where(x > 0, y), y=df[0])
    result = df.where(df > 0, df[0], axis='index')
    assert_frame_equal(result, expected)
    result = df.where(df > 0, df[0], axis='rows')
    assert_frame_equal(result, expected)

    # frame
    df = create()
    expected = df.fillna(1)
    result = df.where(pd.notnull(df), DataFrame(
        1, index=df.index, columns=df.columns))
    assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame(
[[1 + 1j, 2], [np.nan, 4 + 1j]], columns=['a', 'b'])
df = DataFrame([[1 + 1j, 2], [5 + 1j, 4 + 1j]], columns=['a', 'b'])
df[df.abs() >= 5] = np.nan
assert_frame_equal(df, expected)
def test_where_axis(self):
    # GH 9736: broadcasting of a Series `other` along either axis
    df = DataFrame(np.random.randn(2, 2))
    mask = DataFrame([[False, False], [False, False]])
    s = Series([0, 1])

    # all-False mask: the result is `s` broadcast along the chosen axis
    expected = DataFrame([[0, 0], [1, 1]], dtype='float64')
    result = df.where(mask, s, axis='index')
    assert_frame_equal(result, expected)

    result = df.copy()
    result.where(mask, s, axis='index', inplace=True)
    assert_frame_equal(result, expected)

    expected = DataFrame([[0, 1], [0, 1]], dtype='float64')
    result = df.where(mask, s, axis='columns')
    assert_frame_equal(result, expected)

    result = df.copy()
    result.where(mask, s, axis='columns', inplace=True)
    assert_frame_equal(result, expected)

    # Upcast needed
    df = DataFrame([[1, 2], [3, 4]], dtype='int64')
    mask = DataFrame([[False, False], [False, False]])
    s = Series([0, np.nan])

    expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype='float64')
    result = df.where(mask, s, axis='index')
    assert_frame_equal(result, expected)

    result = df.copy()
    result.where(mask, s, axis='index', inplace=True)
    assert_frame_equal(result, expected)

    expected = DataFrame([[0, np.nan], [0, np.nan]], dtype='float64')
    result = df.where(mask, s, axis='columns')
    assert_frame_equal(result, expected)

    # inplace along columns upcasts column-by-column, not the whole frame
    expected = DataFrame({0: np.array([0, 0], dtype='int64'),
                          1: np.array([np.nan, np.nan], dtype='float64')})
    result = df.copy()
    result.where(mask, s, axis='columns', inplace=True)
    assert_frame_equal(result, expected)

    # Multiple dtypes (=> multiple Blocks)
    df = pd.concat([DataFrame(np.random.randn(10, 2)),
                    DataFrame(np.random.randint(0, 10, size=(10, 2)))],
                   ignore_index=True, axis=1)
    mask = DataFrame(False, columns=df.columns, index=df.index)
    s1 = Series(1, index=df.columns)
    s2 = Series(2, index=df.index)

    result = df.where(mask, s1, axis='columns')
    expected = DataFrame(1.0, columns=df.columns, index=df.index)
    expected[2] = expected[2].astype(int)
    expected[3] = expected[3].astype(int)
    assert_frame_equal(result, expected)

    result = df.copy()
    result.where(mask, s1, axis='columns', inplace=True)
    assert_frame_equal(result, expected)

    result = df.where(mask, s2, axis='index')
    expected = DataFrame(2.0, columns=df.columns, index=df.index)
    expected[2] = expected[2].astype(int)
    expected[3] = expected[3].astype(int)
    assert_frame_equal(result, expected)

    result = df.copy()
    result.where(mask, s2, axis='index', inplace=True)
    assert_frame_equal(result, expected)

    # DataFrame vs DataFrame: labels missing from `other` become NaN
    d1 = df.copy().drop(1, axis=0)
    expected = df.copy()
    expected.loc[1, :] = np.nan

    result = df.where(mask, d1)
    assert_frame_equal(result, expected)
    result = df.where(mask, d1, axis='index')
    assert_frame_equal(result, expected)
    result = df.copy()
    result.where(mask, d1, inplace=True)
    assert_frame_equal(result, expected)
    result = df.copy()
    result.where(mask, d1, inplace=True, axis='index')
    assert_frame_equal(result, expected)

    d2 = df.copy().drop(1, axis=1)
    expected = df.copy()
    expected.loc[:, 1] = np.nan

    result = df.where(mask, d2)
    assert_frame_equal(result, expected)
    result = df.where(mask, d2, axis='columns')
    assert_frame_equal(result, expected)
    result = df.copy()
    result.where(mask, d2, inplace=True)
    assert_frame_equal(result, expected)
    result = df.copy()
    result.where(mask, d2, inplace=True, axis='columns')
    assert_frame_equal(result, expected)
def test_where_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.where(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[2, 3, 4], [5, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df > 4, df + 1))
# return ndarray and scalar
result = df.where(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[99, 2, 99], [4, 99, 6], [99, 8, 99]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df % 2 == 0, 99))
# chain
result = (df + 2).where(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[13, 14, 15], [16, 17, 18], [9, 10, 11]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result,
(df + 2).where((df + 2) > 8, (df + 2) + 10))
def test_mask(self):
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rs = df.where(cond, np.nan)
assert_frame_equal(rs, df.mask(df <= 0))
assert_frame_equal(rs, df.mask(~cond))
other = DataFrame(np.random.randn(5, 3))
rs = df.where(cond, other)
assert_frame_equal(rs, df.mask(df <= 0, other))
assert_frame_equal(rs, df.mask(~cond, other))
def test_mask_inplace(self):
    # GH8801: inplace where must match the out-of-place where/mask forms
    df = DataFrame(np.random.randn(5, 3))
    cond = df > 0

    rdf = df.copy()
    rdf.where(cond, inplace=True)
    assert_frame_equal(rdf, df.where(cond))
    assert_frame_equal(rdf, df.mask(~cond))

    # same with an explicit `other`
    rdf = df.copy()
    rdf.where(cond, -df, inplace=True)
    assert_frame_equal(rdf, df.where(cond, -df))
    assert_frame_equal(rdf, df.mask(~cond, -df))
def test_mask_edge_case_1xN_frame(self):
# GH4071
df = DataFrame([[1, 2]])
res = df.mask(DataFrame([[True, False]]))
expec = DataFrame([[nan, 2]])
assert_frame_equal(res, expec)
def test_mask_callable(self):
    # GH 12533: callable cond/other for mask must behave exactly like
    # the eager counterparts, including when chained
    df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    result = df.mask(lambda x: x > 4, lambda x: x + 1)
    exp = DataFrame([[1, 2, 3], [4, 6, 7], [8, 9, 10]])
    tm.assert_frame_equal(result, exp)
    tm.assert_frame_equal(result, df.mask(df > 4, df + 1))

    # return ndarray and scalar
    result = df.mask(lambda x: (x % 2 == 0).values, lambda x: 99)
    exp = DataFrame([[1, 99, 3], [99, 5, 99], [7, 99, 9]])
    tm.assert_frame_equal(result, exp)
    tm.assert_frame_equal(result, df.mask(df % 2 == 0, 99))

    # chain
    result = (df + 2).mask(lambda x: x > 8, lambda x: x + 10)
    exp = DataFrame([[3, 4, 5], [6, 7, 8], [19, 20, 21]])
    tm.assert_frame_equal(result, exp)
    tm.assert_frame_equal(result,
                          (df + 2).mask((df + 2) > 8, (df + 2) + 10))
def test_head_tail(self):
    """head/tail equal the corresponding positional slices, including
    negative and zero counts, float indexes and empty frames."""
    assert_frame_equal(self.frame.head(), self.frame[:5])
    assert_frame_equal(self.frame.tail(), self.frame[-5:])

    assert_frame_equal(self.frame.head(0), self.frame[0:0])
    assert_frame_equal(self.frame.tail(0), self.frame[0:0])

    assert_frame_equal(self.frame.head(-1), self.frame[:-1])
    assert_frame_equal(self.frame.tail(-1), self.frame[1:])
    assert_frame_equal(self.frame.head(1), self.frame[:1])
    assert_frame_equal(self.frame.tail(1), self.frame[-1:])

    # with a float index
    df = self.frame.copy()
    df.index = np.arange(len(self.frame)) + 0.1
    assert_frame_equal(df.head(), df.iloc[:5])
    assert_frame_equal(df.tail(), df.iloc[-5:])
    assert_frame_equal(df.head(0), df[0:0])
    assert_frame_equal(df.tail(0), df[0:0])
    assert_frame_equal(df.head(-1), df.iloc[:-1])
    assert_frame_equal(df.tail(-1), df.iloc[1:])

    # test empty dataframe
    empty_df = DataFrame()
    assert_frame_equal(empty_df.tail(), empty_df)
    assert_frame_equal(empty_df.head(), empty_df)
def test_type_error_multiindex(self):
    # See gh-12218: tuple-of-slices __getitem__ on a MultiIndex-column
    # frame must raise; .loc with an axis-1 tuple is the supported path
    df = DataFrame(columns=['i', 'c', 'x', 'y'],
                   data=[[0, 0, 1, 2], [1, 0, 3, 4],
                         [0, 1, 1, 2], [1, 1, 3, 4]])
    dg = df.pivot_table(index='i', columns='c',
                        values=['x', 'y'])

    with tm.assert_raises_regex(TypeError, "is an invalid key"):
        str(dg[:, 0])

    index = Index(range(2), name='i')
    columns = MultiIndex(levels=[['x', 'y'], [0, 1]],
                         labels=[[0, 1], [0, 0]],
                         names=[None, 'c'])
    expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)

    result = dg.loc[:, (slice(None), 0)]
    assert_frame_equal(result, expected)

    # scalar tuple key selects a single column as a Series
    name = ('x', 0)
    index = Index(range(2), name='i')
    expected = Series([1, 3], index=index, name=name)

    result = dg['x', 0]
    assert_series_equal(result, expected)
class TestDataFrameIndexingDatetimeWithTZ(TestData):
    """Indexing tests for tz-aware datetime columns."""

    def setup_method(self, method):
        # tz-aware index used as a column, plus a plain datetime range
        self.idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
                         name='foo')
        self.dr = date_range('20130110', periods=3)
        self.df = DataFrame({'A': self.idx, 'B': self.dr})

    def test_setitem(self):
        df = self.df
        idx = self.idx

        # setitem: assigning a tz-aware index creates a tz-aware column,
        # including when overwriting an object column
        df['C'] = idx
        assert_series_equal(df['C'], Series(idx, name='C'))

        df['D'] = 'foo'
        df['D'] = idx
        assert_series_equal(df['D'], Series(idx, name='D'))
        del df['D']

        # assert that A & C are not sharing the same base (e.g. they
        # are copies)
        b1 = df._data.blocks[1]
        b2 = df._data.blocks[2]
        assert b1.values.equals(b2.values)
        assert id(b1.values.values.base) != id(b2.values.values.base)

        # with nan: setting NaT keeps the datetime64 dtypes intact
        df2 = df.copy()
        df2.iloc[1, 1] = pd.NaT
        df2.iloc[1, 2] = pd.NaT
        result = df2['B']
        assert_series_equal(notnull(result), Series(
            [True, False, True], name='B'))
        assert_series_equal(df2.dtypes, df.dtypes)

    def test_set_reset(self):
        idx = self.idx

        # set/reset must round-trip the tz-aware dtype
        df = DataFrame({'A': [0, 1, 2]}, index=idx)
        result = df.reset_index()

        # BUG FIX: the original `assert result['foo'].dtype, 'M8[ns, ...'`
        # was an assert-with-message on an always-truthy dtype object and
        # therefore could never fail; compare against the expected
        # tz-aware dtype instead.
        assert result['foo'].dtype == 'datetime64[ns, US/Eastern]'

        df = result.set_index('foo')
        tm.assert_index_equal(df.index, idx)

    def test_transpose(self):
        # transposing drops the tz-aware dtype into object values
        result = self.df.T
        expected = DataFrame(self.df.values.T)
        expected.index = ['A', 'B']
        assert_frame_equal(result, expected)
class TestDataFrameIndexingUInt64(TestData):
    """Indexing tests for uint64 columns holding values >= 2**63."""

    def setup_method(self, method):
        # small uint64 range plus values beyond the int64 limit
        self.ir = Index(np.arange(3), dtype=np.uint64)
        self.idx = Index([2**63, 2**63 + 5, 2**63 + 10], name='foo')
        self.df = DataFrame({'A': self.idx, 'B': self.ir})

    def test_setitem(self):
        df = self.df
        idx = self.idx

        # setitem: assigning a uint64 index preserves the dtype,
        # including when overwriting an object column
        df['C'] = idx
        assert_series_equal(df['C'], Series(idx, name='C'))

        df['D'] = 'foo'
        df['D'] = idx
        assert_series_equal(df['D'], Series(idx, name='D'))
        del df['D']

        # With NaN: because uint64 has no NaN element,
        # the column should be cast to object.
        df2 = df.copy()
        df2.iloc[1, 1] = pd.NaT
        df2.iloc[1, 2] = pd.NaT
        result = df2['B']
        assert_series_equal(notnull(result), Series(
            [True, False, True], name='B'))
        assert_series_equal(df2.dtypes, Series([np.dtype('uint64'),
                                                np.dtype('O'), np.dtype('O')],
                                               index=['A', 'B', 'C']))

    def test_set_reset(self):
        idx = self.idx

        # set/reset must round-trip the uint64 dtype
        df = DataFrame({'A': [0, 1, 2]}, index=idx)
        result = df.reset_index()
        assert result['foo'].dtype == np.dtype('uint64')

        df = result.set_index('foo')
        tm.assert_index_equal(df.index, idx)

    def test_transpose(self):
        result = self.df.T
        expected = DataFrame(self.df.values.T)
        expected.index = ['A', 'B']
        assert_frame_equal(result, expected)
| agpl-3.0 |
CCBatIIT/bayesian-itc | bitc/experiments.py | 2 | 36275 | """
Contains Experiment and Injection classes.
"""
import os
import logging
import numpy
from pint import DimensionalityError
from bitc.units import ureg, Quantity
# Use logger with name of module
logger = logging.getLogger(__name__)
class Injection(object):

    """
    Data from a single injection.

    Several types of information are stored about each injection:

    * the ordinal number of the injection
    * the programmed volume of the injection
    * duration of the injection
    * time between the beginning of the injection and the beginning of the next injection
    * filtering period over which data channel is averaged to produce a single measurement of applied power

    Either ``titrant_amount`` or ``titrant_concentration`` must be supplied;
    otherwise a TypeError is raised.

    EXAMPLES

    """
    # TODO Add docstring examples.

    def __init__(self, number, volume, duration, spacing, filter_period,
                 evolved_heat=None, titrant_amount=None,
                 titrant_concentration=None):
        # sequence number of injection
        self.number = number
        # programmed volume of injection
        self.volume = volume
        # duration of injection
        self.duration = duration
        # time between beginning of injection and beginning of next injection
        self.spacing = spacing
        # time over which data channel is averaged to produce a single
        # measurement of applied power
        self.filter_period = filter_period

        # If provided, set the evolved_heat, making sure the unit is
        # compatible with microcalorie.
        # BUG FIX: compare against None instead of truthiness, so that a
        # legitimate zero heat is not silently dropped.
        if evolved_heat is not None:
            self.evolved_heat = evolved_heat.to('microcalorie')

        # the quantity of compound(s) injected
        # BUG FIX: `is not None` so a zero titrant amount is honored, and
        # the TypeError is actually raised (the original constructed the
        # exception but never raised it, leaving `self.titrant` unset).
        if titrant_amount is not None:
            self.titrant = titrant_amount
        elif titrant_concentration is not None:
            self.contents(titrant_concentration)
        else:
            raise TypeError(
                "Need to specify either a titrant amount, or a concentration")

    def contents(self, titrant_concentration):
        """
        Define the contents of what was injected.

        Parameters
        ----------
        titrant_concentration : scalar or sequence
            Concentration(s) of the injected species, interpreted in
            millimole/liter. Sets ``self.titrant_concentration`` and the
            per-species injected amounts ``self.titrant`` (millimole).
        """
        # Concentration of syringe contents
        self.titrant_concentration = Quantity(
            numpy.array(titrant_concentration), ureg.millimole / ureg.liter)

        self.titrant = Quantity(
            numpy.zeros(self.titrant_concentration.size), ureg.millimole)

        for titr in range(self.titrant_concentration.size):
            # Amount of titrant in the syringe (mole)
            if self.titrant_concentration.size == 1:
                # A scalar concentration yields a 0-d array that cannot be
                # indexed, so multiply by the quantity directly.
                # BUG FIX: the original special-cased `titr == 0`, which
                # multiplied by the *whole* concentration array and broke
                # multi-species (size > 1) input.
                self.titrant[titr] = self.volume * self.titrant_concentration
            else:
                self.titrant[titr] = self.volume * self.titrant_concentration[titr]
class BaseExperiment(object):
"""
Abstract base class for an ITC experiment
"""
def __init__(self, data_source, experiment_name, instrument):
    """
    Initialize all experiment attributes to their defaults.

    :param data_source: identifier of where the data comes from
    :type data_source: str
    :param experiment_name: human-readable name for this experiment
    :type experiment_name: str
    :param instrument: the instrument that was used; only its cell
        volume attribute ``V0`` is read here
    :return:
    :rtype:
    """
    # NOTE: the original __init__ assigned several of these attributes
    # twice; the duplicates have been consolidated (behavior unchanged).

    self.data_source = data_source
    self.name = experiment_name
    self.instrument = instrument  # the instrument that was used
    # the source filename from which data is read
    self.data_filename = None

    self.number_of_injections = None  # number of syringe injections
    self.target_temperature = None  # target temperature
    # initial equilibration (delay) time before injections
    self.equilibration_time = None
    self.stir_speed = None  # rate of stirring
    self.stir_rate = None  # rate of stirring (legacy duplicate attribute)
    self.reference_power = None  # power applied to reference cell

    # concentrations of various species in syringe
    self.syringe_contents = None
    # concentrations of various species in sample cell
    self.sample_cell_contents = None
    self.cell_volume = instrument.V0  # volume of liquid in sample cell
    # supposed concentration of titrant in syringe
    self.syringe_concentration = None
    # supposed concentration of receptor in cell
    self.cell_concentration = None

    # list of injections (and their associated data)
    self.injections = None

    # Allocate storage for power measurements.
    self.time = None
    self.heat = None
    self.temperature = None

    # Store data about measured heat liberated during each injection.
    # time at end of filtering period (s)
    self.filter_period_end_time = None
    # time at midpoint of filtering period
    self.filter_period_midpoint_time = None
    # "differential" power applied to sample cell (ucal/s)
    self.differential_power = None
    self.cell_temperature = None  # cell temperature (K)
    self.jacket_temperature = None  # adiabatic jacket temperature (K)
def __str__(self):
    """
    Show details of experiment in human-readable form.

    Builds a multi-line report: experiment-level metadata followed by a
    per-injection table. Assumes the parsing step has already populated
    the metadata and ``self.injections``.
    """
    # TODO Clean up this definition
    string = ""
    string += "EXPERIMENT\n"
    string += "\n"
    string += "Source filename: %s\n" % self.data_filename
    string += "Number of injections: %d\n" % self.number_of_injections
    string += "Target temperature: %.1f K\n" % (
        self.target_temperature / ureg.kelvin)
    try:
        string += "Equilibration time before first injection: %.1f s\n" % (
            self.equilibration_time / ureg.second)
    except TypeError:
        # equilibration_time may still be None (division then raises)
        string += "Equilibration time unknown"
    # TODO temporary, needs to be uniform type among all experiment classes
    if isinstance(self.syringe_concentration, Quantity):
        string += "Syringe concentration: %.3f mM\n" % (self.syringe_concentration / (ureg.millimole / ureg.liter))
    if isinstance(self.cell_concentration, Quantity):
        string += "Cell concentration: %.3f mM\n" % (self.cell_concentration / (ureg.millimole / ureg.liter))
    string += "Cell volume: %.3f ml\n" % (
        self.cell_volume / ureg.milliliter)
    if isinstance(self.cell_concentration, Quantity):
        # NOTE(review): this guard checks cell_concentration but prints
        # reference_power — looks like a copy-paste of the guard above;
        # confirm whether reference_power should be checked instead.
        string += "Reference power: %.3f ucal/s\n" % (self.reference_power / (ureg.microcalorie / ureg.second))
    string += "\n"
    string += "INJECTIONS\n"
    string += "\n"
    string += "%16s %24s %24s %24s %24s %24s\n" % (
        'injection',
        'volume (uL)',
        'duration (s)',
        'collection time (s)',
        'time step (s)',
        'evolved heat (ucal)'
    )
    # for injection in range(self.number_of_injections):
    # string += "%16d %16.3f %16.3f %16.3f %16.3f" % (injection, self.injection_volume[injection] / unit.microliter, self.injection_duration[injection] / unit.second, self.collection_time[injection] / unit.second, self.time_step[injection] / unit.second)
    for injection in self.injections:
        string += "%16d %24.3f %24.3f %24.3f %24.3f %24.3f\n" % (
            injection.number,
            injection.volume /
            ureg.microliter, injection.duration / ureg.second,
            injection.spacing / ureg.second, injection.filter_period /
            ureg.second, injection.evolved_heat / ureg.microcalorie)

    return string
    def write_integrated_heats(self, filename):
        """
        Write integrated heats in a format similar to that used by Origin.

        Parameters
        ----------
        filename : str
            Path of the output file (overwritten if it exists).

        Notes
        -----
        Only the DH (evolved heat, ucal) and INJV (injection volume, uL)
        columns carry real data; Xt, Mt, XMt and NDH are written as zeros.
        The commented-out code shows the intended instantaneous-injection
        (perfusion) binding model for those columns.
        """
        # These are set up for the (not yet implemented) perfusion model;
        # currently unused apart from documenting intent.
        DeltaV = self.injections[0].volume
        V0 = self.cell_volume
        P0 = self.cell_concentration
        Ls = self.syringe_concentration
        # Column header (Origin-like layout).
        string = "%12s %5s %12s %12s %12s %12s\n" % ("DH", "INJV", "Xt", "Mt", "XMt", "NDH")
        for (n, injection) in enumerate(self.injections):
            # Instantaneous injection model (perfusion)
            # d = 1.0 - (DeltaV / V0) # dilution factor (dimensionless)
            # P = V0 * P0 * d**(n+1) # total quantity of protein in sample cell after n injections (mol)
            # L = V0 * Ls * (1. - d**(n+1)) # total quantity of ligand in sample cell after n injections (mol)
            # PLn = 0.5/V0 * ((P + L + Kd*V0) - numpy.sqrt((P + L + Kd*V0)**2 - 4*P*L)); # complex concentration (M)
            # Pn = P/V0 - PLn; # free protein concentration in sample cell after n injections (M)
            # Ln = L/V0 - PLn; # free ligand concentration in sample cell after
            # n injections (M)
            # Placeholder zero concentrations until the model above is wired in.
            Pn = 0.0 * (ureg.millimole / ureg.liter)
            Ln = 0.0 * (ureg.millimole / ureg.liter)
            PLn = 0.0 * (ureg.millimole / ureg.liter)
            NDH = 0.0  # review Not sure what this is
            # Form string.
            string += "%12.5f %5.1f %12.5f %12.5f %12.5f %12.5f\n" % (
                injection.evolved_heat / ureg.microcalorie, injection.volume /
                ureg.microliter, Pn /
                (ureg.millimole / ureg.liter), Ln /
                (ureg.millimole / ureg.liter),
                PLn / (ureg.millimole / ureg.liter), NDH)
        # Final line (uses the last loop iteration's Pn/Ln).
        string += " -- %12.5f %12.5f --\n" % (
            Pn / (ureg.millimole / ureg.liter), Ln / (ureg.millimole / ureg.liter))
        # Write file contents.
        outfile = open(filename, 'w')
        outfile.write(string)
        outfile.close()
        return
def write_heats_csv(self, filename):
"""
Write integrated heats in a csv format
"""
DeltaV = self.injections[0].volume
V0 = self.cell_volume
P0 = self.cell_concentration
Ls = self.syringe_concentration
string = "%12s, %5s, %12s, %12s, %12s, %12s\n" % (
"DH", "INJV", "Xt", "Mt", "XMt", "NDH")
for (n, injection) in enumerate(self.injections):
# Instantaneous injection model (perfusion)
# d = 1.0 - (DeltaV / V0) # dilution factor (dimensionless)
# P = V0 * P0 * d**(n+1) # total quantity of protein in sample cell after n injections (mol)
# L = V0 * Ls * (1. - d**(n+1)) # total quantity of ligand in sample cell after n injections (mol)
# PLn = 0.5/V0 * ((P + L + Kd*V0) - numpy.sqrt((P + L + Kd*V0)**2 - 4*P*L)); # complex concentration (M)
# Pn = P/V0 - PLn; # free protein concentration in sample cell after n injections (M)
# Ln = L/V0 - PLn; # free ligand concentration in sample cell after
# n injections (M)
Pn = 0.0 * (ureg.millimole / ureg.liter)
Ln = 0.0 * (ureg.millimole / ureg.liter)
PLn = 0.0 * (ureg.millimole / ureg.liter)
NDH = 0.0 # review Not sure what this is
# Form string.
string += "%12.5f %5.1f %12.5f %12.5f %12.5f %12.5f\n" % (
injection.evolved_heat / ureg.microcalorie, injection.volume /
ureg.microliter, Pn /
(ureg.millimole / ureg.liter), Ln /
(ureg.millimole / ureg.liter),
PLn / (ureg.millimole / ureg.liter), NDH)
# Final line.
string += " -- %12.5f %12.5f --\n" % (Pn / (ureg.millimole / ureg.liter), Ln / (ureg.millimole / ureg.liter))
# Write file contents.
outfile = open(filename, 'w')
outfile.write(string)
outfile.close()
return
# TODO do we want all the details, including volumes?
def read_integrated_heats(self, heats_file, unit='microcalorie'):
"""
Read integrated heats from an origin file
:param heats_file:
:type heats_file:
:return:
:rtype:
"""
heats = self._parse_heats(heats_file, unit)
if heats.size != self.number_of_injections:
raise ValueError("The number of injections does not match the number of integrated heats in %s" % heats_file)
for inj, heat in enumerate(heats):
self.injections[inj].evolved_heat = heat
@staticmethod
def _parse_heats(heats_file, unit):
"""
Take as input a file with heats, format specification. Output a list of integrated heats in units of microcalorie
:param heats_file:
:type heats_file:
:param write_heats_compatible:
:type write_heats_compatible:
:return:
:rtype:
"""
import pandas as pd
assert isinstance(heats_file, str)
# Need python engine for skip_footer
dataframe = pd.read_table(heats_file, skip_footer=1, engine='python')
heats = numpy.array(dataframe['DH'])
return Quantity(heats, unit)
class ExperimentMicroCal(BaseExperiment):
"""
Data from an ITC experiment.
The experiment consists of several types of data:
* the instrument that was used
* experimental conditions (temperature, stir speed, etc.)
* concentrations of various components in syringe and sample cell
* injection volumes and durations, collection times
* time record of applied power and temperature difference
"""
# TODO Add type verification
def __init__(self, data_filename, experiment_name, instrument):
"""
Initialize an experiment from a Microcal VP-ITC formatted .itc file.
ARGUMENTS
data_filename (String) - the filename of the Microcal VP-ITC formatted .itc file to initialize the experiment from
TODO
* Add support for other formats of datafiles (XML, etc.).
"""
# Initialize.
super(ExperimentMicroCal, self).__init__(data_filename, experiment_name, instrument)
# the source filename from which data is read
# concentrations of various species in syringe
self.syringe_contents = list()
# concentrations of various species in sample cell
self.sample_cell_contents = list()
# list of injections (and their associated data)
self.injections = list()
# time at end of filtering period
# cell temperature
self.name = experiment_name
# Check to make sure we can access the file.
if not os.access(data_filename, os.R_OK):
raise "The file '%s' cannot be opened." % data_filename
# Open the file and read is contents.
infile = open(data_filename, 'r')
lines = infile.readlines()
infile.close()
# Check the header to make sure it is a VP-ITC text-formatted .itc
# file.
if lines[0][0:4] != '$ITC':
raise "File '%s' doesn't appear to be a Microcal VP-ITC data file." % data_filename
# Store the datafile filename.
self.data_filename = data_filename
# Extract and store data about the experiment.
self.number_of_injections = int(lines[1][1:].strip())
self.target_temperature = (int(lines[3][1:].strip()) + 273.15) * ureg.kelvin # convert from C to K
self.equilibration_time = int(lines[4][1:].strip()) * ureg.second
self.stir_rate = int(lines[5][1:].strip()) * ureg.revolutions_per_minute
self.reference_power = float(lines[6][1:].strip()) * ureg.microcalorie / ureg.second
# Extract and store metadata about injections.
injection_number = 0
for line in lines[10:]:
if line[0] == '$':
# Increment injection counter.
injection_number += 1
# Read data about injection.
(injection_volume,
injection_duration,
spacing,
filter_period) = line[1:].strip().split(",")
# Extract data for injection and apply appropriate unit
# conversions.
injectiondict = dict()
injectiondict['number'] = injection_number
injectiondict['volume'] = float(injection_volume) * ureg.microliter
injectiondict['duration'] = float(injection_duration) * ureg.second
# time between beginning of injection and beginning of next injection
injectiondict['spacing'] = float(spacing) * ureg.second
# time over which data channel is averaged to produce a single measurement
injectiondict['filter_period'] = float(filter_period) * ureg.second
self.injections.append(Injection(**injectiondict))
else:
break
# Store additional data about experiment.
parsecline = 11 + self.number_of_injections
# supposed concentration of compound in syringe
self.syringe_concentration = {'ligand': float(lines[parsecline][1:].strip()) * ureg.millimole / ureg.liter}
for inj in self.injections:
# TODO add support for multiple components
inj.contents(sum(self.syringe_concentration.values()))
# supposed concentration of receptor in cell
self.cell_concentration = {'macromolecule': float(lines[parsecline + 1][1:].strip()) * ureg.millimole / ureg.liter}
self.cell_volume = float(lines[parsecline + 2][1:].strip()) * ureg.milliliter # cell volume
self.injection_tick = [0]
# Allocate storage for power measurements.
self.time = list()
self.heat = list()
self.temperature = list()
# Extract lines containing heat measurements.
for (index, line) in enumerate(lines):
if line[:2] == '@0':
break
measurement_lines = lines[index:]
# Count number of power measurements.
nmeasurements = 0
for line in measurement_lines:
if line[0] != '@':
nmeasurements += 1
logger.info("There are %d power measurements." % nmeasurements)
# Store data about measured heat liberated during each injection.
# time at end of filtering period (s)
self.filter_period_end_time = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.second)
# "differential" power applied to sample cell (ucal/s)
self.differential_power = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.microcalorie / ureg.second)
# cell temperature (K)
self.cell_temperature = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.kelvin)
# adiabatic jacket temperature (K)
self.jacket_temperature = ureg.Quantity(numpy.zeros([nmeasurements], numpy.float64), ureg.kelvin)
# Process data.
# TODO this is a mess, need to clean up and do proper input
# verification
nmeasurements = 0
injection_labels = list()
for (index, line) in enumerate(measurement_lines):
if line[0] == '@':
injection_labels.append(nmeasurements)
else:
# Extract data for power measurement.
# TODO: Auto-detect file format?
#
jacket_temperature = 0.0
try:
(time,
power,
temperature,
a,
jacket_temperature,
c,
d,
e,
f) = line.strip().split(",") # Berkeley Auto iTC-200
except:
try:
# works with Shoichet lab VP-ITC .itc files---what are other readings (a,b,c,d)?
(time,
power,
temperature,
a,
jacket_temperature,
c,
d) = line.strip().split(",")
# b looks like adiabatic jacket temperature (~1 degree C below sample temperature)
except:
# works with David Minh's VP-ITC .itc files
(time, power, temperature) = line.strip().split(",")
# Store data about this measurement.
self.filter_period_end_time[nmeasurements] = float(time) * ureg.second
self.differential_power[nmeasurements] = float(power) * ureg.microcalorie / ureg.second
self.cell_temperature[nmeasurements] = (float(temperature) + 273.15) * ureg.kelvin
self.jacket_temperature[nmeasurements] = (float(jacket_temperature) + 273.15) * ureg.kelvin
nmeasurements += 1
# number of injections read, not including @0
number_of_injections_read = len(injection_labels) - 1
# Perform a self-consistency check on the data to make sure all injections are accounted for.
if number_of_injections_read != self.number_of_injections:
logger.warning("Number of injections read (%d) is not equal to number of injections declared (%d)." % (number_of_injections_read, self.number_of_injections) +
"This is usually a sign that the experimental run was terminated prematurely." +
"The analysis will not include the final %d injections declared." % (self.number_of_injections - number_of_injections_read))
# Remove extra injections.
self.injections = self.injections[0:number_of_injections_read]
self.number_of_injections = number_of_injections_read
logger.debug("self.injections has %d elements" % (len(self.injections)))
# Annotate list of injections.
for injection in self.injections:
injection_number = injection.number
logger.debug("%5d %8d" % (injection_number, injection_labels[injection_number]))
injection.first_index = injection_labels[injection_number]
if injection_number < len(injection_labels) - 1:
injection.last_index = injection_labels[
injection_number + 1] - 1
else:
injection.last_index = nmeasurements - 1
# Fit baseline.
self.fit_gaussian_process_baseline()
# Integrate heat evolved from each injection.
self.integrate_heat()
return
def write_power(self, filename):
"""
DEBUG: Write power.
"""
outfile = open(filename, 'w')
outfile.write("%%%7s %16s %16s\n" % ('time (s)', 'heat (ucal/s)', 'temperature (K)'))
for index in range(len(self.filter_period_end_time)):
outfile.write("%8.1f %16.8f %16.8f\n" % (self.filter_period_end_time[index] / ureg.second,
self.differential_power[index] / (ureg.microcalorie / ureg.second),
self.cell_temperature[index] / ureg.kelvin
)
)
outfile.close()
return
@staticmethod
def _plot_confidence_interval(axes, full_x, sigma, y_pred):
# Confidence interval
axes.fill(numpy.concatenate([full_x, full_x[::-1]]),
numpy.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]
]),
alpha=.7, fc='black', ec='None', label='95% confidence interval')
    def _plot_gaussian_baseline(self, full_x, full_y, sigma, x, y, y_pred):
        """
        Plot the raw data, the points used for the baseline fit, the
        GP-predicted baseline and its 95% confidence band, and save the
        figure as '<name>-baseline.png'.

        Parameters: full time/power arrays (full_x, full_y), the GP
        predictive standard deviation (sigma), the subset of points used to
        fit the baseline (x, y) and the predicted baseline (y_pred).
        """
        from matplotlib.figure import Figure
        from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
        # Agg canvas: render straight to PNG, no display needed.
        figure = Figure()
        canvas = FigureCanvas(figure)
        # NOTE(review): 'axisbg' was removed from matplotlib (2.2+) in favor
        # of 'facecolor' -- confirm the pinned matplotlib version.
        axes = figure.add_subplot(1, 1, 1, axisbg='whitesmoke')
        # Adds a 95% confidence interval to the plot
        ExperimentMicroCal._plot_confidence_interval(axes, full_x, sigma, y_pred)
        # Entire set of data
        axes.plot(full_x, full_y, 'o', markersize=2, lw=1, color='deepskyblue', alpha=.5, label='Raw data')
        # Points for fit
        axes.plot(x, y, 'o', color='crimson', markersize=2, alpha=.8, label='Fitted data')
        # Prediction
        axes.plot(full_x, y_pred, 'o', markersize=1, mec='w', mew=1, color='k', alpha=.5, label='Predicted baseline')
        # Plot injection time markers (vertical lines at injection starts).
        [ymin, ymax] = axes.get_ybound()
        for injection in self.injections:
            # timepoint at start of syringe injection
            last_index = injection.first_index
            t = self.filter_period_end_time[last_index] / ureg.second
            axes.plot([t, t], [ymin, ymax], '-', color='crimson')
        # Adjust axis to zoom in on baseline: half a band of padding above
        # and below the baseline's own range.
        ymax = self.baseline_power.max() / (ureg.microcalorie / ureg.second)
        ymin = self.baseline_power.min() / (ureg.microcalorie / ureg.second)
        width = ymax - ymin
        ymax += width / 2
        ymin -= width / 2
        axes.set_ybound(ymin, ymax)
        axes.set_xlabel('time (s)')
        axes.set_ylabel(r'differential power ($\mu$cal / s)')
        axes.legend(loc='upper center', bbox_to_anchor=(0.5, 0.1), ncol=4, fancybox=True, shadow=True, markerscale=3, prop={'size': 6})
        axes.set_title(self.data_filename)
        canvas.print_figure(self.name + '-baseline.png', dpi=500)
    def _plot_baseline_subtracted(self, x, y, raw=True, baseline=True):
        """Plot the baseline-subtracted data and save it as
        '<name>-subtracted.png'.

        Parameters
        ----------
        x, y : array-like
            Times and baseline-subtracted differential powers.
        raw : bool, optional
            Also overlay the raw differential power on a twin y-axis.
        baseline : bool, optional
            Also overlay the fitted baseline (drawn on the raw-power axis,
            so it only appears when ``raw`` is True).
        """
        from matplotlib.figure import Figure
        from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
        figure = Figure()
        canvas = FigureCanvas(figure)
        axes1 = figure.add_subplot(1, 1, 1, axisbg='whitesmoke')
        # Points for fit
        axes1.plot(x, y, 'o', color='deepskyblue', markersize=2, alpha=1, label='Baseline-subtracted data')
        axes1.set_xlabel('time (s)')
        axes1.set_ylabel(r' corr. differential power ($\mu$cal / s)')
        axes1.legend(loc='upper center', bbox_to_anchor=(0.2, 0.95), ncol=1, fancybox=True, shadow=True, markerscale=3, prop={'size': 6})
        if raw:
            # Second y-axis for the raw (unsubtracted) power trace.
            axes2 = axes1.twinx()
            axes2.plot(x, self.differential_power, 'o', color='gray', markersize=2, alpha=.3, label='Raw data')
            axes2.set_ylabel(r'raw differential power ($\mu$cal / s)')
            axes2.legend(loc='upper center', bbox_to_anchor=(0.8, 0.95), ncol=1, fancybox=True, shadow=True, markerscale=3, prop={'size': 6})
            if baseline:
                # Baseline overlay shares the raw-power axis.
                axes2.plot(x, self.baseline_power, '-', color='black', alpha=.3, label='baseline')
        axes1.set_title(self.data_filename)
        canvas.print_figure(self.name + '-subtracted.png', dpi=500)
def _retrieve_fit_indices(self, frac):
"""Form list of data to fit.
"""
x = list()
y = list()
fit_indices = list()
# Add data prior to first injection
for index in range(0, self.injections[0].first_index):
x.append(self.filter_period_end_time[index] / ureg.second)
y.append(self.differential_power[index] / (ureg.microcalorie / ureg.second))
fit_indices.append(index)
# Add last x% of each injection.
for injection in self.injections:
start_index = injection.first_index
end_index = injection.last_index + 1
start_index = end_index - int((end_index - start_index) * frac)
for index in range(start_index, end_index):
x.append(self.filter_period_end_time[index] / ureg.second)
y.append(self.differential_power[index] / (ureg.microcalorie / ureg.second))
fit_indices.append(index)
x = numpy.array(x)
y = numpy.array(y)
fit_indices = numpy.array(fit_indices)
return fit_indices, x, y
    def fit_gaussian_process_baseline(self, frac=0.3, theta0=4.7, nugget=1.0, plot=True):
        """
        Fit the instrument baseline with a Gaussian process and subtract it.

        Parameters
        ----------
        frac : float, optional
            Fraction of each injection's tail (plus the pre-injection data)
            used to fit the baseline.
        theta0 : float, optional
            Initial length-scale parameter of the squared-exponential kernel.
        nugget : float, optional
            Regularization added to the diagonal (observation noise).
        plot : bool, optional
            If True, save diagnostic plots of the fit and the subtracted data.

        Side effects: sets ``self.baseline_power``, ``self.baseline_fit_data``
        and ``self.baseline_subtracted``.
        """
        # NOTE(review): sklearn.gaussian_process.GaussianProcess was removed
        # in scikit-learn 0.20 (GaussianProcessRegressor replaces it); this
        # code requires an older scikit-learn.
        from sklearn import gaussian_process

        # Retrieve a reduced set of data
        # (data up until first injection and x percent before every injection)
        fit_indices, x, y = self._retrieve_fit_indices(frac)

        # sklearn requires a 2d array, so make it pseudo 2d
        full_x = numpy.atleast_2d(self.filter_period_end_time).T
        x = numpy.atleast_2d(x).T
        full_y = numpy.array(self.differential_power).T
        y = numpy.array(y).T
        gp = gaussian_process.GaussianProcess(regr='quadratic',
                                              corr='squared_exponential',
                                              theta0=theta0,
                                              nugget=nugget,
                                              random_start=100)
        # Fit only based on the reduced set of the data
        gp.fit(x, y)
        # Predict over the full time range, with per-point MSE for the
        # confidence band.
        y_pred, mean_squared_error = gp.predict(full_x, eval_MSE=True)
        sigma = numpy.sqrt(mean_squared_error)
        self.baseline_power = Quantity(y_pred, 'microcalories per second')
        self.baseline_fit_data = {'x': full_x, 'y': y_pred, 'indices': fit_indices}
        self.baseline_subtracted = self.differential_power - self.baseline_power
        if plot:
            self._plot_gaussian_baseline(full_x, full_y, sigma, x, y, y_pred)
            self._plot_baseline_subtracted(full_x, self.baseline_subtracted)
    def integrate_heat(self):
        """
        Compute the heat evolved from each injection from differental power
        timeseries data.

        For each injection, sums the baseline-corrected differential power
        over the injection's measurement window, multiplied by the constant
        sampling interval (filter period), and stores the negated result on
        ``injection.evolved_heat`` (exothermic heat is positive).
        """
        # Integrate heat produced by each injection.
        for injection in self.injections:
            # determine initial and final samples for injection i
            # index of timepoint for first filtered differential power measurement
            first_index = injection.first_index
            # index of timepoint for last filtered differential power measurement
            last_index = injection.last_index

            # Determine excess energy input into sample cell (with respect
            # to reference cell) throughout this injection and measurement
            # period: sum of (power - baseline) samples times the sampling
            # interval.
            excess_energy_input = injection.filter_period * (
                self.differential_power[
                    first_index:(last_index + 1)] - self.baseline_power[
                    first_index:(last_index + 1)]).sum()

            logger.debug("injection %d, filter period %f s, integrating sample %d to %d" % (
                injection.number,
                injection.filter_period / ureg.second,
                first_index,
                last_index))

            # Determine total heat evolved.
            evolved_heat = - excess_energy_input

            # Store heat evolved from this injection.
            injection.evolved_heat = evolved_heat
        return
class ExperimentYaml(BaseExperiment):
@staticmethod
def _parse_yaml(yaml_filename):
"""Open the yaml file and read is contents"""
import yaml
with open(yaml_filename, 'r') as infile:
# Experiment parameters
yaml_input = yaml.load(infile)
infile.close()
return yaml_input
    def __init__(self, yaml_filename, experiment_name, instrument):
        """
        Initialize an experiment from a YAML experiment-description file.

        Parameters
        ----------
        yaml_filename : str
            Filename of the YAML file describing the experiment (injection
            heats/volumes, concentrations, temperature, and their units).
        experiment_name : str
            Human-readable name for this experiment.
        instrument
            The instrument the data was collected on.

        TODO
          * Add support for other formats of datafiles (XML, etc.).
        """
        # Initialize.
        super(ExperimentYaml, self).__init__(yaml_filename, experiment_name, instrument)
        # the source filename from which data is read
        # concentrations of various species in syringe
        self.syringe_contents = dict()
        self.syringe_concentration = dict()
        # concentrations of various species in sample cell
        self.sample_cell_contents = dict()
        self.cell_concentration = dict()
        # list of injections (and their associated data)
        self.injections = list()
        # time at end of filtering period
        self.name = experiment_name
        # Store the datafile filename.
        self.data_filename = yaml_filename

        # Check to make sure we can access the file.
        if not os.access(yaml_filename, os.R_OK):
            raise IOError("The file '%s' cannot be opened." % yaml_filename)

        yaml_input = self._parse_yaml(yaml_filename)

        # TODO more preliminary dict entry validations
        if len(yaml_input['injection_heats']) != len(yaml_input['injection_volumes']):
            raise ValueError('Mismatch between number of heats and volumes per injection in %s.' % yaml_filename)

        # Extract and store data about the experiment.
        self.number_of_injections = len(yaml_input['injection_heats'])
        self.temperature = Quantity(yaml_input['temperature'],
                                    yaml_input['temperature_unit'])

        # Store the stated syringe concentration(s)
        for key in yaml_input['syringe_concentrations'].keys():
            self.syringe_concentration[key] = Quantity(yaml_input['syringe_concentrations'][key],
                                                       yaml_input['concentration_unit']).to('millimole per liter')

        # Store the stated cell concentration(s)
        for key in yaml_input['sample_cell_concentrations'].keys():
            self.cell_concentration[key] = Quantity(yaml_input['sample_cell_concentrations'][key],
                                                    yaml_input['concentration_unit']).to('millimole per liter')

        # Extract and store metadata about injections.
        for index, (heat, volume) in enumerate(zip(yaml_input['injection_heats'], yaml_input['injection_volumes']), start=1):
            # Extract data for injection and apply appropriate unit conversions.
            # Entering 0.0 for any values not in the yaml.
            # TODO some values are set in integrate_heat functions, but we
            # currently ignore all but the heat
            injectiondict = dict()
            injectiondict['number'] = index
            injectiondict['volume'] = Quantity(volume, yaml_input['volume_unit'])
            injectiondict['duration'] = 0.0 * ureg.second
            # time between beginning of injection and beginning of next
            # injection
            injectiondict['spacing'] = 0.0 * ureg.second
            # time over which data channel is averaged to produce a single
            # measurement
            injectiondict['filter_period'] = 0.0 * ureg.second
            # Possible input includes heat / moles of injectant, or raw heat
            injectiondict['titrant_amount'] = sum(
                self.syringe_concentration.values()) * Quantity(volume, yaml_input['volume_unit'])
            try:
                # Heat already carries energy dimensions.
                injectiondict['evolved_heat'] = Quantity(heat, yaml_input['heat_unit']).to('microcalorie')
            except DimensionalityError:
                # Heat was given per mole of injectant: convert to raw heat.
                # TODO This is probably only really correct for one syringe component
                # Multipy by number of moles injected
                evolved_heat = Quantity(heat, yaml_input['heat_unit']) * (Quantity(volume, yaml_input['volume_unit']) * sum(self.syringe_concentration.values()))
                injectiondict['evolved_heat'] = evolved_heat.to('microcalorie')

            # Store injection.
            self.injections.append(Injection(**injectiondict))

        # Collect heats and volumes into unit-carrying arrays for fitting.
        self.observed_injection_heats = Quantity(numpy.zeros(len(self.injections)), 'microcalorie')
        self.injection_volumes = Quantity(numpy.zeros(len(self.injections)), 'milliliter')

        for index, injection in enumerate(self.injections):
            self.observed_injection_heats[index] = injection.evolved_heat
            self.injection_volumes[index] = injection.volume
        return
class ExperimentOrigin(BaseExperiment):
    """Placeholder for experiments loaded from Origin exports (not yet implemented)."""
    pass
| gpl-3.0 |
aewhatley/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
    """Metric SMACOF reproduces the worked example from Borg & Groenen,
    "Modern Multidimensional Scaling", p. 154."""
    dissimilarities = np.array([[0, 5, 3, 4],
                                [5, 0, 2, 2],
                                [3, 2, 0, 1],
                                [4, 2, 1, 0]])
    initial_config = np.array([[-.266, -.539],
                               [.451, .252],
                               [.016, -.238],
                               [-.200, .524]])
    expected = np.array([[-1.415, -2.471],
                         [1.633, 1.107],
                         [.249, -.067],
                         [-.468, 1.431]])
    embedding, _ = mds.smacof(dissimilarities, init=initial_config,
                              n_components=2, max_iter=1, n_init=1)
    assert_array_almost_equal(embedding, expected, decimal=3)
def test_smacof_error():
    """smacof rejects malformed dissimilarity matrices and bad inits."""
    # Not symmetric similarity matrix:
    asymmetric = np.array([[0, 5, 9, 4],
                           [5, 0, 2, 2],
                           [3, 2, 0, 1],
                           [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, asymmetric)

    # Not squared similarity matrix:
    non_square = np.array([[0, 5, 9, 4],
                           [5, 0, 2, 2],
                           [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, non_square)

    # init not None and not correct format:
    valid_sim = np.array([[0, 5, 3, 4],
                          [5, 0, 2, 2],
                          [3, 2, 0, 1],
                          [4, 2, 1, 0]])
    bad_init = np.array([[-.266, -.539],
                         [.016, -.238],
                         [-.200, .524]])
    assert_raises(ValueError, mds.smacof, valid_sim, init=bad_init, n_init=1)
def test_MDS():
    """MDS estimator accepts a precomputed dissimilarity matrix."""
    dissimilarities = np.array([[0, 5, 3, 4],
                                [5, 0, 2, 2],
                                [3, 2, 0, 1],
                                [4, 2, 1, 0]])
    estimator = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
    estimator.fit(dissimilarities)
| bsd-3-clause |
Alex-Ian-Hamilton/sunpy | sunpy/lightcurve/sources/norh.py | 1 | 4421 | """Provides programs to process and analyse NoRH lightcurve data."""
from __future__ import absolute_import
import datetime
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import pandas
from sunpy.lightcurve import LightCurve
from sunpy.time import parse_time
from sunpy import config
from sunpy.extern.six.moves import urllib
TIME_FORMAT = config.get("general", "time_format")
__all__ = ['NoRHLightCurve']
class NoRHLightCurve(LightCurve):
"""
Nobeyama Radioheliograph Correlation LightCurve.
Nobeyama Radioheliograph (NoRH) is a radio telescope dedicated to observing
the Sun. It consists of 84 parabolic antennas with 80 cm diameter,
sitting on lines of 490 m long in the east/west and of 220 m long in the north/south.
It observes the full solar disk at 17 GHz and 34 GHz with a temporal resolution
down to 0.1 second resolution (typically 1 s). It is located in Japan at
`35.941667, 138.475833 <https://www.google.com/maps/place/Nobeyama+radio+observatory/@35.9410098,138.470243,14z/data=!4m2!3m1!1s0x0:0xe5a3821a5f6a3c4b>`_.
Its first observation was in April, 1992 and daily 8-hour observations are
available starting June, 1992.
Examples
--------
>>> import sunpy.lightcurve
>>> norh = sunpy.lightcurve.NoRHLightCurve.create('~/Data/norh/tca110607') # doctest: +SKIP
>>> norh = sunpy.lightcurve.NoRHLightCurve.create('2011/08/10')
>>> norh = sunpy.lightcurve.NoRHLightCurve.create('2011/08/10',wavelength='34')
>>> norh.peek() # doctest: +SKIP
References
----------
* `Nobeyama Radioheliograph Homepage <http://solar.nro.nao.ac.jp/norh/>`_
* `Analysis Manual <http://solar.nro.nao.ac.jp/norh/doc/manuale/index.html>`_
* `Nobeyama Correlation Plots <http://solar.nro.nao.ac.jp/norh/html/cor_plot/>`_
"""
    def peek(self, **kwargs):
        """Plots the NoRH lightcurve

        .. plot::

            from sunpy import lightcurve as lc
            from sunpy.data.sample import NORH_LIGHTCURVE
            norh = lc.NoRHLightCurve.create(NORH_LIGHTCURVE)
            norh.peek()

        Parameters
        ----------
        **kwargs : dict
            Any additional plot arguments that should be used
            when plotting.
        """
        plt.figure()
        axes = plt.gca()
        # Legend label built from the OBS-FREQ header, e.g. '17 GHZ'.
        data_lab = self.meta['OBS-FREQ'][0:2] + ' ' + self.meta['OBS-FREQ'][2:5]
        axes.plot(self.data.index, self.data, label=data_lab)
        # Correlation values span several decades; a log scale with a fixed
        # range shows the quiet-Sun baseline and bursts together.
        axes.set_yscale("log")
        axes.set_ylim(1e-4, 1)
        axes.set_title('Nobeyama Radioheliograph')
        axes.set_xlabel('Start time: ' + self.data.index[0].strftime(TIME_FORMAT))
        axes.set_ylabel('Correlation')
        axes.legend()
        plt.show()
@classmethod
def _get_url_for_date(cls, date, **kwargs):
"""
This method retrieves the url for NoRH correlation data for the given
date.
"""
# default urllib password anonymous@ is not accepted by the NoRH FTP
# server. include an accepted password in base url
baseurl = 'ftp://anonymous:mozilla@example.com@solar-pub.nao.ac.jp/pub/nsro/norh/data/tcx/'
# date is a datetime object
if 'wavelength' in kwargs:
if kwargs['wavelength'] == '34':
final_url = urllib.parse.urljoin(
baseurl, date.strftime('%Y/%m/tcz%y%m%d'))
else:
final_url = urllib.parse.urljoin(
baseurl, date.strftime('%Y/%m/tca%y%m%d'))
return final_url
@staticmethod
def _parse_fits(filepath):
"""This method parses NoRH tca and tcz correlation files."""
hdulist = fits.open(filepath)
header = OrderedDict(hdulist[0].header)
# For these NoRH files, the time series data is recorded in the primary
# HDU
data = hdulist[0].data
# No explicit time array in FITS file, so construct the time array from
# the FITS header
obs_start_time=parse_time(header['DATE-OBS'] + 'T' + header['CRVAL1'])
length = len(data)
cadence = np.float(header['CDELT1'])
sec_array = np.linspace(0, length-1, (length/cadence))
norh_time = []
for s in sec_array:
norh_time.append(obs_start_time + datetime.timedelta(0,s))
return header, pandas.DataFrame(data, index=norh_time)
| bsd-2-clause |
trankmichael/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have installed.
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
    """Benchmark with scikit-learn decision tree classifier"""
    from sklearn.tree import DecisionTreeClassifier

    # Avoid measuring a garbage-collection pause.
    gc.collect()

    # Time fit + predict on the same data.
    start = datetime.now()
    DecisionTreeClassifier().fit(X, Y).predict(X)
    elapsed = datetime.now() - start

    scikit_classifier_results.append(
        elapsed.seconds + elapsed.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
    """Benchmark with scikit-learn decision tree regressor"""
    from sklearn.tree import DecisionTreeRegressor

    # Avoid measuring a garbage-collection pause.
    gc.collect()

    # Time fit + predict on the same data.
    start = datetime.now()
    DecisionTreeRegressor().fit(X, Y).predict(X)
    elapsed = datetime.now() - start

    scikit_regressor_results.append(
        elapsed.seconds + elapsed.microseconds / mu_second)
if __name__ == '__main__':
    print('============================================')
    print('Warning: this is going to take a looong time')
    print('============================================')

    # Benchmark 1: growing sample count at fixed dimensionality (dim=10).
    n = 10
    step = 10000
    n_samples = 10000
    dim = 10
    n_classes = 10
    for i in range(n):
        print('============================================')
        print('Entering iteration %s of %s' % (i, n))
        print('============================================')
        n_samples += step
        X = np.random.randn(n_samples, dim)
        Y = np.random.randint(0, n_classes, (n_samples,))
        bench_scikit_tree_classifier(X, Y)
        Y = np.random.randn(n_samples)
        bench_scikit_tree_regressor(X, Y)

    xx = range(0, n * step, step)
    pl.figure('scikit-learn tree benchmark results')
    pl.subplot(211)
    pl.title('Learning with varying number of samples')
    pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
    pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
    pl.legend(loc='upper left')
    pl.xlabel('number of samples')
    pl.ylabel('Time (s)')

    # Benchmark 2: growing dimensionality at a fixed 100 samples.
    # Reset the result accumulators collected by the bench functions.
    scikit_classifier_results = []
    scikit_regressor_results = []
    n = 10
    step = 500
    start_dim = 500
    n_classes = 10

    dim = start_dim
    for i in range(0, n):
        print('============================================')
        print('Entering iteration %s of %s' % (i, n))
        print('============================================')
        dim += step
        X = np.random.randn(100, dim)
        Y = np.random.randint(0, n_classes, (100,))
        bench_scikit_tree_classifier(X, Y)
        Y = np.random.randn(100)
        bench_scikit_tree_regressor(X, Y)

    xx = np.arange(start_dim, start_dim + n * step, step)
    pl.subplot(212)
    pl.title('Learning in high dimensional spaces')
    pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
    pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
    pl.legend(loc='upper left')
    pl.xlabel('number of dimensions')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    pl.show()
| bsd-3-clause |
roshantha9/AbstractManycoreSim | src/RunSim_Exp_HEVCTile_Mapping_varCCR.py | 1 | 19318 | import sys, os, csv, pprint, math
import argparse
import numpy as np
import random
import shutil
import time
import json
## uncomment when running under CLI only version ##
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.stats
from matplotlib.colors import ListedColormap, NoNorm
from matplotlib import mlab
from itertools import cycle # for automatic markers
import matplotlib.cm as cm
from matplotlib.font_manager import FontProperties
from libProcessingElement.LocalScheduler import LocalRRScheduler, \
LocalEDFScheduler, \
LocalMPEG2FrameEDFScheduler, \
LocalMPEG2FrameListScheduler, \
LocalMPEG2FramePriorityScheduler, \
LocalMPEG2FramePriorityScheduler_WithDepCheck, \
LocalHEVCFramePriorityScheduler_WithDepCheck, \
LocalHEVCTilePriorityScheduler_WithDepCheck
from libResourceManager.RMTypes import RMTypes
from libProcessingElement.CPUTypes import CPUTypes
from libResourceManager.Mapper.MapperTypes import MapperTypes
from libTaskDispatcher.TDTypes import TDTypes
from libResourceManager.AdmissionControllerOptions import AdmissionControllerOptions
from libMappingAndScheduling.SemiDynamic.TaskMappingSchemes import TaskMappingSchemes
from libMappingAndScheduling.SemiDynamic.TaskSemiDynamicPrioritySchemes import TaskSemiDynamicPrioritySchemes
from libMappingAndScheduling.SemiDynamic.TaskMappingAndPriAssCombinedSchemes import TaskMappingAndPriAssCombinedSchemes
from libMappingAndScheduling.SemiDynamic.TaskTileMappingAndPriAssCombinedSchemes import TaskTileMappingAndPriAssCombinedSchemes
from libMappingAndScheduling.FullyDynamic.TaskMappingSchemesFullyDyn import TaskMappingSchemesFullyDyn
from libApplicationModel.Task import TaskModel
from util_scripts.gen_res_list import get_res_list
from SimParams import SimParams
import Multicore_MPEG_Model as MMMSim
import libApplicationModel.HEVCWorkloadParams as HEVCWLP
# Root directory where this experiment's result files are written.
EXP_DATADIR = "experiment_data/hevc_tile_mapping_varCCR/"
#EXP_DATADIR = "experiment_data/hevc_tiles_mapping_priassfcfs/"

# Candidate NoC dimensions (w, h). NOTE(review): the run below hard-codes
# a 6x6 NoC; this list appears unused in the visible code -- confirm.
NOC_SIZE = [(3,3), (5,5), (7,7), (9,9), (10,10)]

# Per-report filename suffixes; each is appended to an experiment-specific
# prefix by _get_fname() to form the full output paths.
global_tm_fname = "_timeline.png"
global_vs_bs_fname = "_vsbs.js"
global_util_fname = "_util.js"
global_wf_res_fname = "_wfressumm.js"
global_gops_opbuff_fname = "_gopsopbuffsumm.js"
global_rmtbl_dt_fname = "_rmtbldt.js"
global_ibuff_fname = "_ibuff.js"
global_obuff_fname = "_obuff.js"
global_nodetqs_fname = "_nodetqs.js"
global_rmtaskrelease_fname = "_rmtaskrel.js"
global_mappingandpriass_fname = "_mappingandpriass.js"
global_flowscompleted_fname = "_flwcompleted.js"
global_flowscompletedshort_fname = "_flwcompletedshort.js"
global_nodetaskexectime_fname = "_nodetaskexectime.js"
global_schedtestresults_fname = "_schedtestresults.js"
global_utilvsschedresults_fname = "_utilvsschedresults.js"
global_rmtaskmappingtable_fname = "_rmtaskmappingtable.js"
global_rmvolatiletaskmappingtable_fname = "_rmvolatiletaskmappingtable.js"
global_processedctus_fname = "_processedctus.js"
global_taskscompleted_fname = "_taskscompleted.js"
global_mapperexecoverhead_fname = "_mapperexecoverhead.js"
global_smartmmcid_fname = "_smartmmcid.js"
global_jobccrinfo_fname = "_jobccrinfo.js"
# do we use ms signalling or not
def _get_feedback_status(cmbmppri_type):
    """Return True when the given combined mapping/priority-assignment
    scheme requires monitoring-based (MS) feedback signalling."""
    # Only the *_WITHMONITORING_AVGCC* schemes rely on task-complete signals.
    monitoring_schemes = (
        TaskTileMappingAndPriAssCombinedSchemes.TASKTILEMAPPINGANDPRIASSCOMBINED_PRLOWRESFIRST_LOWUTIL_WITHMONITORING_AVGCC_V1,
        TaskTileMappingAndPriAssCombinedSchemes.TASKTILEMAPPINGANDPRIASSCOMBINED_PRLOWRESFIRST_MOSTSLACK_WITHMONITORING_AVGCC_V1,
        TaskTileMappingAndPriAssCombinedSchemes.TASKTILEMAPPINGANDPRIASSCOMBINED_PRLOWRESFIRST_CLUSTLS_MOSTSLACK_WITHMONITORING_AVGCC,
    )
    return cmbmppri_type in monitoring_schemes
###################################################################################################
# SCENARIO based runsim for different types of AC/mappers/CCR/noc size
###################################################################################################
def runSim_TileMapping_varCCR(
                forced_seed = None,
                cmbmppri_type=None,
                wl_config=None, # we assume 1 vid per wf
                memp_select = None,
                cc_scale_down = None
                ):
    """Run one HEVC tile-mapping simulation with a scaled-down CU
    computation cost (which varies the communication-to-computation ratio).

    Parameters:
        forced_seed: RNG seed applied to random, numpy and the frame generator.
        cmbmppri_type: combined mapping + priority-assignment scheme id.
        wl_config: workload configuration name (one video per workflow).
        memp_select: MMC smart node (memory port) selection type.
        cc_scale_down: multiplier applied to the CU decoding cost scale
            factors (lower => cheaper computation => higher CCR).

    Side effects: mutates many SimParams globals and the module-level
    HEVCWLP scale factors, then writes report files under EXP_DATADIR.
    The run is skipped when its tasks-completed report already exists.
    """
    seed = forced_seed
    print "SEED === " + str(seed)

    # Seed every RNG the simulator uses, for reproducibility.
    random.seed(seed)
    np.random.seed(seed)

    # get resolution list
    res_list = get_res_list(wl_config)

    # fixed params
    SimParams.SIM_RUNTIME = 10000
    SimParams.HEVC_DUMP_FRAME_DATAFILE = False
    SimParams.HEVC_LOAD_FRAME_DATAFILE = False
    SimParams.HEVC_FRAME_GENRAND_SEED = seed
    SimParams.HEVC_TILELEVEL_SPLITTING_ENABLE = True
    # Tile-level entities throughout: scheduler, RM, CPU nodes, mapper, TD.
    SimParams.LOCAL_SCHEDULER_TYPE = LocalHEVCTilePriorityScheduler_WithDepCheck()
    SimParams.SIM_ENTITY_RESOURCEMANAGER_CLASS = RMTypes.OPENLOOP
    SimParams.SIM_ENTITY_CPUNODE_CLASS = CPUTypes.OPENLOOP_HEVC_TILE_LEVEL
    SimParams.TASK_MODEL = TaskModel.TASK_MODEL_HEVC_TILE_LEVEL
    SimParams.SIM_ENTITY_MAPPER_CLASS = MapperTypes.OPENLOOP_WITH_HEVCTILE
    SimParams.SIM_ENTITY_TASKDISPATCHER_CLASS = TDTypes.OPENLOOP_WITH_HEVCTILE
    #SimParams.MS_SIGNALLING_NOTIFY_TASK_COMPLETE_ENABLE = _get_feedback_status(cmbmppri_type)
    # MS feedback signalling is force-disabled here (the scheme-dependent
    # _get_feedback_status() call above is intentionally commented out).
    SimParams.MS_SIGNALLING_NOTIFY_TASK_COMPLETE_ENABLE = False
    print "SimParams.MS_SIGNALLING_NOTIFY_TASK_COMPLETE_ENABLE :: ", SimParams.MS_SIGNALLING_NOTIFY_TASK_COMPLETE_ENABLE
    SimParams.MS_SIGNALLING_NOTIFY_FLOW_COMPLETE_ENABLE = False
    SimParams.RESOURCEMANAGER_USE_VOLATILE_TMTBL = True
    SimParams.MAPPING_PREMAPPING_ENABLED = True
    SimParams.COMBINED_MAPPING_AND_PRIASS = cmbmppri_type
    SimParams.DYNAMIC_TASK_MAPPING_SCHEME = TaskMappingSchemes.TASKMAPPINGSCHEMES_NONE # this will be overridden
    SimParams.DYNAMIC_TASK_PRIASS_SCHEME = TaskSemiDynamicPrioritySchemes.TASKSEMIDYNAMICPRIORITYSCHEMES_NONE
    SimParams.MMC_SMART_NODE_SELECTION_ENABLE = True
    SimParams.MMC_SMART_NODE_SELECTION_TYPE = memp_select
    SimParams.MMC_ENABLE_DATATRANSMISSION_MODELLING = True
    SimParams.HEVC_GOPGEN_USEPROBABILISTIC_MODEL = True
    # Fixed 6x6 NoC; one workflow per resolution in the workload config.
    SimParams.NOC_W = 6
    SimParams.NOC_H = 6
    SimParams.NUM_NODES = (SimParams.NOC_W * SimParams.NOC_H)
    SimParams.NUM_WORKFLOWS = len(res_list)
    SimParams.DVB_RESOLUTIONS_FIXED = res_list
    # SimParams.NUM_WORKFLOWS = 1
    # SimParams.DVB_RESOLUTIONS_FIXED = [#(3840,2160),
    #                                    #(2560,1440),
    #                                    #(1920,1080),
    #                                    #(1280,720),
    #                                    #(854,480),
    #                                    (640,360),
    #                                    #(512,288),
    #                                    #(426,240), <-- not a multiple of 8
    #                                    ]
    SimParams.DVB_RESOLUTIONS_SELECTED_RANDOM = False
    SimParams.NUM_INPUTBUFFERS = SimParams.NUM_WORKFLOWS
    SimParams.WFGEN_MIN_GOPS_PER_VID = 5
    SimParams.WFGEN_MAX_GOPS_PER_VID = 5
    SimParams.WFGEN_INITIAL_VID_GAP_MIN = 0.0
    SimParams.WFGEN_INITIAL_VID_GAP_MAX = 0.1

    pprint.pprint(SimParams.DVB_RESOLUTIONS_FIXED)
    pprint.pprint(SimParams.NUM_WORKFLOWS)

    # scale down the computation cost of CU decoding (module-level mutation
    # of HEVCWLP -- persists for the rest of the process)
    HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['ICU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['ICU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['ICU'][1]*float(cc_scale_down))
    HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['PCU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['PCU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['PCU'][1]*float(cc_scale_down))
    HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['BCU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['BCU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['BCU'][1]*float(cc_scale_down))
    HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['SkipCU'] = (HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['SkipCU'][0]*float(cc_scale_down), HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR['SkipCU'][1]*float(cc_scale_down))

    print "------"
    print "new HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR ::"
    pprint.pprint(HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR)
    print "------"

    # e.g. 0.5 -> "05", used inside the experiment key below
    cc_scale_down_str_rep_withoutdot = str(cc_scale_down).replace(".","")

    # getting the reporting file name prefix
    exp_key = "ac"+str(SimParams.AC_TEST_OPTION) + \
                "mp"+str(SimParams.DYNAMIC_TASK_MAPPING_SCHEME)+ \
                "pr"+str(SimParams.DYNAMIC_TASK_PRIASS_SCHEME)+ \
                "cmb"+str(SimParams.COMBINED_MAPPING_AND_PRIASS) + \
                "mmp"+str(SimParams.MMC_SMART_NODE_SELECTION_TYPE) +\
                "ccrs"+str(cc_scale_down_str_rep_withoutdot)

    subdir1 = EXP_DATADIR + wl_config + "/" + exp_key + "/"
    subdir2 = subdir1 + "seed_"+str(seed)+"/"
    final_subdir = subdir2
    fname_prefix = "HEVCTileSplitTest__" + exp_key + "_"
    final_fname = fname_prefix+str(SimParams.NOC_H)+"_"+str(SimParams.NOC_W)+"_"

    # Skip the whole run when the final report file is already on disk.
    check_fname = _get_fname(final_subdir, final_fname)['taskscompleted_fname']
    print "Checking file exists : " + str(check_fname)
    if(_check_file_exists(check_fname) == True):
        print "Simulation already exists.."
    else:
        print "----------------------------------------------------------------------------------------------------------------------------"
        print subdir2
        print "Running HEVCTile_Mapping-runSim_TileMapping-"+ fname_prefix +": num_wf=" + str(SimParams.NUM_WORKFLOWS) + \
                        ", noc_h="+str(SimParams.NOC_H)+","+"noc_w="+str(SimParams.NOC_W) + ", " + \
                        exp_key + \
                        ", seed="+str(seed)
        print "----------------------------------------------------------------------------------------------------------------------------"

        # Run the simulation past the last scheduled task by SIM_RUNTIME.
        env, last_scheduled_task_time = MMMSim.runMainSimulation(initial_rand_seed=seed, dump_workload=True)
        env.run(until=last_scheduled_task_time+SimParams.SIM_RUNTIME)
        print "Simulation Ended at : %.15f" % env.now

        _makeDir(final_subdir)

        # get filenames
        filenames = _get_fname(final_subdir, final_fname)

        # dump all captured monitoring data into the report files
        _dump_captured_data(filenames)
def _get_fname(exp_dir, fname_prefix):
    """Build the full output path for every report file.

    Each returned path is ``exp_dir + fname_prefix + suffix`` where the
    suffixes are the module-level ``global_*_fname`` constants.
    """
    # Result-dict key -> module-level filename suffix.
    suffixes = {
        "tm_fname": global_tm_fname,
        "vs_bs_fname": global_vs_bs_fname,
        "util_fname": global_util_fname,
        "wf_res_fname": global_wf_res_fname,
        "gops_opbuff_fname": global_gops_opbuff_fname,
        "rmtbl_dt_fname": global_rmtbl_dt_fname,
        "ibuff_fname": global_ibuff_fname,
        "obuff_fname": global_obuff_fname,
        "nodetqs_fname": global_nodetqs_fname,
        "rmtaskrelease_fname": global_rmtaskrelease_fname,
        "mappingandpriass_fname": global_mappingandpriass_fname,
        "flowscompleted_fname": global_flowscompleted_fname,
        "flowscompletedshort_fname": global_flowscompletedshort_fname,
        "nodetaskexectime_fname": global_nodetaskexectime_fname,
        "schedtestresults_fname": global_schedtestresults_fname,
        "utilvsschedresults_fname": global_utilvsschedresults_fname,
        "rmtaskmappingtable_fname": global_rmtaskmappingtable_fname,
        "rmvolatiletaskmappingtable_fname": global_rmvolatiletaskmappingtable_fname,
        "processedctus_fname": global_processedctus_fname,
        "taskscompleted_fname": global_taskscompleted_fname,
        "mapperexecoverhead_fname": global_mapperexecoverhead_fname,
        "smartmmcid_fname": global_smartmmcid_fname,
        "jobccrinfo_fname": global_jobccrinfo_fname,
    }
    return {key: exp_dir + fname_prefix + suffix for key, suffix in suffixes.items()}
def _dump_captured_data(filenames):
    """Write all monitored simulation data via the MMMSim.SimMon reporters.

    :param filenames: dict produced by _get_fname() mapping report keys to
        full output paths. Reporters that are commented out below are
        deliberately disabled for this experiment.
    """
    # Main workflow/GOP summary (JSON); return values are currently unused.
    (wf_results_summary, gops_in_outputbuff_summary) = MMMSim.SimMon.report_DecodedWorkflows_Summary(timeline_fname=filenames["tm_fname"],
                                                                                                     wf_res_summary_fname = filenames["wf_res_fname"],
                                                                                                     gops_opbuff_summary_fname = filenames["gops_opbuff_fname"],
                                                                                                     rmtbl_dt_summary_fname = filenames["rmtbl_dt_fname"],
                                                                                                     output_format = "json",
                                                                                                     task_model_type = TaskModel.TASK_MODEL_HEVC_TILE_LEVEL)
    #MMMSim.SimMon.report_InstUtilisation(dump_to_file=filenames["util_fname"])
    MMMSim.SimMon.report_OutputBufferContents(dump_to_file=filenames["obuff_fname"])
    #MMMSim.SimMon.report_FlowsCompleted(dump_to_file=filenames["flowscompleted_fname"])
    MMMSim.SimMon.report_FlowsCompleted_short(dump_to_file=filenames["flowscompletedshort_fname"])
    MMMSim.SimMon.report_HEVC_NumCTU()
    #MMMSim.SimMon.report_RMTaskMappingTable(dump_to_file=filenames["rmtaskmappingtable_fname"])
    MMMSim.SimMon.report_VerifyFlows_HEVCTileLvl()
    #MMMSim.SimMon.report_RMVolatileTaskMappingTable(dump_to_file=filenames["rmvolatiletaskmappingtable_fname"])
    MMMSim.SimMon.report_NodeTasksCompleted(dump_to_file=filenames["taskscompleted_fname"])
    #MMMSim.SimMon.report_MappingExecOverhead(dump_to_file=filenames["mapperexecoverhead_fname"])
    #MMMSim.SimMon.report_PremappedMMCPortID(dump_to_file=filenames["smartmmcid_fname"])
    MMMSim.SimMon.report_JobCCRInfo(dump_to_file=filenames["jobccrinfo_fname"])
def _makeDir(directory):
try:
os.stat(directory)
except:
try:
os.makedirs(directory)
except OSError, e:
print str(e)
pass
def _check_file_exists(fname):
return os.path.exists(fname)
# format : "720x576,544x576,528x576,480x576,426x240,320x240,240x180"
def _reslist_convert(str_res_list):
res_list = []
if(str_res_list.count(',')>0):
res_combos = str_res_list.split(',')
if(len(res_combos)>1):
for each_r in res_combos:
res_h_w = each_r.split('x')
int_res_h = int(res_h_w[0])
int_res_w = int(res_h_w[1])
res_list.append((int_res_h, int_res_w))
else:
sys.exit("_reslist_convert:: Error")
else:
res_h_w = str_res_list.split('x')
int_res_h = int(res_h_w[0])
int_res_w = int(res_h_w[1])
res_list.append((int_res_h, int_res_w))
return res_list
############################################################################
############################################################################
## MAIN SCRIPT SECTION
############################################################################
############################################################################

sys.setrecursionlimit(1500)

# Collect command-line parameters. (The parser was previously constructed
# twice back-to-back; the duplicate has been removed.)
parser = argparse.ArgumentParser(__file__, description="Run specified experiment on abstract simulator")
parser.add_argument("--wl_config", help="workload config", default=None)
parser.add_argument("--cmbmppri_type", help="combined mapping and pri-assignment type", type=int, default=-1)
parser.add_argument("--forced_seed", help="forced seed", type=int, default=-1)
parser.add_argument("--memp_select", help="memory port selector", type=int, default=-1)
parser.add_argument("--cc_scale_down", help="Scale down computation cost by this amount", type=float, default=1.0)

args = parser.parse_args()
pprint.pprint(args)

####################################
## check which experiment to run ##
####################################

# The workload config, seed and mapping scheme are mandatory; bail out
# before starting a (long) simulation with defaults.
if (args.wl_config == None) or (args.forced_seed == -1) or (args.cmbmppri_type == -1):
    sys.exit("Arguments invalid")

# Launch the experiment run.
runSim_TileMapping_varCCR(
                forced_seed = args.forced_seed,
                cmbmppri_type = args.cmbmppri_type,
                wl_config = args.wl_config,
                memp_select = args.memp_select,
                cc_scale_down = args.cc_scale_down
                )
| gpl-3.0 |
PrashntS/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
adaptive-learning/proso-apps | proso_models/management/commands/recompute_model.py | 1 | 15912 | from clint.textui import progress
from contextlib import closing
from django.conf import settings
from django.core.cache import cache
from django.core.management.base import BaseCommand, CommandError
from django.db import connection
from django.db import transaction
from optparse import make_option
from proso.list import flatten
from proso_models.models import instantiate_from_config, get_config, Item
from proso.django.config import set_default_config_name
from proso.django.db import is_on_postgresql
from proso.models.environment import InMemoryEnvironment
from proso.time import timer
from proso_common.models import Config
from proso_models.models import EnvironmentInfo, ENVIRONMENT_INFO_CACHE_KEY
from proso_models.models import get_predictive_model
import json
import math
import matplotlib.pyplot as plt
import numpy
import sys
class Command(BaseCommand):
    """Management command that (re)computes the predictive-model environment
    from the full answer log, in batches.

    Modes (mutually exclusive, chosen in handle()): --cancel disables the
    currently loading environment; --garbage-collector deletes disabled
    environments' variables; --dry replays answers in memory and writes a
    model-quality report; default mode loads one batch into the database
    (--initial starts a fresh revision, --finish activates it).
    """

    option_list = BaseCommand.option_list + (
        make_option(
            '--initial',
            dest='initial',
            action='store_true',
            default=False),
        make_option(
            '--config-name',
            dest='config_name',
            type=str,
            default='default'),
        make_option(
            '--batch-size',
            dest='batch_size',
            type=int,
            default=100000),
        make_option(
            '--cancel',
            dest='cancel',
            action='store_true',
            default=False),
        make_option(
            '--garbage-collector',
            dest='garbage_collector',
            action='store_true',
            default=False),
        make_option(
            '--finish',
            dest='finish',
            action='store_true',
            default=False),
        make_option(
            '--validate',
            dest='validate',
            action='store_true',
            default=False),
        make_option(
            '--dry',
            dest='dry',
            action='store_true',
            default=False),
        make_option(
            '--limit',
            dest='limit',
            type=int,
            default=None),
        make_option(
            '--force',
            dest='force',
            action='store_true',
            default=False)
    )

    def handle(self, *args, **options):
        """Dispatch to the selected mode inside one DB transaction; run the
        optional uniqueness validation outside (after commit)."""
        with transaction.atomic():
            if options['cancel']:
                self.handle_cancel(options)
            elif options['garbage_collector']:
                self.handle_gc(options)
            elif options['dry']:
                self.handle_dry(options)
            else:
                self.handle_recompute(options)
        if options['validate']:
            self.handle_validate(options)

    def handle_validate(self, options):
        """Check there is at most one variable row per (key, user, items)
        for this environment; exit the process on violation."""
        timer('recompute_validation')
        info = self.load_environment_info(options['initial'], options['config_name'], False)
        with closing(connection.cursor()) as cursor:
            cursor.execute(
                '''
                SELECT key, user_id, item_primary_id, item_secondary_id
                FROM proso_models_variable
                WHERE info_id = %s
                GROUP BY 1, 2, 3, 4 HAVING COUNT(*) > 1
                ''', [info.id])
            fetched = cursor.fetchall()
            if len(fetched) > 0:
                # NOTE(review): the '{}' placeholder is never filled --
                # looks like a missing .format(len(fetched)).
                print(' -- there are {} violations of variable uniqueness:')
                for key, user, primary, secondary in fetched:
                    print(' - ', key, user, primary, secondary)
                sys.exit('canceling due to previous error')
            else:
                print(' -- validation passed:', timer('recompute_validation'), 'seconds')

    def handle_dry(self, options):
        """Replay all answers (up to --limit) through the predictive model
        without persisting anything, then dump a quality report and plot."""
        info = self.load_environment_info(options['initial'], options['config_name'], True)
        # NOTE(review): this InMemoryEnvironment() is immediately
        # overwritten by the next line -- dead assignment.
        environment = InMemoryEnvironment()
        environment = self.load_environment(info)
        users, items = self.load_user_and_item_ids(info, options['batch_size'])
        items += list(set(flatten(Item.objects.get_reachable_parents(items).values())))
        environment.prefetch(users, items)
        predictive_model = get_predictive_model(info.to_json())
        with closing(connection.cursor()) as cursor:
            cursor.execute('SELECT COUNT(*) FROM proso_models_answer')
            answers_total = cursor.fetchone()[0]
            if options['limit'] is not None:
                answers_total = min(answers_total, options['limit'])
            print('total:', answers_total)
            processed = 0
            prediction = numpy.empty(answers_total)
            correct = numpy.empty(answers_total)
            # Stream the answer log in batch-size pages, predicting before
            # each update so the prediction is out-of-sample.
            while processed < answers_total:
                cursor.execute(
                    '''
                    SELECT
                        id,
                        user_id,
                        item_id,
                        item_asked_id,
                        item_answered_id,
                        time,
                        response_time,
                        guess
                    FROM proso_models_answer
                    ORDER BY id
                    OFFSET %s LIMIT %s
                    ''', [processed, options['batch_size']])
                for (answer_id, user, item, asked, answered, time, response_time, guess) in cursor:
                    correct[processed] = asked == answered
                    prediction[processed] = predictive_model.predict_and_update(
                        environment,
                        user,
                        item,
                        asked == answered,
                        time,
                        item_answered=answered,
                        item_asked=asked,
                        guess=guess,
                        answer_id=answer_id,
                        response_time=response_time,
                    )
                    environment.process_answer(user, item, asked, answered, time, answer_id, response_time, guess)
                    processed += 1
                    if processed >= answers_total:
                        break
                print('processed:', processed)
        filename = settings.DATA_DIR + '/recompute_model_report_{}.json'.format(predictive_model.__class__.__name__)
        model_report = report(prediction, correct)
        with open(filename, 'w') as outfile:
            json.dump(model_report, outfile)
        print('Saving report to:', filename)
        brier_graphs(model_report['brier'], predictive_model)

    def handle_gc(self, options):
        """Delete variables belonging to disabled environments and the
        environment-info rows themselves; VACUUM on PostgreSQL."""
        timer('recompute_gc')
        print(' -- collecting garbage')
        to_gc = [str(x.id) for x in EnvironmentInfo.objects.filter(status=EnvironmentInfo.STATUS_DISABLED).all()]
        if not to_gc:
            print(' -- no environment info to collect')
            return
        # ids come from the ORM (integers cast to str), so interpolating
        # them into the IN (...) list is safe here.
        to_gc_str = ','.join(to_gc)
        with closing(connection.cursor()) as cursor:
            cursor.execute('DELETE FROM proso_models_variable WHERE info_id IN (%s)' % to_gc_str)
            variables = cursor.rowcount
            cursor.execute('DELETE FROM proso_models_environmentinfo WHERE id IN (%s)' % to_gc_str)
            infos = cursor.rowcount
            if is_on_postgresql():
                timer('recompute_vacuum')
                cursor.execute('VACUUM FULL ANALYZE VERBOSE proso_models_variable')
                print(' -- vacuum phase, time:', timer('recompute_vacuum'), 'seconds')
        print(' -- collecting garbage, time:', timer('recompute_gc'), 'seconds, deleted', variables, 'variables,', infos, 'environment info records')

    def handle_cancel(self, options):
        """Disable the currently loading environment revision."""
        info = self.load_environment_info(False, options['config_name'], False)
        print(' -- cancelling')
        info.status = EnvironmentInfo.STATUS_DISABLED
        info.save()

    def handle_recompute(self, options):
        """Load one batch of answers; with --finish, refuse (unless --force)
        when more than one batch remains, since finishing activates
        the environment."""
        timer('recompute_all')
        info = self.load_environment_info(options['initial'], options['config_name'], False)
        if options['finish']:
            to_process = self.number_of_answers_to_process(info)
            # NOTE(review): number_of_answers_to_process() is called a second
            # time here although 'to_process' already holds the result.
            if self.number_of_answers_to_process(info) >= options['batch_size'] and not options['force']:
                raise CommandError("There is more then allowed number of answers (%s) to process." % to_process)
            self.recompute(info, options)
        else:
            self.recompute(info, options)
        print(' -- total time of recomputation:', timer('recompute_all'), 'seconds')

    def recompute(self, info, options):
        """Replay the next batch of answers into the environment, flush to
        the database and, on --finish, swap the active environment."""
        print(' -- preparing phase')
        timer('recompute_prepare')
        environment = self.load_environment(info)
        users, items = self.load_user_and_item_ids(info, options['batch_size'])
        # Parents are prefetched too, since model updates may touch them.
        items += list(set(flatten(Item.objects.get_reachable_parents(items).values())))
        environment.prefetch(users, items)
        predictive_model = get_predictive_model(info.to_json())
        print(' -- preparing phase, time:', timer('recompute_prepare'), 'seconds')
        timer('recompute_model')
        print(' -- model phase')
        with closing(connection.cursor()) as cursor:
            cursor.execute(
                '''
                SELECT
                    id,
                    user_id,
                    item_id,
                    item_asked_id,
                    item_answered_id,
                    time,
                    response_time,
                    guess
                FROM proso_models_answer
                ORDER BY id
                OFFSET %s LIMIT %s
                ''', [info.load_progress, options['batch_size']])
            progress_bar = progress.bar(cursor, every=max(1, cursor.rowcount // 100), expected_size=cursor.rowcount)
            info.load_progress += cursor.rowcount
            for (answer_id, user, item, asked, answered, time, response_time, guess) in progress_bar:
                predictive_model.predict_and_update(
                    environment,
                    user,
                    item,
                    asked == answered,
                    time,
                    item_answered=answered,
                    item_asked=asked,
                    guess=guess,
                    answer_id=answer_id,
                    response_time=response_time,
                )
                environment.process_answer(user, item, asked, answered, time, answer_id, response_time, guess)
        print(' -- model phase, time:', timer('recompute_model'), 'seconds')
        timer('recompute_flush')
        print(' -- flushing phase')
        environment.flush(clean=options['finish'])
        print(' -- flushing phase, time:', timer('recompute_flush'), 'seconds, total number of answers:', info.load_progress)
        if options['finish']:
            timer('recompute_finish')
            print(' -- finishing phase')
            try:
                # Swap: disable the previously active environment and drop
                # the cached pointer so readers pick up the new one.
                previous_info = EnvironmentInfo.objects.get(status=EnvironmentInfo.STATUS_ACTIVE)
                previous_info.status = EnvironmentInfo.STATUS_DISABLED
                previous_info.save()
                cache.delete(ENVIRONMENT_INFO_CACHE_KEY)
            except EnvironmentInfo.DoesNotExist:
                pass
            info.status = EnvironmentInfo.STATUS_ACTIVE
            print(' -- finishing phase, time:', timer('recompute_finish'), 'seconds')
        info.save()

    def load_environment_info(self, initial, config_name, dry):
        """Return (and memoize on self) the EnvironmentInfo to work with.

        dry=True returns an unsaved instance; initial=True creates a new
        revision (refusing when another one is already loading); otherwise
        the currently loading revision is fetched.
        """
        set_default_config_name(config_name)
        if hasattr(self, '_environment_info'):
            return self._environment_info
        config = Config.objects.from_content(get_config('proso_models', 'predictive_model', default={}))
        if dry:
            self._environment_info = EnvironmentInfo(config=config)
            return self._environment_info
        if initial:
            if EnvironmentInfo.objects.filter(status=EnvironmentInfo.STATUS_LOADING).count() > 0:
                raise CommandError("There is already one currently loading environment.")
            last_revisions = EnvironmentInfo.objects.filter(config=config).order_by('-revision')[:1]
            if last_revisions:
                # NOTE(review): this uses the row's primary key (.id), not
                # .revision, although the query orders by revision --
                # confirm this is intended.
                new_revision = last_revisions[0].id + 1
            else:
                new_revision = 0
            self._environment_info = EnvironmentInfo.objects.create(config=config, revision=new_revision)
        else:
            self._environment_info = EnvironmentInfo.objects.get(config=config, status=EnvironmentInfo.STATUS_LOADING)
        return self._environment_info

    def load_environment(self, info):
        """Instantiate the recompute environment configured for this app,
        defaulting to the in-memory database-flush implementation."""
        return instantiate_from_config(
            'proso_models', 'recompute_environment',
            default_class='proso_models.environment.InMemoryDatabaseFlushEnvironment',
            pass_parameters=[info])

    def load_user_and_item_ids(self, info, batch_size):
        """Return (user_ids, item_ids) appearing in the next batch of
        answers; item ids include asked/answered items, without None."""
        with closing(connection.cursor()) as cursor:
            cursor.execute(
                '''
                SELECT
                    user_id,
                    item_id,
                    item_asked_id,
                    item_answered_id
                FROM proso_models_answer
                ORDER BY id
                OFFSET %s LIMIT %s
                ''', [info.load_progress, batch_size])
            unzipped = list(zip(*cursor.fetchall()))
            if len(unzipped) == 0:
                return [], []
            else:
                return list(set(unzipped[0])), [x for x in list(set(unzipped[1]) | set(unzipped[2]) | set(unzipped[3])) if x is not None]

    def number_of_answers_to_process(self, info):
        """Return how many answers remain beyond info.load_progress.

        NOTE(review): OFFSET here applies to the single aggregate result
        row, not to the answers being counted -- for load_progress >= 1
        the query returns no row and this method yields 0. Confirm whether
        'COUNT(*) - load_progress' was intended.
        """
        with closing(connection.cursor()) as cursor:
            cursor.execute(
                '''
                SELECT COUNT(id)
                FROM proso_models_answer
                OFFSET %s
                ''', [info.load_progress])
            fetched = cursor.fetchone()
            if fetched is None:
                return 0
            else:
                return fetched[0]
def report(predictions, real):
    """Summarize predictive performance as a dict with RMSE and the
    decomposed Brier score."""
    rmse_score = rmse(predictions, real)
    brier_score = brier(predictions, real)
    return {'rmse': rmse_score, 'brier': brier_score}
def rmse(predictions, real):
    """Root-mean-square error between predicted probabilities and outcomes."""
    squared_error = (predictions - real) ** 2
    return math.sqrt(numpy.mean(squared_error))
def brier(predictions, real, bins=20):
    """Compute the decomposed Brier score over equal-width probability bins.

    Args:
        predictions: predicted success probabilities in [0, 1].
        real: observed outcomes (0/1), aligned with predictions.
        bins: number of equal-width bins over [0, 1].

    Returns:
        dict with 'reliability', 'resolution', 'uncertainty' and a 'detail'
        sub-dict holding the per-bin counts and means.

    (The original loop variable shadowed the builtin ``bin`` and the empty-bin
    divisions emitted RuntimeWarnings; both are fixed without changing the
    computed values.)
    """
    counts = numpy.zeros(bins)
    correct = numpy.zeros(bins)
    prediction = numpy.zeros(bins)
    for pred, outcome in zip(predictions, real):
        # min(...) clamps pred == 1.0 into the last bin.
        bin_index = min(int(pred * bins), bins - 1)
        counts[bin_index] += 1
        correct[bin_index] += outcome
        prediction[bin_index] += pred
    # Empty bins give 0/0 -> NaN; patch them with the bin midpoint
    # (for predictions) or 0 (for outcomes) as before, but without the
    # noisy RuntimeWarnings.
    with numpy.errstate(divide='ignore', invalid='ignore'):
        prediction_means = prediction / counts
        correct_means = correct / counts
    prediction_means[numpy.isnan(prediction_means)] = ((numpy.arange(bins) + 0.5) / bins)[numpy.isnan(prediction_means)]
    correct_means[numpy.isnan(correct_means)] = 0
    size = len(predictions)
    answer_mean = sum(correct) / size
    return {
        "reliability":  sum(counts * (correct_means - prediction_means) ** 2) / size,
        "resolution":   sum(counts * (correct_means - answer_mean) ** 2) / size,
        "uncertainty":  answer_mean * (1 - answer_mean),
        "detail": {
            "bin_count": bins,
            "bin_counts": list(counts),
            "bin_prediction_means": list(prediction_means),
            "bin_correct_means": list(correct_means),
        }
    }
def brier_graphs(brier, model):
    """Plot the calibration curve (predicted vs. observed per bin) plus a
    bin-occupancy histogram, and save it as SVG under settings.DATA_DIR.

    :param brier: the dict returned by the module-level brier() function
        (note: this parameter name shadows that function inside this body).
    :param model: the predictive model; its class name titles the plot and
        names the output file.
    """
    plt.figure()
    plt.plot(brier['detail']['bin_prediction_means'], brier['detail']['bin_correct_means'])
    # Diagonal = perfect calibration.
    plt.plot((0, 1), (0, 1))
    bin_count = brier['detail']['bin_count']
    counts = numpy.array(brier['detail']['bin_counts'])
    bins = (numpy.arange(bin_count) + 0.5) / bin_count
    # Bars show the relative number of answers per bin (normalized to 1).
    plt.bar(bins, counts / max(counts), width=(0.5 / bin_count), alpha=0.5)
    plt.title(model.__class__.__name__)
    plt.xlabel('Predicted')
    plt.ylabel('Observed')
    filename = settings.DATA_DIR + '/recompute_model_report_{}.svg'.format(model.__class__.__name__)
    plt.savefig(filename)
    print('Plotting to:', filename)
| mit |
rahulgayatri23/moose-core | setup.py | 1 | 14427 | # setup.py ---
#
# Filename: setup.py
# Description:
# Author: subha
# Maintainer:
# Created: Sun Dec 7 20:32:02 2014 (+0530)
# Version:
# Last-Updated: Wed Mar 2 12:23:55 2016 (-0500)
# By: Subhasis Ray
# Update #: 32
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""
This script compiles MOOSE using the Python distutils module. As of Sat
Dec 27 14:41:33 EST 2014, it works on Cygwin 64 bit on Windows 7 with
the latest packages installed. Ubuntu 13.04 was also tested.
Pre-requisites:
You need to have Python-dev, numpy, libxml-dev, gsl-dev and hdf5-dev
libraries installed.
libSBML needs to be downloaded, built and installed separately.
"""
import numpy as np
CLASSIFIERS = ["Development Status :: 5 - Production/Stable",
               "Environment :: Console",
               "Intended Audience :: Science/Research",
               "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
               "Operating System :: Microsoft :: Windows",
               "Operating System :: POSIX :: Linux",
               "Programming Language :: Python",
               "Topic :: Scientific/Engineering :: Bio-Informatics"]
NAME = 'moose'
DESCRIPTION = 'MOOSE is the Multiscale Object Oriented Simulation Environment'
# Read the long description from README.md.  A context manager guarantees the
# handle is closed even if read() raises (the original open/read/close leaked
# the handle on error).
with open('README.md', 'r') as fid:
    long_description = fid.read()
# Keep only the section between the project blurb and the "# VERSION" header.
idx = max(0, long_description.find('MOOSE is the'))
idx_end = long_description.find('# VERSION')
if idx_end == -1:
    # Header missing: keep everything.  find() returning -1 would otherwise
    # silently slice off the final character of the description.
    idx_end = len(long_description)
LONG_DESCRIPTION = long_description[idx:idx_end]
URL = 'http://moose.ncbs.res.in'
DOWNLOAD_URL = 'http://sourceforge.net/projects/moose/files/latest/download'
LICENSE = "GPLv3"
AUTHOR = '(alphabetically) Upinder Bhalla, Niraj Dudani, Aditya Gilra, Aviral Goel, GV Harsharani, Subhasis Ray and Dilawar Singh'
PLATFORMS = "Linux, Windows/cygwin"
VERSION = '3.0.1'
# Fully qualified name of the extension module to build.
BUILD_TARGET = 'moose._moose'
SOURCES=['external/muparser/src/muParser.cpp',
'external/muparser/src/muParserBase.cpp',
'external/muparser/src/muParserTokenReader.cpp',
'external/muparser/src/muParserError.cpp',
'external/muparser/src/muParserCallback.cpp',
'external/muparser/src/muParserBytecode.cpp',
'basecode/consts.cpp',
'basecode/Element.cpp',
'basecode/DataElement.cpp',
'basecode/GlobalDataElement.cpp',
'basecode/LocalDataElement.cpp',
'basecode/Eref.cpp',
'basecode/Finfo.cpp',
'basecode/DestFinfo.cpp',
'basecode/Cinfo.cpp',
'basecode/SrcFinfo.cpp',
'basecode/ValueFinfo.cpp',
'basecode/SharedFinfo.cpp',
'basecode/FieldElementFinfo.cpp',
'basecode/FieldElement.cpp',
'basecode/Id.cpp',
'basecode/ObjId.cpp',
'basecode/global.cpp',
'basecode/SetGet.cpp',
'basecode/OpFuncBase.cpp',
'basecode/EpFunc.cpp',
'basecode/HopFunc.cpp',
'basecode/SparseMatrix.cpp',
'basecode/doubleEq.cpp',
'basecode/testAsync.cpp',
'basecode/main.cpp',
'biophysics/IntFire.cpp',
'biophysics/SpikeGen.cpp',
'biophysics/RandSpike.cpp',
'biophysics/CompartmentDataHolder.cpp',
'biophysics/CompartmentBase.cpp',
'biophysics/Compartment.cpp',
'biophysics/SymCompartment.cpp',
'biophysics/GapJunction.cpp',
'biophysics/ChanBase.cpp',
'biophysics/ChanCommon.cpp',
'biophysics/HHChannelBase.cpp',
'biophysics/HHChannel.cpp',
'biophysics/HHGate.cpp',
'biophysics/HHGate2D.cpp',
'biophysics/HHChannel2D.cpp',
'biophysics/CaConcBase.cpp',
'biophysics/CaConc.cpp',
'biophysics/MgBlock.cpp',
'biophysics/Nernst.cpp',
'biophysics/Neuron.cpp',
'biophysics/ReadCell.cpp',
'biophysics/SynChan.cpp',
'biophysics/NMDAChan.cpp',
'biophysics/Spine.cpp',
'biophysics/testBiophysics.cpp',
'biophysics/IzhikevichNrn.cpp',
'biophysics/DifShell.cpp',
'biophysics/Leakage.cpp',
'biophysics/VectorTable.cpp',
'biophysics/MarkovRateTable.cpp',
'biophysics/MarkovChannel.cpp',
'biophysics/MarkovGslSolver.cpp',
'biophysics/MatrixOps.cpp',
'biophysics/MarkovSolverBase.cpp',
'biophysics/MarkovSolver.cpp',
'biophysics/VClamp.cpp',
'biophysics/SwcSegment.cpp',
'biophysics/ReadSwc.cpp',
'builtins/Arith.cpp',
'builtins/Group.cpp',
'builtins/Mstring.cpp',
'builtins/Func.cpp',
'builtins/Function.cpp',
'builtins/Variable.cpp',
'builtins/InputVariable.cpp',
'builtins/TableBase.cpp',
'builtins/Table.cpp',
'builtins/Interpol.cpp',
'builtins/StimulusTable.cpp',
'builtins/TimeTable.cpp',
'builtins/Stats.cpp',
'builtins/SpikeStats.cpp',
'builtins/Interpol2D.cpp',
'builtins/HDF5WriterBase.cpp',
'builtins/HDF5DataWriter.cpp',
'builtins/NSDFWriter.cpp',
'builtins/testNSDF.cpp',
'builtins/testBuiltins.cpp',
'device/PulseGen.cpp',
'device/DiffAmp.cpp',
'device/PIDController.cpp',
'device/RC.cpp',
'diffusion/FastMatrixElim.cpp',
'diffusion/DiffPoolVec.cpp',
'diffusion/Dsolve.cpp',
'diffusion/testDiffusion.cpp',
'hsolve/HSolveStruct.cpp',
'hsolve/HinesMatrix.cpp',
'hsolve/HSolvePassive.cpp',
'hsolve/RateLookup.cpp',
'hsolve/HSolveActive.cpp',
'hsolve/HSolveActiveSetup.cpp',
'hsolve/HSolveInterface.cpp',
'hsolve/HSolve.cpp',
'hsolve/HSolveUtils.cpp',
'hsolve/testHSolve.cpp',
'hsolve/ZombieCompartment.cpp',
'hsolve/ZombieCaConc.cpp',
'hsolve/ZombieHHChannel.cpp',
'intfire/IntFireBase.cpp',
'intfire/LIF.cpp',
'intfire/QIF.cpp',
'intfire/ExIF.cpp',
'intfire/AdExIF.cpp',
'intfire/AdThreshIF.cpp',
'intfire/IzhIF.cpp',
'intfire/testIntFire.cpp',
'kinetics/PoolBase.cpp',
'kinetics/Pool.cpp',
'kinetics/BufPool.cpp',
'kinetics/ReacBase.cpp',
'kinetics/Reac.cpp',
'kinetics/EnzBase.cpp',
'kinetics/CplxEnzBase.cpp',
'kinetics/Enz.cpp',
'kinetics/MMenz.cpp',
'kinetics/Species.cpp',
'kinetics/ReadKkit.cpp',
'kinetics/WriteKkit.cpp',
'kinetics/ReadCspace.cpp',
'kinetics/lookupVolumeFromMesh.cpp',
'kinetics/testKinetics.cpp',
'ksolve/KinSparseMatrix.cpp',
'ksolve/ZombiePool.cpp',
'ksolve/ZombiePoolInterface.cpp',
'ksolve/ZombieBufPool.cpp',
'ksolve/ZombieReac.cpp',
'ksolve/ZombieEnz.cpp',
'ksolve/ZombieMMenz.cpp',
'ksolve/ZombieFunction.cpp',
'ksolve/VoxelPoolsBase.cpp',
'ksolve/VoxelPools.cpp',
'ksolve/GssaVoxelPools.cpp',
'ksolve/RateTerm.cpp',
'ksolve/FuncTerm.cpp',
'ksolve/Stoich.cpp',
'ksolve/Ksolve.cpp',
'ksolve/SteadyState.cpp',
'ksolve/Gsolve.cpp',
'ksolve/testKsolve.cpp',
'mesh/ChemCompt.cpp',
'mesh/MeshCompt.cpp',
'mesh/MeshEntry.cpp',
'mesh/CubeMesh.cpp',
'mesh/CylBase.cpp',
'mesh/CylMesh.cpp',
'mesh/NeuroNode.cpp',
'mesh/NeuroMesh.cpp',
'mesh/SpineEntry.cpp',
'mesh/SpineMesh.cpp',
'mesh/PsdMesh.cpp',
'mesh/testMesh.cpp',
'mpi/PostMaster.cpp',
'mpi/testMpi.cpp',
'msg/Msg.cpp',
'msg/DiagonalMsg.cpp',
'msg/OneToAllMsg.cpp',
'msg/OneToOneMsg.cpp',
'msg/SingleMsg.cpp',
'msg/SparseMsg.cpp',
'msg/OneToOneDataIndexMsg.cpp',
'msg/testMsg.cpp',
'pymoose/moosemodule.cpp',
'pymoose/mfield.cpp',
'pymoose/vec.cpp',
'pymoose/melement.cpp',
'pymoose/test_moosemodule.cpp',
'randnum/mt19937ar.cpp',
'sbml/MooseSbmlWriter.cpp',
'sbml/MooseSbmlReader.cpp',
'scheduling/Clock.cpp',
'scheduling/testScheduling.cpp',
'shell/Shell.cpp',
'shell/ShellCopy.cpp',
'shell/ShellThreads.cpp',
'shell/LoadModels.cpp',
'shell/SaveModels.cpp',
'shell/Neutral.cpp',
'shell/Wildcard.cpp',
'shell/testShell.cpp',
'signeur/Adaptor.cpp',
'signeur/testSigNeur.cpp',
'synapse/SynHandlerBase.cpp',
'synapse/SimpleSynHandler.cpp',
'synapse/STDPSynHandler.cpp',
'synapse/Synapse.cpp',
'synapse/STDPSynapse.cpp',
'synapse/testSynapse.cpp',
'utility/strutil.cpp',
'utility/types.cpp',
'utility/setupenv.cpp',
'utility/numutil.cpp',
'utility/Annotator.cpp',
'utility/Vec.cpp',
'benchmarks/benchmarks.cpp',
'benchmarks/kineticMarks.cpp'
]
INCLUDE_DIRS=['/usr/include',
'/usr/local/include',
np.get_include(),
'.',
'external/muparser/include',
'basecode',
'biophysics',
'builtins',
'device',
'diffusion',
'hsolve',
'intfire',
'kinetics',
'kk',
'ksolve',
'mesh',
'mpi',
'msg',
'pymoose',
'randnum',
'sbml',
'scheduling',
'shell',
'signeur',
'synapse',
'utility']
LIBRARIES = ['gsl'
             , 'gslcblas' # required to avoid undefined refs on import
             , 'hdf5'
             , 'sbml'
             , 'hdf5_hl' # required to avoid undefined refs on import
]
LIBRARY_DIRS = ['/usr/lib64', '/usr/lib', '/usr/local/lib']
# Preprocessor switches for the C++ build.  Each macro is listed exactly once;
# the original listed ('USE_HDF5', None) twice, which just duplicated
# -DUSE_HDF5 on the compiler command line.
DEFINE_MACROS = [('USE_GSL', None),
                 ('USE_HDF5', None),
                 ('NDEBUG', None),
                 ('USE_NUMPY', None),
                 ('H5_NO_DEPRECATED_SYMBOLS', None),
                 ('PYMOOSE', None),
                 ('USE_SBML', None)]
EXTRA_LINK_ARGS = ['-L/usr/lib64', '-Wl,-R/usr/lib64'] # distutils disregards everything in LIBRARY_DIRS except /usr/local/lib, hence this
PACKAGES = ['moose', 'moose.backend', 'moose.neuroml', 'moose.topology']
PACKAGE_DATA = {'moose': ['LICENSE', 'README.md'], 'mgui': ['icons/*', 'colormaps/*', 'bioModels/*']}
REQUIRES = ['numpy'] #, 'gsl', 'hdf5', 'libsbml'] # using full-dependency
# python-libsbml, although available on PyPI, does not build with pip
# install and gsl is a C library. The links are just for informing the
# users.
DEPENDENCY_LINKS = ['git+git://git.savannah.gnu.org/gsl.git',
                    'svn+svn://svn.code.sf.net/p/sbml/code/trunk']
INSTALL_REQUIRES = None # "requirements.txt"
EXTRAS_REQUIRE = {} #['matplotlib', 'PyQt4', 'suds']
# Keyword arguments for setup().  ext_modules and the setuptools-only keys
# (install_requires, extras_require, dependency_links) are appended later,
# after we know which packaging backend is available.
setup_info = dict(
    name=NAME,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    url=URL,
    download_url=DOWNLOAD_URL,
    license=LICENSE,
    classifiers=CLASSIFIERS,
    author=AUTHOR,
    platforms=PLATFORMS,
    version=VERSION,
    packages=PACKAGES,
    package_data=PACKAGE_DATA,
    requires=REQUIRES,
    # The pure-Python package lives under the 'python' subdirectory.
    package_dir={'': 'python'},
    #scripts=SCRIPTS,
    )
## The following monkey patch allows parallel compilation of the C++
## files Taken from here: From here:
## http://stackoverflow.com/questions/11013851/speeding-up-build-process-with-distutils
##
## Also, if you are rerunning setup.py after checking out a few
## changes, consider using cccache as suggested in the above discussion
## to avoid recompiling every file.
##
## monkey-patch for parallel compilation
##
# def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
# # those lines are copied from distutils.ccompiler.CCompiler directly
# macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
# cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
# # parallel code
# N=4 # number of parallel compilations
# import multiprocessing.pool
# def _single_compile(obj):
# try: src, ext = build[obj]
# except KeyError: return
# self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# # convert to list, imap is evaluated on-demand
# list(multiprocessing.pool.ThreadPool(N).imap(_single_compile,objects))
# return objects
# import distutils.ccompiler
# distutils.ccompiler.CCompiler.compile=parallelCCompile
## Monkey-patch ends here.
# Prefer setuptools (supports install_requires etc.); fall back to plain
# distutils when setuptools is not installed.  The extra keys are only added
# in the setuptools branch because distutils does not understand them.
try:
    from setuptools import setup
    from setuptools.extension import Extension
    setup_info['install_requires'] = INSTALL_REQUIRES
    setup_info['extras_require'] = EXTRAS_REQUIRE
    setup_info['dependency_links'] = DEPENDENCY_LINKS
except ImportError:
    from distutils.core import setup, Extension
# Single C++ extension containing the whole MOOSE core.
moose_module = Extension(
    BUILD_TARGET,
    sources=SOURCES,
    include_dirs=INCLUDE_DIRS,
    libraries=LIBRARIES,
    library_dirs=LIBRARY_DIRS,
    runtime_library_dirs=LIBRARY_DIRS,
    define_macros=DEFINE_MACROS,
    extra_link_args=EXTRA_LINK_ARGS
)
print((moose_module.runtime_library_dirs))
setup_info['ext_modules'] = [moose_module]
setup(**setup_info)
#
# setup.py ends here
| gpl-3.0 |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/stats/fama_macbeth.py | 2 | 7257 | from pandas.core.base import StringMixin
from pandas.compat import StringIO, range
import numpy as np
from pandas.core.api import Series, DataFrame
import pandas.stats.common as common
from pandas.util.decorators import cache_readonly
# flake8: noqa
def fama_macbeth(**kwargs):
    """Run a Fama-MacBeth regression.

    Parameters
    ----------
    Takes the same arguments as a panel OLS, in addition to:
    nw_lags_beta: int
        Newey-West adjusts the betas by the given lags
    """
    # Dispatch on the presence of 'window_type': any rolling/expanding window
    # requires the moving estimator, otherwise the full-sample one is used.
    use_moving = kwargs.get('window_type') is not None
    klass = MovingFamaMacBeth if use_moving else FamaMacBeth
    return klass(**kwargs)
class FamaMacBeth(StringMixin):
    """Fama-MacBeth two-pass regression over a panel.

    First pass: a cross-sectional OLS per time period (implemented as a
    ``MovingPanelOLS`` with ``window=1``).  Second pass: the per-period
    betas are averaged and their standard errors / t-statistics computed,
    optionally Newey-West adjusted via ``nw_lags_beta``.
    """
    def __init__(self, y, x, intercept=True, nw_lags=None,
                 nw_lags_beta=None,
                 entity_effects=False, time_effects=False, x_effects=None,
                 cluster=None, dropped_dummies=None, verbose=False):
        import warnings
        warnings.warn("The pandas.stats.fama_macbeth module is deprecated and will be "
                      "removed in a future version. We refer to external packages "
                      "like statsmodels, see here: http://statsmodels.sourceforge.net/stable/index.html",
                      FutureWarning, stacklevel=4)
        # Avoid the shared-mutable-default pitfall for dropped_dummies.
        if dropped_dummies is None:
            dropped_dummies = {}
        self._nw_lags_beta = nw_lags_beta
        from pandas.stats.plm import MovingPanelOLS
        # window=1 gives one cross-sectional regression per time period --
        # exactly the first pass of Fama-MacBeth.
        self._ols_result = MovingPanelOLS(
            y=y, x=x, window_type='rolling', window=1,
            intercept=intercept,
            nw_lags=nw_lags, entity_effects=entity_effects,
            time_effects=time_effects, x_effects=x_effects, cluster=cluster,
            dropped_dummies=dropped_dummies, verbose=verbose)
        self._cols = self._ols_result._x.columns
    # Per-period beta estimates from the first-pass regressions.
    @cache_readonly
    def _beta_raw(self):
        return self._ols_result._beta_raw
    # Tuple (mean_beta, std_beta, t_stat) computed over the per-period betas.
    @cache_readonly
    def _stats(self):
        return _calc_t_stat(self._beta_raw, self._nw_lags_beta)
    @cache_readonly
    def _mean_beta_raw(self):
        return self._stats[0]
    @cache_readonly
    def _std_beta_raw(self):
        return self._stats[1]
    @cache_readonly
    def _t_stat_raw(self):
        return self._stats[2]
    def _make_result(self, result):
        # Wrap a raw statistic vector in a Series labelled by regressor name.
        return Series(result, index=self._cols)
    @cache_readonly
    def mean_beta(self):
        return self._make_result(self._mean_beta_raw)
    @cache_readonly
    def std_beta(self):
        return self._make_result(self._std_beta_raw)
    @cache_readonly
    def t_stat(self):
        return self._make_result(self._t_stat_raw)
    @cache_readonly
    def _results(self):
        return {
            'mean_beta': self._mean_beta_raw,
            'std_beta': self._std_beta_raw,
            't_stat': self._t_stat_raw,
        }
    # Fixed-width text table of coefficients with 95% confidence bounds.
    @cache_readonly
    def _coef_table(self):
        buffer = StringIO()
        buffer.write('%13s %13s %13s %13s %13s %13s\n' %
                     ('Variable', 'Beta', 'Std Err', 't-stat', 'CI 2.5%', 'CI 97.5%'))
        template = '%13s %13.4f %13.4f %13.2f %13.4f %13.4f\n'
        for i, name in enumerate(self._cols):
            # Insert a separator banner every five rows for readability.
            if i and not (i % 5):
                buffer.write('\n' + common.banner(''))
            mean_beta = self._results['mean_beta'][i]
            std_beta = self._results['std_beta'][i]
            t_stat = self._results['t_stat'][i]
            # 95% normal-approximation confidence interval.
            ci1 = mean_beta - 1.96 * std_beta
            ci2 = mean_beta + 1.96 * std_beta
            values = '(%s)' % name, mean_beta, std_beta, t_stat, ci1, ci2
            buffer.write(template % values)
        if self._nw_lags_beta is not None:
            buffer.write('\n')
            buffer.write('*** The Std Err, t-stat are Newey-West '
                         'adjusted with Lags %5d\n' % self._nw_lags_beta)
        return buffer.getvalue()
    def __unicode__(self):
        return self.summary
    @cache_readonly
    def summary(self):
        # The template lines are deliberately unindented: they belong to a
        # triple-quoted string, not to the surrounding code block.
        template = """
----------------------Summary of Fama-MacBeth Analysis-------------------------
Formula: Y ~ %(formulaRHS)s
# betas : %(nu)3d
----------------------Summary of Estimated Coefficients------------------------
%(coefTable)s
--------------------------------End of Summary---------------------------------
"""
        params = {
            'formulaRHS': ' + '.join(self._cols),
            'nu': len(self._beta_raw),
            'coefTable': self._coef_table,
        }
        return template % params
class MovingFamaMacBeth(FamaMacBeth):
    """Fama-MacBeth regression evaluated over a moving window of time
    periods (rolling or expanding) rather than the full sample."""
    def __init__(self, y, x, window_type='rolling', window=10,
                 intercept=True, nw_lags=None, nw_lags_beta=None,
                 entity_effects=False, time_effects=False, x_effects=None,
                 cluster=None, dropped_dummies=None, verbose=False):
        if dropped_dummies is None:
            dropped_dummies = {}
        self._window_type = common._get_window_type(window_type)
        self._window = window
        FamaMacBeth.__init__(
            self, y=y, x=x, intercept=intercept,
            nw_lags=nw_lags, nw_lags_beta=nw_lags_beta,
            entity_effects=entity_effects, time_effects=time_effects,
            x_effects=x_effects, cluster=cluster,
            dropped_dummies=dropped_dummies, verbose=verbose)
        self._index = self._ols_result._index
        # Total number of time periods in the panel.
        self._T = len(self._index)
    @property
    def _is_rolling(self):
        return self._window_type == 'rolling'
    def _calc_stats(self):
        # For each window ending at period i, recompute the second-pass
        # Fama-MacBeth statistics over the betas inside that window.
        mean_betas = []
        std_betas = []
        t_stats = []
        # XXX
        mask = self._ols_result._rolling_ols_call[2]
        # Running count of valid periods, used to slice the stacked betas.
        obs_total = mask.astype(int).cumsum()
        start = self._window - 1
        betas = self._beta_raw
        for i in range(start, self._T):
            if self._is_rolling:
                begin = i - start
            else:
                # Expanding window: always start from the first period.
                begin = 0
            B = betas[max(obs_total[begin] - 1, 0): obs_total[i]]
            mean_beta, std_beta, t_stat = _calc_t_stat(B, self._nw_lags_beta)
            mean_betas.append(mean_beta)
            std_betas.append(std_beta)
            t_stats.append(t_stat)
        return np.array([mean_betas, std_betas, t_stats])
    _stats = cache_readonly(_calc_stats)
    def _make_result(self, result):
        # Results are per-window here, hence a DataFrame indexed by time.
        return DataFrame(result, index=self._result_index, columns=self._cols)
    @cache_readonly
    def _result_index(self):
        mask = self._ols_result._rolling_ols_call[2]
        # HACK XXX
        return self._index[mask.cumsum() >= self._window]
    @cache_readonly
    def _results(self):
        # The summary table shows only the most recent window's statistics.
        return {
            'mean_beta': self._mean_beta_raw[-1],
            'std_beta': self._std_beta_raw[-1],
            't_stat': self._t_stat_raw[-1],
        }
def _calc_t_stat(beta, nw_lags_beta):
N = len(beta)
B = beta - beta.mean(0)
C = np.dot(B.T, B) / N
if nw_lags_beta is not None:
for i in range(nw_lags_beta + 1):
cov = np.dot(B[i:].T, B[:(N - i)]) / N
weight = i / (nw_lags_beta + 1)
C += 2 * (1 - weight) * cov
mean_beta = beta.mean(0)
std_beta = np.sqrt(np.diag(C)) / np.sqrt(N)
t_stat = mean_beta / std_beta
return mean_beta, std_beta, t_stat
| gpl-2.0 |
maxalbert/tohu | tests/v7/test_item_list.py | 1 | 1616 | import pandas as pd
from pandas.util.testing import assert_frame_equal
from .context import tohu
from tohu.v7.item_list import ItemList
from tohu.v7.custom_generator.tohu_items_class import make_tohu_items_class
def test_item_list():
    """ItemList behaves like the list of values it wraps."""
    expected = [11, 55, 22, 66, 33]
    wrapped = ItemList(expected)
    # Container protocol: contents, equality, length, indexing, iteration.
    assert wrapped.items == expected
    assert wrapped == expected
    assert len(wrapped) == 5
    assert wrapped[3] == 66
    assert list(wrapped) == expected
    # Equality is value-based, not identity-based.
    assert wrapped == ItemList(expected)
    assert wrapped != ItemList([1, 5, 8, 3])
def test_to_df():
    """to_df() produces one column per field, in declaration order."""
    Quux = make_tohu_items_class("Quux", field_names=["foo", "bar", "baz"])
    items = [Quux(42, True, "hello"), Quux(23, False, "world")]
    expected = pd.DataFrame(
        {"foo": [42, 23], "bar": [True, False], "baz": ["hello", "world"]})
    result = ItemList(items, tohu_items_cls=Quux).to_df()
    assert_frame_equal(result, expected)
def test_to_df_with_selected_fields_and_column_renaming():
    """to_df(fields=...) can select a subset of fields and rename columns."""
    Quux = make_tohu_items_class("Quux", field_names=["foo", "bar", "baz"])
    item_list = ItemList(
        [Quux(42, True, "hello"), Quux(23, False, "world")],
        tohu_items_cls=Quux)
    # A list of field names selects (and orders) the columns.
    subset = item_list.to_df(fields=["baz", "foo"])
    assert_frame_equal(
        subset, pd.DataFrame({"baz": ["hello", "world"], "foo": [42, 23]}))
    # A mapping {new_name: field_name} additionally renames the columns.
    renamed = item_list.to_df(fields={"xx": "bar", "yy": "foo"})
    assert_frame_equal(
        renamed, pd.DataFrame({"xx": [True, False], "yy": [42, 23]}))
| mit |
Titan-C/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
    """Unpack the row and column arrays of two biclusterings and validate them.

    Parameters
    ----------
    a, b : tuple (rows, columns)
        Row and column indicator arrays for a set of biclusters.

    Returns
    -------
    a_rows, a_cols, b_rows, b_cols : validated arrays
    """
    check_consistent_length(*a)
    check_consistent_length(*b)

    # PEP 8 (E731): a named inner function instead of a lambda assignment.
    def _check(x):
        # Indicator vectors may legitimately be 1d, so do not force 2d.
        return check_array(x, ensure_2d=False)

    a_rows, a_cols = map(_check, a)
    b_rows, b_cols = map(_check, b)
    return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
    """Compute the pairwise similarity matrix of two biclusterings.

    ``result[i, j]`` is ``similarity`` evaluated on bicluster ``i`` of
    ``a`` and bicluster ``j`` of ``b``.
    """
    a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
    n_a = a_rows.shape[0]
    n_b = b_rows.shape[0]
    rows = []
    for i in range(n_a):
        rows.append([similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j])
                     for j in range(n_b)])
    return np.array(rows)
def consensus_score(a, b, similarity="jaccard"):
    """The similarity of two sets of biclusters.

    Pairwise similarities between the individual biclusters are computed
    first; the best one-to-one matching between the two sets is then found
    with the Hungarian algorithm.  The final score is the sum of the
    matched similarities divided by the size of the larger set.

    Read more in the :ref:`User Guide <biclustering>`.

    Parameters
    ----------
    a : (rows, columns)
        Tuple of row and column indicators for a set of biclusters.

    b : (rows, columns)
        Another set of biclusters like ``a``.

    similarity : string or function, optional, default: "jaccard"
        May be the string "jaccard" to use the Jaccard coefficient, or
        any function that takes four arguments, each of which is a 1d
        indicator vector: (a_rows, a_columns, b_rows, b_columns).

    References
    ----------

    * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
      for bicluster acquisition
      <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.

    """
    sim_fn = _jaccard if similarity == "jaccard" else similarity
    matrix = _pairwise_similarity(a, b, sim_fn)
    # The Hungarian algorithm minimises cost, so feed it dissimilarities.
    indices = linear_assignment(1. - matrix)
    best_match_total = matrix[indices[:, 0], indices[:, 1]].sum()
    larger_set_size = max(len(a[0]), len(b[0]))
    return best_match_total / larger_set_size
| bsd-3-clause |
vybstat/scikit-learn | examples/model_selection/plot_learning_curve.py | 250 | 4171 | """
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    title : string
        Title for the chart.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.

    cv : integer, cross-validation generator, optional
        If an integer is passed, it is the number of folds (defaults to 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).

    train_sizes : array-like, optional
        Fractions of the training set used for each point of the curve.

    Returns
    -------
    The matplotlib.pyplot module, so the caller can show or save the figure.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # One (train score, test score) pair per training-set size; each entry
    # holds the scores of all CV folds for that size.
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shaded bands show +/- one standard deviation across the CV folds.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
# Demo: draw learning curves for two classifiers on the digits dataset.
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
                                   test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
                                   test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
# Render both figures.
plt.show()
kevin-intel/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 25 | 1818 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
# NOTE: the unused ``n_neighbors = 15`` constant from the original example
# has been removed; NearestCentroid takes no neighbour count.

# import some data to play with
iris = datasets.load_iris()
# we only take the first two features. We could avoid this ugly
# slicing by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target

h = .02  # step size in the mesh

# Create color maps: light shades for the decision regions, darker shades
# of the same hues for the training points.
cmap_light = ListedColormap(['orange', 'cyan', 'cornflowerblue'])
cmap_bold = ListedColormap(['darkorange', 'c', 'darkblue'])

# Fit and plot once without shrinkage and once with a shrink threshold of
# 0.2 to show its effect on the decision boundaries.
for shrinkage in [None, .2]:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = NearestCentroid(shrink_threshold=shrinkage)
    clf.fit(X, y)
    y_pred = clf.predict(X)
    # Training-set accuracy for this shrinkage setting.
    print(shrinkage, np.mean(y == y_pred))
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold,
                edgecolor='k', s=20)
    plt.title("3-Class classification (shrink_threshold=%r)"
              % shrinkage)
    plt.axis('tight')

plt.show()
sh1ng/imba | create_products.py | 1 | 1559 | import gc
import pandas as pd
import numpy as np
import os
if __name__ == '__main__':
    # Build the set of (user, product) pairs seen in any prior order and
    # cache it as a pickle for downstream feature engineering.
    path = "data"
    # Narrow dtypes keep these large CSVs memory-friendly.
    order_prior = pd.read_csv(os.path.join(path, "order_products__prior.csv"), dtype={'order_id': np.uint32,
                                                                                      'product_id': np.uint16,
                                                                                      'add_to_cart_order':np.uint8,
                                                                                      'reordered': bool})
    orders = pd.read_csv(os.path.join(path, "orders.csv"), dtype={'order_id':np.uint32,
                                                                  'user_id': np.uint32,
                                                                  'eval_set': 'category',
                                                                  'order_number':np.uint8,
                                                                  'order_dow': np.uint8,
                                                                  'order_hour_of_day': np.uint8
                                                                  })
    print('loaded')
    # Only "prior" orders contribute to purchase history.
    orders = orders.loc[orders.eval_set == 'prior', :]
    orders_user = orders[['order_id', 'user_id']]
    # Attach the user id to every ordered product, then deduplicate to one
    # row per (user, product) pair.
    labels = pd.merge(order_prior, orders_user, on='order_id')
    labels = labels.loc[:, ['user_id', 'product_id']].drop_duplicates()
    print(labels)
    print('save')
    print(labels.shape)
    print(labels.columns)
    labels.to_pickle('data/previous_products.pkl')
Intel-Corporation/tensorflow | tensorflow/contrib/timeseries/examples/known_anomaly.py | 24 | 7880 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def state_space_estimator(exogenous_feature_columns):
  """Constructs a StructuralEnsembleRegressor.

  Args:
    exogenous_feature_columns: Feature columns describing the exogenous
      inputs (here, the changepoint indicator).

  Returns:
    A tuple (estimator, batch_size, window_size) consumed by
    train_and_evaluate_exogenous.
  """
  def _exogenous_update_condition(times, features):
    del times  # unused
    # Make exogenous updates sparse by setting an update condition. This in
    # effect allows missing exogenous features: if the condition evaluates to
    # False, no update is performed. Otherwise we sometimes end up with "leaky"
    # updates which add unnecessary uncertainty to the model even when there is
    # no changepoint.
    return tf.equal(tf.squeeze(features["is_changepoint"], axis=-1), "yes")
  return (
      tf.contrib.timeseries.StructuralEnsembleRegressor(
          periodicities=12,
          # Extract a smooth period by constraining the number of latent values
          # being cycled between.
          cycle_num_latent_values=3,
          num_features=1,
          exogenous_feature_columns=exogenous_feature_columns,
          exogenous_update_condition=_exogenous_update_condition),
      # Use truncated backpropagation with a window size of 64, batching
      # together 4 of these windows (random offsets) per training step. Training
      # with exogenous features often requires somewhat larger windows.
      4, 64)
def autoregressive_estimator(exogenous_feature_columns):
  """Constructs an ARRegressor for the same changepoint series.

  Args:
    exogenous_feature_columns: Feature columns describing the exogenous
      inputs (here, the changepoint indicator).

  Returns:
    A tuple (estimator, batch_size, window_size) consumed by
    train_and_evaluate_exogenous.  The window must cover one full
    input-plus-output span of the AR model.
  """
  input_window_size = 8
  output_window_size = 2
  return (
      tf.contrib.timeseries.ARRegressor(
          periodicities=12,
          num_features=1,
          input_window_size=input_window_size,
          output_window_size=output_window_size,
          exogenous_feature_columns=exogenous_feature_columns),
      64, input_window_size + output_window_size)
def train_and_evaluate_exogenous(
    estimator_fn, csv_file_name=_DATA_FILE, train_steps=300):
  """Training, evaluating, and predicting on a series with changepoints.

  Args:
    estimator_fn: Callable mapping exogenous feature columns to a tuple
      (estimator, batch_size, window_size).
    csv_file_name: CSV with columns (time, value, is_changepoint).
    train_steps: Number of training steps to run.

  Returns:
    A tuple (times, observed, all_times, mean, upper_limit, lower_limit,
    anomaly_locations) suitable for passing to make_plot.
  """
  # Indicate the format of our exogenous feature, in this case a string
  # representing a boolean value.
  string_feature = tf.feature_column.categorical_column_with_vocabulary_list(
      key="is_changepoint", vocabulary_list=["no", "yes"])
  # Specify the way this feature is presented to the model, here using a one-hot
  # encoding.
  one_hot_feature = tf.feature_column.indicator_column(
      categorical_column=string_feature)
  estimator, batch_size, window_size = estimator_fn(
      exogenous_feature_columns=[one_hot_feature])
  reader = tf.contrib.timeseries.CSVReader(
      csv_file_name,
      # Indicate the format of our CSV file. First we have two standard columns,
      # one for times and one for values. The third column is a custom exogenous
      # feature indicating whether each timestep is a changepoint. The
      # changepoint feature name must match the string_feature column name
      # above.
      column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
                    tf.contrib.timeseries.TrainEvalFeatures.VALUES,
                    "is_changepoint"),
      # Indicate dtypes for our features.
      column_dtypes=(tf.int64, tf.float32, tf.string),
      # This CSV has a header line; here we just ignore it.
      skip_header_lines=1)
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      reader, batch_size=batch_size, window_size=window_size)
  estimator.train(input_fn=train_input_fn, steps=train_steps)
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  # Create an input_fn for prediction, with a simulated changepoint. Since all
  # of the anomalies in the training data are explained by the exogenous
  # feature, we should get relatively confident predictions before the indicated
  # changepoint (since we are telling the model that no changepoint exists at
  # those times) and relatively uncertain predictions after.
  (predictions,) = tuple(estimator.predict(
      input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
          evaluation, steps=100,
          exogenous_features={
              "is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
  times = evaluation["times"][0]
  observed = evaluation["observed"][0, :, 0]
  # Stitch evaluation and forecast segments into one series for plotting.
  mean = np.squeeze(np.concatenate(
      [evaluation["mean"][0], predictions["mean"]], axis=0))
  variance = np.squeeze(np.concatenate(
      [evaluation["covariance"][0], predictions["covariance"]], axis=0))
  all_times = np.concatenate([times, predictions["times"]], axis=0)
  # One-standard-deviation band around the forecast mean.
  upper_limit = mean + np.sqrt(variance)
  lower_limit = mean - np.sqrt(variance)
  # Indicate the locations of the changepoints for plotting vertical lines.
  anomaly_locations = []
  with open(csv_file_name, "r") as csv_file:
    csv_reader = csv.DictReader(csv_file)
    for row in csv_reader:
      if row["is_changepoint"] == "yes":
        anomaly_locations.append(int(row["time"]))
  # Also mark the simulated changepoint injected into the forecast above.
  anomaly_locations.append(predictions["times"][49])
  return (times, observed, all_times, mean, upper_limit, lower_limit,
          anomaly_locations)
def make_plot(name, training_times, observed, all_times, mean,
              upper_limit, lower_limit, anomaly_locations):
  """Plot a time series with its forecast and changepoint markers.

  Args:
    name: Title for the figure.
    training_times: Time indices of the observed (training) series.
    observed: Observed values aligned with `training_times`.
    all_times: Time indices covering both training and forecast ranges.
    mean: Forecast mean aligned with `all_times`.
    upper_limit: Upper one-standard-deviation band, aligned with `all_times`.
    lower_limit: Lower one-standard-deviation band, aligned with `all_times`.
    anomaly_locations: Time indices of known changepoints; must be non-empty
      (the first entry carries the legend label).
  """
  pyplot.figure()
  pyplot.plot(training_times, observed, "b", label="training series")
  pyplot.plot(all_times, mean, "r", label="forecast")
  # Label only the first changepoint so the legend gets a single entry.
  pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
  for anomaly_location in anomaly_locations[1:]:
    pyplot.axvline(anomaly_location, linestyle="dotted")
  # Bug fix: `alpha` must be a float, not the string "0.2" — matplotlib
  # rejects non-numeric alpha values with a TypeError.
  pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
                      alpha=0.2)
  # Dashed vertical line marking the end of the training data.
  pyplot.axvline(training_times[-1], color="k", linestyle="--")
  pyplot.xlabel("time")
  pyplot.ylabel("observations")
  pyplot.legend(loc=0)
  pyplot.title(name)
def main(unused_argv):
  """Fit both estimator variants on the changepoint series and plot each."""
  if not HAS_MATPLOTLIB:
    raise ImportError(
        "Please install matplotlib to generate a plot from this example.")
  # Each call trains, evaluates, predicts past a simulated changepoint, and
  # unpacks the returned plotting tuple straight into make_plot.
  make_plot("Ignoring a known anomaly (state space)",
            *train_and_evaluate_exogenous(
                estimator_fn=state_space_estimator))
  # The autoregressive model needs more steps to converge than the default.
  make_plot("Ignoring a known anomaly (autoregressive)",
            *train_and_evaluate_exogenous(
                estimator_fn=autoregressive_estimator, train_steps=3000))
  pyplot.show()


if __name__ == "__main__":
  tf.app.run(main=main)
| apache-2.0 |
cielling/jupyternbs | tests/tester_nrp.py | 1 | 3821 | from __future__ import print_function
import pandas as pd
from bs4 import BeautifulSoup as BSoup
from sys import path as syspath
syspath.insert(0, "..")
from CanslimParams import CanslimParams
from myAssert import areEqual
from os import path as ospath
testDir = ospath.join("..", "TestData")
all10Ks = pd.read_csv(ospath.join(testDir, "nrp_all_10ks.csv"), parse_dates=['date'], dtype={'cik':str, 'conm':str, 'type':str,'path':str})
all10Qs = pd.read_csv(ospath.join(testDir, "nrp_all_10qs.csv"), parse_dates=['date'], dtype={'cik':str, 'conm':str, 'type':str,'path':str})
## Most recent filing in TestData: 2019 Q1
NRPCanslimParams= CanslimParams("NRP", all10Qs, all10Ks)
## Load the data, and proceed if successful.
if NRPCanslimParams.loadData(testDir):
## Test all the EPS stuff
## Test the last four quarters to cover the case where the 10-K was filed instead of the 10-Q.
print("Getting EPS for Q0:")
expect = 2.26
val = NRPCanslimParams.getEpsQuarter(0)
areEqual(expect, val)
print("Getting EPS for Q-1:")
expect = 8.77 - 1.71 - 2.46 - 1.49
val = NRPCanslimParams.getEpsQuarter(-1)
areEqual(expect, val)
print("Getting EPS for Q-2:")
expect = 1.71
val = NRPCanslimParams.getEpsQuarter(-2)
areEqual(expect, val)
print("Getting EPS for Q-3:")
expect = 0.0
val = NRPCanslimParams.getEpsQuarter(-3)
areEqual(expect, val)
## Test the last two years of EPS
print("Getting EPS for Y0:")
expect = 8.77
val = NRPCanslimParams.getEpsAnnual(0)
areEqual(expect, val)
print("Getting EPS for Y-1:")
expect = 5.06
val = NRPCanslimParams.getEpsAnnual(-1)
areEqual(expect, val)
print("Getting EPS growth for Q-3 to Q-4:")
expect = 2.46 / 1.49 *100.0
val = NRPCanslimParams.getEpsGrowthQuarter(-3, -4)
areEqual(expect, val)
print("Getting EPS growth for Y0 to Y-1:")
expect = 8.77/5.06*100.0
val = NRPCanslimParams.getEpsGrowthAnnual(0, -1)
areEqual(expect, val)
# print("Getting EPS growth rate for Q-3 to Q-4:")
# expect =
# val = NRPCanslimParams.getEpsGrowthRateQuarter(-4, -3)
# areEqual(expect, val)
# print("Getting EPS growth acceleration:")
# expect = []
# val = NRPCanslimParams.getEpsGrowthAcceleration(4)
# for i in range(0,3):
# print(i)
# areEqual(expect[i], val[i])
# print("Getting stability of EPS:")
# expect =
# val = NRPCanslimParams.getStabilityOfEpsGrowth(4)
# areEqual(expect, val)
## Test the Sales stuff
print("Getting sales for Q-2:")
expect = 122360. - 28565. - 39123. - 26088.
val = NRPCanslimParams.getSalesQuarter(-2)
areEqual(expect, val)
print("Getting sales for Y-1:")
expect = 89208.0
val = NRPCanslimParams.getSalesAnnual(-1)
areEqual(expect, val)
print("Getting sales growth between Q0 and Q-2:")
expect = 19106./(122360. - 28565. - 39123. - 26088.)*100.
val = NRPCanslimParams.getSalesGrowthQuarter(0, -2)
areEqual(expect, val)
# print("Getting sales growth rate between Q0 and Q-2:")
# expect =
# val = NRPCanslimParams.getSalesGrowthRateQuarter(-2, 0)
# areEqual(expect, val)
# print("Getting sales growth acceleration:")
# expect = []
# val = NRPCanslimParams.getSalesGrowthAcceleration(4)
# for i in range(0,3):
# areEqual(expect[i], val[i])
## Test the ROE - delta net income/ delta stockholder's equity
print("Getting current ROE:")
expect = (19106. - 39123.) / (11614. - 30105.)
val = NRPCanslimParams.getRoeTTM()
areEqual(expect, val)
## Test the auxiliary functions
else:
print("Unable to load data for NRP")
NRPCanslimParams.logErrors()
del NRPCanslimParams | agpl-3.0 |
hlin117/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 85 | 5600 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
eigen_solvers = ['dense', 'arpack']
# ----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
    # Three 2-D points; the middle one is (almost) equidistant from the others.
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])
    # With k=1, each row of the barycenter graph puts full weight on the
    # single nearest neighbour.
    A = barycenter_kneighbors_graph(X, 1)
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1., 0.],
         [1., 0., 0.],
         [0., 1., 0.]])
    A = barycenter_kneighbors_graph(X, 2)
    # check that columns sum to one
    assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
    # Reconstruction from 2 neighbours should stay close to the original data.
    pred = np.dot(A.toarray(), X)
    assert_less(linalg.norm(pred - X) / X.shape[0], 1)
# ----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
# Test the error raised when parameter passed to lle is invalid
def test_lle_init_parameters():
    """Fitting with an unrecognized eigen_solver or method must raise."""
    X = np.random.rand(5, 3)
    for param in ("eigen_solver", "method"):
        clf = manifold.LocallyLinearEmbedding(**{param: "error"})
        expected = "unrecognized %s 'error'" % param
        assert_raise_message(ValueError, expected, clf.fit, X)
def test_pipeline():
    # check that LocallyLinearEmbedding works fine as a Pipeline
    # only checks that no error is raised.
    # TODO check that it actually does something useful
    from sklearn import pipeline, datasets
    X, y = datasets.make_blobs(random_state=0)
    clf = pipeline.Pipeline(
        [('filter', manifold.LocallyLinearEmbedding(random_state=0)),
         ('clf', neighbors.KNeighborsClassifier())])
    clf.fit(X, y)
    # Embedding followed by k-NN should classify the blobs almost perfectly.
    assert_less(.9, clf.score(X, y))
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
    # All-ones data makes the local weight matrix singular; with the 'arpack'
    # solver this must surface as a ValueError rather than a silent failure.
    M = np.ones((10, 3))
    f = ignore_warnings
    assert_raises(ValueError, f(manifold.locally_linear_embedding),
                  M, 2, 1, method='standard', eigen_solver='arpack')
# regression test for #6033
def test_integer_input():
    # Integer-dtype input used to raise a TypeError inside LLE; fitting on an
    # int array must succeed for every embedding method.
    rand = np.random.RandomState(0)
    X = rand.randint(0, 100, size=(20, 3))
    for method in ["standard", "hessian", "modified", "ltsa"]:
        clf = manifold.LocallyLinearEmbedding(method=method, n_neighbors=10)
        clf.fit(X)  # this previously raised a TypeError
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| gpl-2.0 |
smartscheduling/scikit-learn-categorical-tree | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the opposite, when opposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
# Colour each cluster distinctly. Bug fix: use the builtin float() — the
# `np.float` alias is deprecated since NumPy 1.20 and removed in 1.24, and
# the structured-clustering plot loop later in this file already uses float().
for l in np.unique(label):
    ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
abhishekkrthakur/scikit-learn | sklearn/svm/tests/test_sparse.py | 27 | 10643 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
clf.fit(X, Y)
sp_clf = svm.SVC(kernel='linear', probability=True, random_state=0)
sp_clf.fit(X_sp, Y)
assert_array_equal(sp_clf.predict(T), true_result)
assert_true(sparse.issparse(sp_clf.support_vectors_))
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_true(sparse.issparse(sp_clf.dual_coef_))
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_true(sparse.issparse(sp_clf.coef_))
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
assert_array_almost_equal(clf.support_, sp_clf.support_)
assert_array_almost_equal(clf.predict(T), sp_clf.predict(T))
# refit with a different dataset
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
assert_array_almost_equal(clf.support_, sp_clf.support_)
assert_array_almost_equal(clf.predict(T2), sp_clf.predict(T2))
assert_array_almost_equal(clf.predict_proba(T2),
sp_clf.predict_proba(T2), 4)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
    """A callable linear kernel must predict like the built-in 'linear'."""
    def linear_kernel(x, y):
        return safe_sparse_dot(x, y.T)
    baseline = svm.SVC(kernel='linear').fit(X_sp, Y)
    custom = svm.SVC(kernel=linear_kernel).fit(X_sp, Y)
    assert_array_equal(baseline.predict(X_sp), custom.predict(X_sp))
def test_svc_iris():
"""Test the sparse SVC with the iris dataset"""
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_error():
    """
    Test that it gives proper exception on deficient input
    """
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, clf.fit, X_sp, Y)
    Y2 = Y[:-1] # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X_sp, Y2)
    # A well-formed problem must still fit and predict correctly afterwards.
    clf = svm.SVC()
    clf.fit(X_sp, Y)
    assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
"""
Similar to test_SVC
"""
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
"""Test the sparse LinearSVC with the iris dataset"""
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
"""
Test class weights
"""
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
"""
Test weights on individual samples
"""
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
"""
Test that sparse liblinear honours intercept_scaling param
"""
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
"""
Test on a subset from the 20newsgroups dataset.
This catchs some bugs if input is not correctly converted into
sparse format or weights are not correctly initialized.
"""
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
    # With max_iter=1 the solver cannot converge, so fitting must emit a
    # ConvergenceWarning instead of running to completion silently.
    sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
                 random_state=0, max_iter=1)
    assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
    # Two identically-seeded fits must produce identical probability
    # estimates, even when the solver stops early (max_iter=1).
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_1 = a.fit(X, Y).predict_proba(X)
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_2 = a.fit(X, Y).predict_proba(X)
    assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/api/sankey_demo_old.py | 6 | 7147 | #!/usr/bin/env python
from __future__ import print_function
__author__ = "Yannick Copin <ycopin@ipnl.in2p3.fr>"
__version__ = "Time-stamp: <10/02/2010 16:49 ycopin@lyopc548.in2p3.fr>"
import numpy as np
def sankey(ax,
outputs=[100.], outlabels=None,
inputs=[100.], inlabels='',
dx=40, dy=10, outangle=45, w=3, inangle=30, offset=2, **kwargs):
"""Draw a Sankey diagram.
outputs: array of outputs, should sum up to 100%
outlabels: output labels (same length as outputs),
or None (use default labels) or '' (no labels)
inputs and inlabels: similar for inputs
dx: horizontal elongation
dy: vertical elongation
outangle: output arrow angle [deg]
w: output arrow shoulder
inangle: input dip angle
offset: text offset
**kwargs: propagated to Patch (e.g., fill=False)
Return (patch,[intexts,outtexts]).
"""
import matplotlib.patches as mpatches
from matplotlib.path import Path
outs = np.absolute(outputs)
outsigns = np.sign(outputs)
outsigns[-1] = 0 # Last output
ins = np.absolute(inputs)
insigns = np.sign(inputs)
insigns[0] = 0 # First input
assert sum(outs) == 100, "Outputs don't sum up to 100%"
assert sum(ins) == 100, "Inputs don't sum up to 100%"
def add_output(path, loss, sign=1):
h = (loss/2 + w)*np.tan(outangle/180. * np.pi) # Arrow tip height
move, (x, y) = path[-1] # Use last point as reference
if sign == 0: # Final loss (horizontal)
path.extend([(Path.LINETO, [x+dx, y]),
(Path.LINETO, [x+dx, y+w]),
(Path.LINETO, [x+dx+h, y-loss/2]), # Tip
(Path.LINETO, [x+dx, y-loss-w]),
(Path.LINETO, [x+dx, y-loss])])
outtips.append((sign, path[-3][1]))
else: # Intermediate loss (vertical)
path.extend([(Path.CURVE4, [x+dx/2, y]),
(Path.CURVE4, [x+dx, y]),
(Path.CURVE4, [x+dx, y+sign*dy]),
(Path.LINETO, [x+dx-w, y+sign*dy]),
(Path.LINETO, [x+dx+loss/2, y+sign*(dy+h)]), # Tip
(Path.LINETO, [x+dx+loss+w, y+sign*dy]),
(Path.LINETO, [x+dx+loss, y+sign*dy]),
(Path.CURVE3, [x+dx+loss, y-sign*loss]),
(Path.CURVE3, [x+dx/2+loss, y-sign*loss])])
outtips.append((sign, path[-5][1]))
def add_input(path, gain, sign=1):
h = (gain/2)*np.tan(inangle/180. * np.pi) # Dip depth
move, (x, y) = path[-1] # Use last point as reference
if sign == 0: # First gain (horizontal)
path.extend([(Path.LINETO, [x-dx, y]),
(Path.LINETO, [x-dx+h, y+gain/2]), # Dip
(Path.LINETO, [x-dx, y+gain])])
xd, yd = path[-2][1] # Dip position
indips.append((sign, [xd-h, yd]))
else: # Intermediate gain (vertical)
path.extend([(Path.CURVE4, [x-dx/2, y]),
(Path.CURVE4, [x-dx, y]),
(Path.CURVE4, [x-dx, y+sign*dy]),
(Path.LINETO, [x-dx-gain/2, y+sign*(dy-h)]), # Dip
(Path.LINETO, [x-dx-gain, y+sign*dy]),
(Path.CURVE3, [x-dx-gain, y-sign*gain]),
(Path.CURVE3, [x-dx/2-gain, y-sign*gain])])
xd, yd = path[-4][1] # Dip position
indips.append((sign, [xd, yd+sign*h]))
outtips = [] # Output arrow tip dir. and positions
urpath = [(Path.MOVETO, [0, 100])] # 1st point of upper right path
lrpath = [(Path.LINETO, [0, 0])] # 1st point of lower right path
for loss, sign in zip(outs, outsigns):
add_output(sign>=0 and urpath or lrpath, loss, sign=sign)
indips = [] # Input arrow tip dir. and positions
llpath = [(Path.LINETO, [0, 0])] # 1st point of lower left path
ulpath = [(Path.MOVETO, [0, 100])] # 1st point of upper left path
for gain, sign in reversed(list(zip(ins, insigns))):
add_input(sign<=0 and llpath or ulpath, gain, sign=sign)
def revert(path):
"""A path is not just revertable by path[::-1] because of Bezier
curves."""
rpath = []
nextmove = Path.LINETO
for move, pos in path[::-1]:
rpath.append((nextmove, pos))
nextmove = move
return rpath
# Concatenate subpathes in correct order
path = urpath + revert(lrpath) + llpath + revert(ulpath)
codes, verts = zip(*path)
verts = np.array(verts)
# Path patch
path = Path(verts, codes)
patch = mpatches.PathPatch(path, **kwargs)
ax.add_patch(patch)
if False: # DEBUG
print("urpath", urpath)
print("lrpath", revert(lrpath))
print("llpath", llpath)
print("ulpath", revert(ulpath))
xs, ys = zip(*verts)
ax.plot(xs, ys, 'go-')
# Labels
def set_labels(labels, values):
"""Set or check labels according to values."""
if labels == '': # No labels
return labels
elif labels is None: # Default labels
return ['%2d%%' % val for val in values]
else:
assert len(labels) == len(values)
return labels
def put_labels(labels, positions, output=True):
"""Put labels to positions."""
texts = []
lbls = output and labels or labels[::-1]
for i, label in enumerate(lbls):
s, (x, y) = positions[i] # Label direction and position
if s == 0:
t = ax.text(x+offset, y, label,
ha=output and 'left' or 'right', va='center')
elif s > 0:
t = ax.text(x, y+offset, label, ha='center', va='bottom')
else:
t = ax.text(x, y-offset, label, ha='center', va='top')
texts.append(t)
return texts
outlabels = set_labels(outlabels, outs)
outtexts = put_labels(outlabels, outtips, output=True)
inlabels = set_labels(inlabels, ins)
intexts = put_labels(inlabels, indips, output=False)
# Axes management
ax.set_xlim(verts[:, 0].min()-dx, verts[:, 0].max()+dx)
ax.set_ylim(verts[:, 1].min()-dy, verts[:, 1].max()+dy)
ax.set_aspect('equal', adjustable='datalim')
return patch, [intexts, outtexts]
if __name__=='__main__':
import matplotlib.pyplot as plt
outputs = [10., -20., 5., 15., -10., 40.]
outlabels = ['First', 'Second', 'Third', 'Fourth', 'Fifth', 'Hurray!']
outlabels = [s+'\n%d%%' % abs(l) for l, s in zip(outputs, outlabels)]
inputs = [60., -25., 15.]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[], title="Sankey diagram")
patch, (intexts, outtexts) = sankey(ax, outputs=outputs,
outlabels=outlabels, inputs=inputs,
inlabels=None, fc='g', alpha=0.2)
outtexts[1].set_color('r')
outtexts[-1].set_fontweight('bold')
plt.show()
| mit |
CSB-IG/natk | ninnx/jaccard/jinx_network.py | 2 | 5154 | #This is a piece of code for generating a network using as input a
#dictionary file (dictionary.py).
### TO-DO: To generate a pickle
import networkx as nx
import matplotlib.pyplot as plt
import itertools as itools
from math import log, log10
def jaccard_index(first, *others):
    """Jaccard similarity of `first` with `others`: |intersection|/|union|."""
    shared = first.intersection(*others)
    combined = first.union(*others)
    return float(len(shared)) / float(len(combined))
# diccionario = {
# 'a': set([1,3,5]),
# 'b': set([1,2,5]),
# }
diccionario={'Protomaya': set(['p','b','tap','tapg','t','tg','c','cg','cap','capg','cretr','cretrg','kpal','kpalg','k','kg','q','qg','s','sap','sretr','h','hw','x','xw','g','m','n','nvel','l','r','w','y','i','e','a','o','u']),'Achi':set(['p','b','cap','sap','capg','t','tg','c','cg','c','cg','cap','cg','k','kg','k','kg','q','g','s','sap','sap','sap','h','w','h','w','g','m','n','h','l','r','w','y','i','e','i','a','o','u']),'Aguacateco':set(['p','b','c','cg','c','capg','c','cg','cap','cretr','cretrg','cap','cretrg','capg','cretrg','k','kg','sap','k','kg','k','q','kg','qg','s','sap','sap','sretr''h','h','h','h','g','m','n','l','cretr','w','y','i','u','e','a','o','u']),'Cakchiquel':set(['p','b','cap','capg','t','c','cg','c','cg','cap','capg','k','kg','k','kg','q','kg','g','qg','s','sap','sap','s','sap','h','w','h','w','g','m','b','n','h','l','r','w','m','y','i','u','e','i','a','o','u']),'Chuj':set(['p','b','t','tg','t','cap','c','cg','c','cg','cap','cap','cap','cap','cg','cap','capg','k','kg','s','sap','h','h','h','h','g','m','n','nvel','l','r','y','w','y','i','e','a','o','u']),'Chol':set(['p','b','tap','tapg','c','cg','c','cg','cap','capg','cap','cap','cg','k','kg','k','kg','s','sap','sap','h','h','h','h','g','m','n','l','y','w','y','i','e','e','a','o','u']),'Chorti':set(['p','b','t','tg','t','cap','tg','capg','c','c','cg','cap','capg','tg','cap','cap','capg','cap','capg','k','s','sap','h','h','h','g','m','n','n','r','y','w','y','i','e','a','o','u']),'Huasteco':set(['p','b','t','tg','t','tg','dfs','t','c','cg','dfs','cap','tg','cg','cap','cretr','capg','cg','c','cg','k','kg','k','kg','dfs','sap','dfs','sap','h','w','h','w','g','m','w','n','y','h','l','y','w','y','i','e','e','a','o','u','u']),'Ixil':set(['p','b','c','cg','cap','capg','c','cg','cap','cap','cretr','cretrg','c''capg','cretrg','k','cretr','kg','k','kg','h','q','qg','s','sap','sretr','h','w','h','w','g','m','n','l','h','l','cap','w','y','i','e','a','o','u']),'Jacalteco':set(['p','b','t','tg','t','cap','c','cg
','cap','cretr','cretrg','cap','cretr','capg','cretrg','k','kg','sap','cap','k','kg','x','q','kg','g','qg','s','sap','sretr','s','sap','sretr','h','h','x','x','g','m','n','nvel','l','y','w','y','i','e','e','a','o','u']),'Kekchi':set(['p','b','cap','capg','t','tg','c','s','cg','c','cg','cap','capg','k','k','kg','k','q','kg','qg','s','sap','sap','h','w','x','w','g','m','n','x','l','r','w','y','i','e','a','o','u']),'Yucateco':set(['p','b','cap','capg','tg','cap','capg','c','cap','t','cg','capg','cap','capg','cg','cap','capg','k','kg','sap','k','kg','k','kg','g','s','sap','sap','h','h','h','h','g','m','b','n','l','w','y','i','u','e','a','o','u','u']),'Mam':set(['p','b','c','t','cg','t','cap','c','cg','cap','cretr','capg','cap','cretr','capg','cretrg','k','cap','k','kg','k','q','qg','s','sap','s','sap','sretr','sretr','h','h','hw','h','h','g','m','n','l','h','l','cap','w','y','i','e','a','o','u']),'Mop':set(['p','b','c','cg','s','sap','h','g','m','n','h','n','l','y','w','m','y','i','e','a','o','u']),'Pocomchi':set(['p','b','cap','capg','t','tg','c','s','cg','c','cg','cap','capg','k','kg','k','kg','q','qg','s','sap','sap','sap','h','h','x','x','g','m','n','p','n','l','x','l','r','r','w','y','i','e','e','i','a','o','u']),'Pocomam':set(['p','b','cap','capg','t','tg','c','cg','c','cg','cap','capg','k','kg','k','kg','q','qg','s','sap','sap','h','h','x','x','g','m','n','n','x','l','r','w','y','i','e','e','i','a','o','u']),'Tzeltal':set(['p','b','t','tg','t','tg','c','s','cg','c','cg','cap','capg','cap','cap','capg','k','kg','k','kg','s','s','sap','sap','h','h','h','h','g','m','n','n','l','r','y','w','b','y','i','e','a','o','u']),'Quechua':set(['p','b','cap','capg','t','tg','c','cg','c','cg','cap','capg','k','kg','k','kg','q','qg','s','sap','sap','sap','h','w','h','w','g','m','n','l','h','l','r','w','y','i','e','i','a','o','u'])}
def network_from_dict(diccionario, threshold):
    """Build a similarity graph from a dict of sets.

    One node per dictionary key; an edge joins every pair of keys whose
    Jaccard index is at least ``threshold``, stored on the edge as ``jin``.
    """
    graph = nx.Graph()
    for source, target in itools.combinations(diccionario.keys(), 2):
        similarity = jaccard_index(diccionario[source], diccionario[target])
        if similarity >= threshold:
            graph.add_edge(source, target, jin=similarity)
    return graph
# Build the similarity network at a high threshold and draw it with node
# sizes scaled by degree and edge widths scaled by the Jaccard index.
h = network_from_dict(diccionario, 0.9)
# Collect the Jaccard index of every edge for the width scaling below.
jinx = []
for e in h.edges():
    jinx.append(h.get_edge_data(*e)['jin'])
# BUG FIX: was `plt.cla` (a bare attribute access, i.e. a no-op) --
# it must be called to actually clear the current axes.
plt.cla()
njinx = [n * 10 for n in jinx]
EdgeWidth = [log(n, 2) for n in njinx]
# Exponential scaling makes hub nodes visually dominant.
NodeSize = [2 ** h.degree(n) for n in nx.nodes(h)]
pos = nx.spring_layout(h)
nx.draw_networkx_labels(h, pos=pos)
nx.draw_networkx_nodes(h, pos=pos, node_size=NodeSize, label=True, alpha=0.75)
nx.draw_networkx_edges(h, pos=pos, width=EdgeWidth, alpha=0.75)
plt.show()
| gpl-3.0 |
synthicity/urbansim | urbansim/models/tests/test_dcm.py | 3 | 22138 | import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
import os
import tempfile
import yaml
from pandas.util import testing as pdt
from ...utils import testing
from .. import dcm
@pytest.fixture
def seed(request):
    # Pin the global NumPy RNG for reproducible predictions, and restore
    # the prior state afterwards so the seeding cannot leak across tests.
    saved_state = np.random.get_state()
    request.addfinalizer(lambda: np.random.set_state(saved_state))
    np.random.seed(0)
@pytest.fixture
def choosers():
    # Five choosers, each pre-assigned to every other alternative id.
    data = {'var1': range(5, 10),
            'thing_id': list('acegi')}
    return pd.DataFrame(data)
@pytest.fixture
def grouped_choosers(choosers):
    # Tag the base choosers with a segment label for the grouped models.
    choosers['group'] = list('xyxxy')
    return choosers
@pytest.fixture
def alternatives():
    # Ten alternatives indexed 'a'..'j' with two numeric attributes.
    index = pd.Index(list('abcdefghij'), name='thing_id')
    return pd.DataFrame({'var2': range(10, 20), 'var3': range(20, 30)},
                        index=index)
@pytest.fixture
def basic_dcm():
    # A single MNL model exercising both fit- and predict-time filters.
    # Positional arguments, in order: model expression, sample size,
    # probability mode, choice mode, chooser fit/predict filters,
    # alternative fit/predict filters, interaction predict filters,
    # estimation sample size, prediction sample size, choice column,
    # and a display name.
    return dcm.MNLDiscreteChoiceModel(
        'var2 + var1:var3', 5,
        'full_product', 'individual',
        ['var1 != 5'], ['var1 != 7'],
        ['var3 != 15'], ['var2 != 14'],
        None, None,
        None, None, 'Test LCM')
@pytest.fixture
def basic_dcm_fit(basic_dcm, choosers, alternatives):
    """The ``basic_dcm`` model, already estimated on the fixture data."""
    basic_dcm.fit(choosers, alternatives, choosers.thing_id)
    return basic_dcm
def test_unit_choice_uniform(choosers, alternatives):
    # With uniform weights, every chooser must land on some alternative.
    weights = [1] * len(alternatives)
    picked = dcm.unit_choice(choosers.index, alternatives.index, weights)
    npt.assert_array_equal(picked.index, choosers.index)
    assert picked.isin(alternatives.index).all()
def test_unit_choice_some_zero(choosers, alternatives):
    # Zero-weight alternatives are never chosen; with exactly five
    # positive weights and five choosers the outcome is deterministic.
    weights = [0, 1, 0, 1, 1, 0, 1, 0, 0, 1]
    picked = dcm.unit_choice(choosers.index, alternatives.index, weights)
    npt.assert_array_equal(picked.index, choosers.index)
    npt.assert_array_equal(sorted(picked.values), ['b', 'd', 'e', 'g', 'j'])
def test_unit_choice_not_enough(choosers, alternatives):
    # Only two alternatives are available for five choosers, so three
    # choosers come back unplaced (NaN).
    weights = [0, 0, 0, 0, 0, 1, 0, 1, 0, 0]
    picked = dcm.unit_choice(choosers.index, alternatives.index, weights)
    npt.assert_array_equal(picked.index, choosers.index)
    assert picked.isnull().sum() == 3
    npt.assert_array_equal(sorted(picked[~picked.isnull()]), ['f', 'h'])
def test_unit_choice_none_available(choosers, alternatives):
    # Nothing available at all: every chooser is returned as NaN.
    picked = dcm.unit_choice(
        choosers.index, alternatives.index, [0] * len(alternatives))
    npt.assert_array_equal(picked.index, choosers.index)
    assert picked.isnull().all()
def test_mnl_dcm_prob_choice_mode_compat(basic_dcm):
    # 'single_chooser' probabilities only pair with aggregate choices,
    # and 'full_product' only with individual choices.
    for prob_mode, choice_mode in [('single_chooser', 'individual'),
                                   ('full_product', 'aggregate')]:
        with pytest.raises(ValueError):
            dcm.MNLDiscreteChoiceModel(
                basic_dcm.model_expression, basic_dcm.sample_size,
                probability_mode=prob_mode, choice_mode=choice_mode)
def test_mnl_dcm_prob_mode_interaction_compat(basic_dcm):
    # Interaction predict filters cannot be combined with the
    # 'full_product' probability mode.
    kwargs = dict(probability_mode='full_product',
                  choice_mode='individual',
                  interaction_predict_filters=['var1 > 9000'])
    with pytest.raises(ValueError):
        dcm.MNLDiscreteChoiceModel(
            basic_dcm.model_expression, basic_dcm.sample_size, **kwargs)
def test_mnl_dcm(seed, basic_dcm, choosers, alternatives):
    """End-to-end fit/predict smoke test plus the columns_used helpers.

    The exact predicted choices below depend on the RNG state pinned by
    the ``seed`` fixture.
    """
    # Column bookkeeping should reflect the filters and model expression.
    assert basic_dcm.choosers_columns_used() == ['var1']
    assert set(basic_dcm.alts_columns_used()) == {'var2', 'var3'}
    assert set(basic_dcm.interaction_columns_used()) == \
        {'var1', 'var2', 'var3'}
    assert set(basic_dcm.columns_used()) == {'var1', 'var2', 'var3'}
    loglik = basic_dcm.fit(choosers, alternatives, choosers.thing_id)
    basic_dcm.report_fit()
    # hard to test things exactly because there's some randomness
    # involved, but can at least do a smoke test.
    assert len(loglik) == 3
    assert len(basic_dcm.fit_parameters) == 2
    assert len(basic_dcm.fit_parameters.columns) == 3
    filtered_choosers, filtered_alts = basic_dcm.apply_predict_filters(
        choosers, alternatives)
    # Full-product mode: one probability per (chooser, alternative) pair.
    probs = basic_dcm.probabilities(choosers, alternatives)
    assert len(probs) == len(filtered_choosers) * len(filtered_alts)
    # Summed probabilities are indexed by alternative and total to the
    # number of filtered choosers.
    sprobs = basic_dcm.summed_probabilities(choosers, alternatives)
    assert len(sprobs) == len(filtered_alts)
    pdt.assert_index_equal(
        sprobs.index, filtered_alts.index, check_names=False)
    npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
    choices = basic_dcm.predict(choosers.iloc[1:], alternatives)
    pdt.assert_series_equal(
        choices,
        pd.Series(
            ['h', 'c', 'f'], index=pd.Index([1, 3, 4], name='chooser_id')))
    # check that we can do a YAML round-trip
    yaml_str = basic_dcm.to_yaml()
    new_model = dcm.MNLDiscreteChoiceModel.from_yaml(yaml_str)
    assert new_model.fitted
    testing.assert_frames_equal(
        basic_dcm.fit_parameters, new_model.fit_parameters)
def test_mnl_dcm_repeated_alts(basic_dcm, choosers, alternatives):
    """Aggregate prediction over an alternatives table with repeated ids."""
    interaction_predict_filters = ['var1 * var2 > 50']
    choice_column = 'thing_id'
    # Reconfigure the basic fixture for chooser-aggregate prediction.
    basic_dcm.probability_mode = 'single_chooser'
    basic_dcm.choice_mode = 'aggregate'
    basic_dcm.interaction_predict_filters = interaction_predict_filters
    basic_dcm.choice_column = choice_column
    loglik = basic_dcm.fit(choosers, alternatives, 'thing_id')
    basic_dcm.report_fit()
    # hard to test things exactly because there's some randomness
    # involved, but can at least do a smoke test.
    assert len(loglik) == 3
    assert len(basic_dcm.fit_parameters) == 2
    assert len(basic_dcm.fit_parameters.columns) == 3
    # Duplicate each alternative a different number of times; predicted
    # choices must refer to row labels of the expanded table.
    repeated_index = alternatives.index.repeat([1, 2, 3, 2, 4, 3, 2, 1, 5, 8])
    repeated_alts = alternatives.loc[repeated_index].reset_index()
    choices = basic_dcm.predict(choosers, repeated_alts)
    pdt.assert_index_equal(choices.index, pd.Index([0, 1, 3, 4]))
    assert choices.isin(repeated_alts.index).all()
def test_mnl_dcm_yaml(basic_dcm, choosers, alternatives):
    """YAML serialization round-trips both before and after fitting."""
    # Every serialized key except the four constants below mirrors a
    # same-named attribute of the model.
    attrs = (
        'model_expression', 'sample_size', 'name', 'probability_mode',
        'choice_mode', 'choosers_fit_filters', 'choosers_predict_filters',
        'alts_fit_filters', 'alts_predict_filters',
        'interaction_predict_filters', 'estimation_sample_size',
        'prediction_sample_size', 'choice_column')
    expected = {attr: getattr(basic_dcm, attr) for attr in attrs}
    expected.update(
        model_type='discretechoice',
        fitted=False,
        log_likelihoods=None,
        fit_parameters=None)

    # Unfitted model serializes exactly and survives a round-trip.
    assert yaml.safe_load(basic_dcm.to_yaml()) == expected
    round_tripped = dcm.MNLDiscreteChoiceModel.from_yaml(basic_dcm.to_yaml())
    assert yaml.safe_load(round_tripped.to_yaml()) == expected

    basic_dcm.fit(choosers, alternatives, 'thing_id')

    # After fitting, likelihoods and parameters become run-dependent
    # dicts -- pop them and compare everything else exactly.
    expected['fitted'] = True
    del expected['log_likelihoods']
    del expected['fit_parameters']
    serialized = yaml.safe_load(basic_dcm.to_yaml())
    assert isinstance(serialized.pop('log_likelihoods'), dict)
    assert isinstance(serialized.pop('fit_parameters'), dict)
    assert serialized == expected

    refit = dcm.MNLDiscreteChoiceModel.from_yaml(basic_dcm.to_yaml())
    assert refit.fitted is True
def test_mnl_dcm_prob_mode_single(seed, basic_dcm_fit, choosers, alternatives):
    """'single_chooser' mode: one probability distribution over the alts.

    The hard-coded probabilities depend on the model fit under the RNG
    state pinned by the ``seed`` fixture.
    """
    basic_dcm_fit.probability_mode = 'single_chooser'
    filtered_choosers, filtered_alts = basic_dcm_fit.apply_predict_filters(
        choosers, alternatives)
    # Only the first chooser's distribution is computed in this mode.
    probs = basic_dcm_fit.probabilities(choosers.iloc[1:], alternatives)
    pdt.assert_series_equal(
        probs,
        pd.Series(
            [0.25666709612190147,
             0.20225620916965448,
             0.15937989234214262,
             0.1255929308043417,
             0.077988133629030815,
             0.061455420294827229,
             0.04842747874412457,
             0.038161332007195688,
             0.030071506886781514],
            index=pd.MultiIndex.from_product(
                [[1], filtered_alts.index.values],
                names=['chooser_id', 'alternative_id'])))
    # Summed probabilities still scale to the total number of choosers.
    sprobs = basic_dcm_fit.summed_probabilities(choosers, alternatives)
    pdt.assert_index_equal(
        sprobs.index, filtered_alts.index, check_names=False)
    npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
def test_mnl_dcm_prob_mode_single_prediction_sample_size(
        seed, basic_dcm_fit, choosers, alternatives):
    """'single_chooser' mode with alternative sampling at predict time.

    Sampled alternative ids and probabilities are pinned by the ``seed``
    fixture.
    """
    basic_dcm_fit.probability_mode = 'single_chooser'
    basic_dcm_fit.prediction_sample_size = 5
    filtered_choosers, filtered_alts = basic_dcm_fit.apply_predict_filters(
        choosers, alternatives)
    # Only five sampled alternatives appear in the distribution.
    probs = basic_dcm_fit.probabilities(choosers.iloc[1:], alternatives)
    pdt.assert_series_equal(
        probs,
        pd.Series(
            [0.11137766,
             0.05449957,
             0.14134044,
             0.22761617,
             0.46516616],
            index=pd.MultiIndex.from_product(
                [[1], ['g', 'j', 'f', 'd', 'a']],
                names=['chooser_id', 'alternative_id'])))
    # Summed probabilities are indexed by the sampled alternatives
    # (duplicates possible) but still total to the chooser count.
    sprobs = basic_dcm_fit.summed_probabilities(choosers, alternatives)
    pdt.assert_index_equal(
        sprobs.index,
        pd.Index(['d', 'g', 'a', 'c', 'd'], name='alternative_id'))
    npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
def test_mnl_dcm_prob_mode_full_prediction_sample_size(
        seed, basic_dcm_fit, choosers, alternatives):
    """'full_product' mode with per-chooser alternative sampling.

    Each (non-excluded) chooser gets a distribution over 5 sampled
    alternatives that sums to one.
    """
    basic_dcm_fit.probability_mode = 'full_product'
    basic_dcm_fit.prediction_sample_size = 5
    filtered_choosers, filtered_alts = basic_dcm_fit.apply_predict_filters(
        choosers, alternatives)
    probs = basic_dcm_fit.probabilities(choosers.iloc[1:], alternatives)
    assert len(probs) == (len(filtered_choosers) - 1) * 5
    npt.assert_allclose(probs.sum(), len(filtered_choosers) - 1)
    sprobs = basic_dcm_fit.summed_probabilities(choosers, alternatives)
    pdt.assert_index_equal(
        sprobs.index, filtered_alts.index, check_names=False)
    npt.assert_allclose(sprobs.sum(), len(filtered_choosers))
def test_mnl_dcm_choice_mode_agg(seed, basic_dcm_fit, choosers, alternatives):
    """Aggregate choice mode: one shared ranking drives all assignments.

    The expected choices are pinned by the ``seed`` fixture.
    """
    basic_dcm_fit.probability_mode = 'single_chooser'
    basic_dcm_fit.choice_mode = 'aggregate'
    # NOTE(review): the original also called apply_predict_filters() and
    # discarded the result; that dead computation has been removed.
    choices = basic_dcm_fit.predict(choosers, alternatives)
    pdt.assert_series_equal(
        choices,
        pd.Series(['f', 'a', 'd', 'c'], index=[0, 1, 3, 4]))
def test_mnl_dcm_group(seed, grouped_choosers, alternatives):
    """Fit/predict for a model group with one model per chooser segment.

    Exact predicted choices depend on the RNG state pinned by ``seed``.
    """
    model_exp = 'var2 + var1:var3'
    sample_size = 4
    choosers_predict_filters = ['var1 != 7']
    alts_predict_filters = ['var2 != 14']
    # Segment 'x' filters choosers at predict time; 'y' filters alts.
    group = dcm.MNLDiscreteChoiceModelGroup('group')
    group.add_model_from_params(
        'x', model_exp, sample_size,
        choosers_predict_filters=choosers_predict_filters)
    group.add_model_from_params(
        'y', model_exp, sample_size, alts_predict_filters=alts_predict_filters)
    assert group.choosers_columns_used() == ['var1']
    assert group.alts_columns_used() == ['var2']
    assert set(group.interaction_columns_used()) == {'var1', 'var2', 'var3'}
    assert set(group.columns_used()) == {'var1', 'var2', 'var3'}
    assert group.fitted is False
    logliks = group.fit(grouped_choosers, alternatives, 'thing_id')
    assert group.fitted is True
    assert 'x' in logliks and 'y' in logliks
    assert isinstance(logliks['x'], dict) and isinstance(logliks['y'], dict)
    # Each segment's probability table respects that segment's filters.
    probs = group.probabilities(grouped_choosers, alternatives)
    for name, df in grouped_choosers.groupby('group'):
        assert name in probs
        filtered_choosers, filtered_alts = \
            group.models[name].apply_predict_filters(df, alternatives)
        assert len(probs[name]) == len(filtered_choosers) * len(filtered_alts)
    filtered_choosers, filtered_alts = group.apply_predict_filters(
        grouped_choosers, alternatives)
    sprobs = group.summed_probabilities(grouped_choosers, alternatives)
    assert len(sprobs) == len(filtered_alts)
    pdt.assert_index_equal(
        sprobs.index, filtered_alts.index, check_names=False)
    # Save the RNG state so the second prediction below can replay the
    # same draws with remove_alts toggled on.
    choice_state = np.random.get_state()
    choices = group.predict(grouped_choosers, alternatives)
    pdt.assert_series_equal(
        choices,
        pd.Series(
            ['c', 'a', 'a', 'g'],
            index=pd.Index([0, 3, 1, 4], name='chooser_id')))
    # check that we don't get the same alt twice if they are removed
    # make sure we're starting from the same random state as the last draw
    np.random.set_state(choice_state)
    group.remove_alts = True
    choices = group.predict(grouped_choosers, alternatives)
    pdt.assert_series_equal(
        choices,
        pd.Series(
            ['c', 'a', 'b', 'g'],
            index=pd.Index([0, 3, 1, 4], name='chooser_id')))
def test_mnl_dcm_segmented_raises():
    # Adding a segment without a default model expression must fail.
    seg_model = dcm.SegmentedMNLDiscreteChoiceModel('group', 2)
    with pytest.raises(ValueError):
        seg_model.add_segment('x')
def test_mnl_dcm_segmented_prob_choice_mode_compat():
    # The segmented model enforces the same probability/choice mode
    # pairing rules as the plain MNL model.
    for prob_mode, choice_mode in [('single_chooser', 'individual'),
                                   ('full_product', 'aggregate')]:
        with pytest.raises(ValueError):
            dcm.SegmentedMNLDiscreteChoiceModel(
                'group', 10,
                probability_mode=prob_mode, choice_mode=choice_mode)
def test_mnl_dcm_segmented_prob_mode_interaction_compat():
    # Interaction predict filters are rejected in 'full_product' mode.
    kwargs = dict(probability_mode='full_product',
                  choice_mode='individual',
                  interaction_predict_filters=['var1 > 9000'])
    with pytest.raises(ValueError):
        dcm.SegmentedMNLDiscreteChoiceModel('group', 10, **kwargs)
def test_mnl_dcm_segmented(seed, grouped_choosers, alternatives):
    """Fit/predict for the segmented model with a default expression.

    Segment 'x' uses the default expression, 'y' its own; exact choices
    are pinned by the ``seed`` fixture.
    """
    model_exp = 'var2 + var1:var3'
    sample_size = 4
    group = dcm.SegmentedMNLDiscreteChoiceModel(
        'group', sample_size, default_model_expr=model_exp)
    group.add_segment('x')
    group.add_segment('y', 'var3 + var1:var2')
    # No filters configured, so only expression columns (plus the
    # segmentation column) are reported as used.
    assert group.choosers_columns_used() == []
    assert group.alts_columns_used() == []
    assert set(group.interaction_columns_used()) == {'var1', 'var2', 'var3'}
    assert set(group.columns_used()) == {'group', 'var1', 'var2', 'var3'}
    assert group.fitted is False
    logliks = group.fit(grouped_choosers, alternatives, 'thing_id')
    assert group.fitted is True
    assert 'x' in logliks and 'y' in logliks
    assert isinstance(logliks['x'], dict) and isinstance(logliks['y'], dict)
    probs = group.probabilities(grouped_choosers, alternatives)
    for name, df in grouped_choosers.groupby('group'):
        assert name in probs
        assert len(probs[name]) == len(df) * len(alternatives)
    sprobs = group.summed_probabilities(grouped_choosers, alternatives)
    assert len(sprobs) == len(alternatives)
    pdt.assert_index_equal(
        sprobs.index, alternatives.index, check_names=False)
    # Save the RNG state so the second prediction can replay the same
    # draws with remove_alts toggled on.
    choice_state = np.random.get_state()
    choices = group.predict(grouped_choosers, alternatives)
    pdt.assert_series_equal(
        choices,
        pd.Series(
            ['c', 'a', 'b', 'a', 'j'],
            index=pd.Index([0, 2, 3, 1, 4], name='chooser_id')))
    # check that we don't get the same alt twice if they are removed
    # make sure we're starting from the same random state as the last draw
    np.random.set_state(choice_state)
    group._group.remove_alts = True
    choices = group.predict(grouped_choosers, alternatives)
    pdt.assert_series_equal(
        choices,
        pd.Series(
            ['c', 'a', 'b', 'd', 'j'],
            index=pd.Index([0, 2, 3, 1, 4], name='chooser_id')))
def test_mnl_dcm_segmented_yaml(grouped_choosers, alternatives):
    """YAML round-trip for a segmented model, before and after fitting.

    Also verifies that group-level settings (probability/choice mode and
    sample sizes) propagate to each segment's model on reload.
    """
    model_exp = 'var2 + var1:var3'
    sample_size = 4
    group = dcm.SegmentedMNLDiscreteChoiceModel(
        'group', sample_size, default_model_expr=model_exp, name='test_seg',
        probability_mode='single_chooser', choice_mode='aggregate',
        estimation_sample_size=20, prediction_sample_size=30)
    group.add_segment('x')
    group.add_segment('y', 'var3 + var1:var2')
    # Segment 'x' inherits the default expression, so only 'y' carries
    # its own 'model_expression' entry in the serialized form.
    expected_dict = {
        'model_type': 'segmented_discretechoice',
        'name': 'test_seg',
        'segmentation_col': 'group',
        'sample_size': sample_size,
        'probability_mode': 'single_chooser',
        'choice_mode': 'aggregate',
        'choosers_fit_filters': None,
        'choosers_predict_filters': None,
        'alts_fit_filters': None,
        'alts_predict_filters': None,
        'interaction_predict_filters': None,
        'estimation_sample_size': 20,
        'prediction_sample_size': 30,
        'choice_column': None,
        'default_config': {
            'model_expression': model_exp,
        },
        'remove_alts': False,
        'fitted': False,
        'models': {
            'x': {
                'name': 'x',
                'fitted': False,
                'log_likelihoods': None,
                'fit_parameters': None
            },
            'y': {
                'name': 'y',
                'model_expression': 'var3 + var1:var2',
                'fitted': False,
                'log_likelihoods': None,
                'fit_parameters': None
            }
        }
    }
    assert yaml.safe_load(group.to_yaml()) == expected_dict
    new_seg = dcm.SegmentedMNLDiscreteChoiceModel.from_yaml(group.to_yaml())
    assert yaml.safe_load(new_seg.to_yaml()) == expected_dict
    group.fit(grouped_choosers, alternatives, 'thing_id')
    # After fitting, per-segment likelihoods/parameters are run-dependent
    # dicts -- pop them before comparing the remainder exactly.
    expected_dict['fitted'] = True
    expected_dict['models']['x']['fitted'] = True
    expected_dict['models']['y']['fitted'] = True
    del expected_dict['models']['x']['fit_parameters']
    del expected_dict['models']['x']['log_likelihoods']
    del expected_dict['models']['y']['fit_parameters']
    del expected_dict['models']['y']['log_likelihoods']
    actual_dict = yaml.safe_load(group.to_yaml())
    assert isinstance(actual_dict['models']['x'].pop('fit_parameters'), dict)
    assert isinstance(actual_dict['models']['x'].pop('log_likelihoods'), dict)
    assert isinstance(actual_dict['models']['y'].pop('fit_parameters'), dict)
    assert isinstance(actual_dict['models']['y'].pop('log_likelihoods'), dict)
    assert actual_dict == expected_dict
    new_seg = dcm.SegmentedMNLDiscreteChoiceModel.from_yaml(group.to_yaml())
    assert new_seg.fitted is True
    # check that the segmented model's probability mode and choice mode
    # are propogated to individual segments' models
    assert (
        new_seg._group.models['x'].probability_mode ==
        expected_dict['probability_mode'])
    assert (
        new_seg._group.models['y'].choice_mode ==
        expected_dict['choice_mode'])
    assert (
        new_seg._group.models['x'].estimation_sample_size ==
        expected_dict['estimation_sample_size'])
    assert (
        new_seg._group.models['y'].prediction_sample_size ==
        expected_dict['prediction_sample_size'])
def test_segmented_dcm_removes_old_models(grouped_choosers, alternatives):
    # Segments that never appear in the fitting data ('a', 'b', 'c')
    # must be dropped by fit(), leaving only the observed groups.
    seg_model = dcm.SegmentedMNLDiscreteChoiceModel(
        'group', 4, default_model_expr='var2 + var1:var3')
    for segment in 'abc':
        seg_model.add_segment(segment)
    seg_model.fit(grouped_choosers, alternatives, 'thing_id')
    assert sorted(seg_model._group.models.keys()) == ['x', 'y']
def test_fit_from_cfg(basic_dcm, choosers, alternatives):
    """fit_from_cfg/predict_from_cfg round-trip through a YAML config file."""
    # FIX: `NamedTemporaryFile(...).name` relied on the delete-on-close
    # file object being garbage-collected immediately (CPython-only,
    # racy), and the file leaked if any assertion failed before
    # os.remove. mkstemp + try/finally is deterministic and always
    # cleans up.
    fd, cfgname = tempfile.mkstemp(suffix='.yaml')
    os.close(fd)
    try:
        basic_dcm.to_yaml(cfgname)
        dcm.MNLDiscreteChoiceModel.fit_from_cfg(
            choosers, "thing_id", alternatives, cfgname)
        dcm.MNLDiscreteChoiceModel.predict_from_cfg(
            choosers, alternatives, cfgname)
        # Also exercise the alternative-ratio argument.
        dcm.MNLDiscreteChoiceModel.predict_from_cfg(choosers, alternatives,
                                                    cfgname, .2)
    finally:
        os.remove(cfgname)
def test_fit_from_cfg_segmented(grouped_choosers, alternatives):
    """Segmented-model fit_from_cfg/predict_from_cfg via a YAML config."""
    group = dcm.SegmentedMNLDiscreteChoiceModel(
        'group', 4, default_model_expr='var2 + var1:var3')
    group.add_segment('x')
    group.add_segment('y', 'var3 + var1:var2')
    # FIX: mkstemp + try/finally instead of NamedTemporaryFile(...).name,
    # which depended on refcounting GC of a delete-on-close handle and
    # leaked the file when an assertion failed before os.remove.
    fd, cfgname = tempfile.mkstemp(suffix='.yaml')
    os.close(fd)
    try:
        group.to_yaml(cfgname)
        dcm.SegmentedMNLDiscreteChoiceModel.fit_from_cfg(
            grouped_choosers, "thing_id", alternatives, cfgname)
        dcm.SegmentedMNLDiscreteChoiceModel.predict_from_cfg(
            grouped_choosers, alternatives, cfgname)
        # Also exercise the alternative-ratio argument.
        dcm.SegmentedMNLDiscreteChoiceModel.predict_from_cfg(
            grouped_choosers, alternatives, cfgname, .8)
    finally:
        os.remove(cfgname)
| bsd-3-clause |
dhruv13J/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 142 | 18692 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
    # Checks whether accuracy increases as `n_candidates` increases.
    # Accuracy = average overlap between LSH neighbors and the exact
    # cosine nearest neighbors over `n_iter` random queries.
    # NOTE(review): n_candidates=.1 looks odd for a candidate count --
    # presumably it exercises the minimum-candidates clamp; confirm.
    n_candidates_values = np.array([.1, 50, 500])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)
    for i, n_candidates in enumerate(n_candidates_values):
        lshf = LSHForest(n_candidates=n_candidates)
        lshf.fit(X)
        for j in range(n_iter):
            query = X[rng.randint(0, n_samples)]
            neighbors = lshf.kneighbors(query, n_neighbors=n_points,
                                        return_distance=False)
            # Exact ranking by brute-force cosine distance.
            distances = pairwise_distances(query, X, metric='cosine')
            ranks = np.argsort(distances)[0, :n_points]
            intersection = np.intersect1d(ranks, neighbors).shape[0]
            ratio = intersection / float(n_points)
            accuracies[i] = accuracies[i] + ratio
        accuracies[i] = accuracies[i] / float(n_iter)
    # Sorted accuracies should be equal to original accuracies
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # Highest accuracy should be strictly greater than the lowest
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
    # Checks whether accuracy increases as `n_estimators` increases.
    # Same setup as the n_candidates test, but sweeping the number of
    # trees with a fixed (large) candidate pool.
    n_estimators = np.array([1, 10, 100])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_estimators.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)
    for i, t in enumerate(n_estimators):
        lshf = LSHForest(n_candidates=500, n_estimators=t)
        lshf.fit(X)
        for j in range(n_iter):
            query = X[rng.randint(0, n_samples)]
            neighbors = lshf.kneighbors(query, n_neighbors=n_points,
                                        return_distance=False)
            # Exact ranking by brute-force cosine distance.
            distances = pairwise_distances(query, X, metric='cosine')
            ranks = np.argsort(distances)[0, :n_points]
            intersection = np.intersect1d(ranks, neighbors).shape[0]
            ratio = intersection / float(n_points)
            accuracies[i] = accuracies[i] + ratio
        accuracies[i] = accuracies[i] / float(n_iter)
    # Sorted accuracies should be equal to original accuracies
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # Highest accuracy should be strictly greater than the lowest
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
    # Checks whether desired number of neighbors are returned.
    # It is guaranteed to return the requested number of neighbors
    # if `min_hash_match` is set to 0. Returned distances should be
    # in ascending order.
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)
    lshf = LSHForest(min_hash_match=0)
    # Test unfitted estimator
    assert_raises(ValueError, lshf.kneighbors, X[0])
    lshf.fit(X)
    for i in range(n_iter):
        # Random k for a random in-sample query each iteration.
        n_neighbors = rng.randint(0, n_samples)
        query = X[rng.randint(0, n_samples)]
        neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
                                    return_distance=False)
        # Desired number of neighbors should be returned.
        assert_equal(neighbors.shape[1], n_neighbors)
    # Multiple points
    n_queries = 5
    queries = X[rng.randint(0, n_samples, n_queries)]
    distances, neighbors = lshf.kneighbors(queries,
                                           n_neighbors=1,
                                           return_distance=True)
    assert_equal(neighbors.shape[0], n_queries)
    assert_equal(distances.shape[0], n_queries)
    # Test only neighbors
    neighbors = lshf.kneighbors(queries, n_neighbors=1,
                                return_distance=False)
    assert_equal(neighbors.shape[0], n_queries)
    # Test random point(not in the data set)
    query = rng.randn(n_features)
    lshf.kneighbors(query, n_neighbors=1,
                    return_distance=False)
    # Test n_neighbors at initialization
    neighbors = lshf.kneighbors(query, return_distance=False)
    assert_equal(neighbors.shape[1], 5)
    # Test `neighbors` has an integer dtype
    assert_true(neighbors.dtype.kind == 'i',
                msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether Returned distances are less than `radius`
    # At least one point should be returned when the `radius` is set
    # to mean distance from the considering point to other points in
    # the database.
    # Moreover, this test compares the radius neighbors of LSHForest
    # with the `sklearn.neighbors.NearestNeighbors`.
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)
    lshf = LSHForest()
    # Test unfitted estimator
    assert_raises(ValueError, lshf.radius_neighbors, X[0])
    lshf.fit(X)
    for i in range(n_iter):
        # Select a random point in the dataset as the query
        query = X[rng.randint(0, n_samples)]
        # At least one neighbor should be returned when the radius is the
        # mean distance from the query to the points of the dataset.
        mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
        neighbors = lshf.radius_neighbors(query, radius=mean_dist,
                                          return_distance=False)
        assert_equal(neighbors.shape, (1,))
        assert_equal(neighbors.dtype, object)
        assert_greater(neighbors[0].shape[0], 0)
        # All distances to points in the results of the radius query should
        # be less than mean_dist
        distances, neighbors = lshf.radius_neighbors(query,
                                                     radius=mean_dist,
                                                     return_distance=True)
        assert_array_less(distances[0], mean_dist)
    # Multiple points
    n_queries = 5
    queries = X[rng.randint(0, n_samples, n_queries)]
    distances, neighbors = lshf.radius_neighbors(queries,
                                                 return_distance=True)
    # dists and inds should not be 1D arrays or arrays of variable lengths
    # hence the use of the object dtype.
    assert_equal(distances.shape, (n_queries,))
    assert_equal(distances.dtype, object)
    assert_equal(neighbors.shape, (n_queries,))
    assert_equal(neighbors.dtype, object)
    # Compare with exact neighbor search
    query = X[rng.randint(0, n_samples)]
    mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
    distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
    distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
    # Radius-based queries do not sort the result points and the order
    # depends on the method, the random_state and the dataset order. Therefore
    # we need to sort the results ourselves before performing any comparison.
    sorted_dists_exact = np.sort(distances_exact[0])
    sorted_dists_approx = np.sort(distances_approx[0])
    # Distances to exact neighbors are less than or equal to approximate
    # counterparts as the approximate radius query might have missed some
    # closer neighbors.
    assert_true(np.all(np.less_equal(sorted_dists_exact,
                                     sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
    """Points exactly on the query radius are kept; radius - eps drops them."""
    X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]

    # Reference model: exact brute-force cosine neighbors.
    exact_nn = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
    # LSHForest tuned so the approximate query is exhaustive (and thus
    # exact) on this toy dataset.
    approx_nn = LSHForest(min_hash_match=0, n_candidates=len(X)).fit(X)

    # Query aligned with the first axis.
    query = [1., 0.]
    dists = pairwise_distances(query, X, metric='cosine').ravel()

    # Sanity-check the geometry: ~0 (almost aligned), 1 - cos(45 deg),
    # exactly 1 (orthogonal), and ~2 (almost anti-aligned).
    assert_almost_equal(dists[0], 0, decimal=5)
    assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    assert_almost_equal(dists[2], 1)
    assert_almost_equal(dists[3], 2, decimal=5)

    # radius=1 keeps the first three samples; the third lies exactly on
    # the boundary and must be included by both models.
    exact_dists, exact_idx = exact_nn.radius_neighbors(query, radius=1)
    approx_dists, approx_idx = approx_nn.radius_neighbors(query, radius=1)
    assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
    assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
    assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
    assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])

    # Shrinking the radius by one machine epsilon must reject the sample
    # that was sitting exactly on the boundary.
    eps = np.finfo(np.float64).eps
    exact_dists, exact_idx = exact_nn.radius_neighbors(query, radius=1 - eps)
    approx_dists, approx_idx = approx_nn.radius_neighbors(query,
                                                          radius=1 - eps)
    assert_array_equal(np.sort(exact_idx[0]), [0, 1])
    assert_array_equal(np.sort(approx_idx[0]), [0, 1])
    assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
    assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
    """kneighbors returns neighbors ordered from closest to farthest."""
    rng = np.random.RandomState(42)
    X = rng.rand(12, 2)

    lshf = LSHForest()
    lshf.fit(X)

    for _ in range(10):
        k = rng.randint(0, 12)
        query = X[rng.randint(0, 12)]
        distances, neighbors = lshf.kneighbors(query,
                                               n_neighbors=k,
                                               return_distance=True)
        # Distances must be non-decreasing along the returned row.
        assert_true(np.all(np.diff(distances[0]) >= 0))
    # radius_neighbors makes no ordering guarantee, so it is not checked.
def test_fit():
    """``fit`` should populate every estimator attribute consistently."""
    n_estimators = 5
    rng = np.random.RandomState(42)
    X = rng.rand(12, 2)

    lshf = LSHForest(n_estimators=n_estimators)
    lshf.fit(X)

    # The training data is retained verbatim.
    assert_array_equal(X, lshf._fit_X)
    # One hash function per tree, each projecting to 32 hash bits.
    assert_equal(n_estimators, len(lshf.hash_functions_))
    assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
    # One sorted tree per estimator, with an entry per training sample...
    assert_equal(n_estimators, len(lshf.trees_))
    assert_equal(12, len(lshf.trees_[0]))
    # ...and a matching array of original indices per tree.
    assert_equal(n_estimators, len(lshf.original_indices_))
    assert_equal(12, len(lshf.original_indices_[0]))
def test_partial_fit():
    """``partial_fit`` appends new samples to an already-fitted forest."""
    rng = np.random.RandomState(42)
    X = rng.rand(12, 2)
    X_extra = rng.rand(3, 2)

    lshf = LSHForest()
    # On an unfitted estimator, partial_fit behaves like a plain fit.
    lshf.partial_fit(X)
    assert_array_equal(X, lshf._fit_X)
    lshf.fit(X)

    # Inserting points with the wrong dimensionality is rejected.
    assert_raises(ValueError, lshf.partial_fit,
                  np.random.randn(3, 1))

    lshf.partial_fit(X_extra)
    # Stored data, index arrays, and trees all grow by the insert count.
    expected_size = 12 + 3
    assert_equal(lshf._fit_X.shape[0], expected_size)
    assert_equal(len(lshf.original_indices_[0]), expected_size)
    assert_equal(len(lshf.trees_[1]), expected_size)
def test_hash_functions():
    # Hash functions must be randomly seeded: each projection matrix
    # should differ in variance and mean from the pooled set. If they
    # were all seeded identically, these statistics would coincide.
    n_samples, n_features, n_estimators = 12, 2, 5
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    forest = LSHForest(n_estimators=n_estimators,
                       random_state=rng.randint(0, np.iinfo(np.int32).max))
    forest.fit(X)

    components = [forest.hash_functions_[i].components_
                  for i in range(n_estimators)]
    for i in range(n_estimators):
        assert_not_equal(np.var(components),
                         np.var(forest.hash_functions_[i].components_))
    for i in range(n_estimators):
        assert_not_equal(np.mean(components),
                         np.mean(forest.hash_functions_[i].components_))
def test_candidates():
    # When too few candidates are found the estimator must warn and pad
    # the candidate set uniformly; the zero-candidate case is included.
    X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
                        [6, 10, 2]], dtype=np.float32)
    X_test = np.array([7, 10, 3], dtype=np.float32)

    # For zero candidates
    forest = LSHForest(min_hash_match=32)
    forest.fit(X_train)
    expected_message = ("Number of candidates is not sufficient to retrieve"
                        " %i neighbors with"
                        " min_hash_match = %i. Candidates are filled up"
                        " uniformly from unselected"
                        " indices." % (3, 32))
    assert_warns_message(UserWarning, expected_message, forest.kneighbors,
                         X_test, n_neighbors=3)
    distances, neighbors = forest.kneighbors(X_test, n_neighbors=3)
    assert_equal(distances.shape[1], 3)

    # For candidates less than n_neighbors
    forest = LSHForest(min_hash_match=31)
    forest.fit(X_train)
    expected_message = ("Number of candidates is not sufficient to retrieve"
                        " %i neighbors with"
                        " min_hash_match = %i. Candidates are filled up"
                        " uniformly from unselected"
                        " indices." % (5, 31))
    assert_warns_message(UserWarning, expected_message, forest.kneighbors,
                         X_test, n_neighbors=5)
    distances, neighbors = forest.kneighbors(X_test, n_neighbors=5)
    assert_equal(distances.shape[1], 5)
def test_graphs():
    # Smoke tests for the kneighbors/radius graph constructors at
    # several sample sizes: the adjacency matrices must be square.
    n_features = 3
    rng = np.random.RandomState(42)
    for n_samples in (5, 10, 20):
        X = rng.rand(n_samples, n_features)
        forest = LSHForest(min_hash_match=0)
        forest.fit(X)
        knn_graph = forest.kneighbors_graph(X)
        rad_graph = forest.radius_neighbors_graph(X)
        assert_equal(knn_graph.shape[0], n_samples)
        assert_equal(knn_graph.shape[1], n_samples)
        assert_equal(rad_graph.shape[0], n_samples)
        assert_equal(rad_graph.shape[1], n_samples)
def test_sparse_input():
    # Fitting on sparse input must agree with fitting on its dense copy.
    # note: fixed random state in sp.rand is not supported in older scipy;
    # the test should succeed regardless.
    X1 = sp.rand(50, 100)
    X2 = sp.rand(10, 100)
    forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
    forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)

    d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
    d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
    assert_almost_equal(d_sparse, d_dense)
    assert_almost_equal(i_sparse, i_dense)

    d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
                                                        return_distance=True)
    d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
                                                     return_distance=True)
    assert_equal(d_sparse.shape, d_dense.shape)
    # radius_neighbors returns ragged arrays; compare row by row.
    for row_sparse, row_dense in zip(d_sparse, d_dense):
        assert_almost_equal(row_sparse, row_dense)
    for row_sparse, row_dense in zip(i_sparse, i_dense):
        assert_almost_equal(row_sparse, row_dense)
| bsd-3-clause |
jkarnows/scikit-learn | sklearn/svm/tests/test_sparse.py | 95 | 12156 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1: two linearly separable classes in 2-D
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]  # expected predictions on T

# test sample 2: three classes in 3-D
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
               [0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]  # expected predictions on T2

iris = datasets.load_iris()
# permute the samples so the classes are interleaved
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify the iris design matrix for the sparse-vs-dense comparisons
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
    """Fit ``dense_svm`` on dense data and ``sparse_svm`` on sparse data,
    then assert both models agree on attributes and predictions."""
    dense_svm.fit(X_train.toarray(), y_train)
    if sparse.isspmatrix(X_test):
        X_test_dense = X_test.toarray()
    else:
        X_test_dense = X_test
    sparse_svm.fit(X_train, y_train)
    # Fitting on sparse input must yield sparse model attributes.
    assert_true(sparse.issparse(sparse_svm.support_vectors_))
    assert_true(sparse.issparse(sparse_svm.dual_coef_))
    assert_array_almost_equal(dense_svm.support_vectors_,
                              sparse_svm.support_vectors_.toarray())
    assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
    if dense_svm.kernel == "linear":
        # coef_ only exists for the linear kernel.
        assert_true(sparse.issparse(sparse_svm.coef_))
        assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
    assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
    assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
    assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
                              sparse_svm.decision_function(X_test))
    assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
                              sparse_svm.decision_function(X_test_dense))
    assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
                              sparse_svm.predict_proba(X_test), 4)
    msg = "cannot use sparse input in 'SVC' trained on dense data"
    if sparse.isspmatrix(X_test):
        # A model trained on dense data must refuse sparse test input.
        assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
    # Sparse SVC must give the same result as dense SVC on a variety of
    # datasets (binary, multiclass, many-class blobs, iris) and kernels.
    blob_X, blob_y = make_blobs(n_samples=100, centers=10, random_state=0)
    blob_X = sparse.csr_matrix(blob_X)
    cases = [[X_sp, Y, T], [X2_sp, Y2, T2],
             [blob_X[:80], blob_y[:80], blob_X[80:]],
             [iris.data, iris.target, iris.data]]
    for train_X, train_y, test_X in cases:
        for kern in ["linear", "poly", "rbf", "sigmoid"]:
            dense_clf = svm.SVC(kernel=kern, probability=True, random_state=0)
            sparse_clf = svm.SVC(kernel=kern, probability=True, random_state=0)
            check_svm_model_equal(dense_clf, sparse_clf,
                                  train_X, train_y, test_X)
def test_unsorted_indices():
    # test that the result with sorted and unsorted indices in csr is the same
    # we use a subset of digits as iris, blobs or make_classification didn't
    # show the problem
    digits = load_digits()
    X, y = digits.data[:50], digits.target[:50]
    X_test = sparse.csr_matrix(digits.data[50:100])
    X_sparse = sparse.csr_matrix(X)
    coef_dense = svm.SVC(kernel='linear', probability=True,
                         random_state=0).fit(X, y).coef_
    sparse_svc = svm.SVC(kernel='linear', probability=True,
                         random_state=0).fit(X_sparse, y)
    coef_sorted = sparse_svc.coef_
    # make sure dense and sparse SVM give the same result
    assert_array_almost_equal(coef_dense, coef_sorted.toarray())
    # Fancy indexing with np.arange copies the matrices row-by-row,
    # which leaves their CSR indices unsorted.
    X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
    X_test_unsorted = X_test[np.arange(X_test.shape[0])]
    # make sure we scramble the indices
    assert_false(X_sparse_unsorted.has_sorted_indices)
    assert_false(X_test_unsorted.has_sorted_indices)
    unsorted_svc = svm.SVC(kernel='linear', probability=True,
                           random_state=0).fit(X_sparse_unsorted, y)
    coef_unsorted = unsorted_svc.coef_
    # make sure unsorted indices give same result
    assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
    assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
                              sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
    # A user-supplied linear kernel must reproduce the built-in one.
    def linear_kernel(a, b):
        return safe_sparse_dot(a, b.T)

    builtin_clf = svm.SVC(kernel='linear').fit(X_sp, Y)
    custom_clf = svm.SVC(kernel=linear_kernel).fit(X_sp, Y)
    assert_array_equal(builtin_clf.predict(X_sp), custom_clf.predict(X_sp))


def test_svc_iris():
    # Sparse SVC agrees with dense SVC on the iris dataset for
    # several kernels, including coefficients for the linear kernel.
    for kern in ('linear', 'poly', 'rbf'):
        sparse_clf = svm.SVC(kernel=kern).fit(iris.data, iris.target)
        dense_clf = svm.SVC(kernel=kern).fit(iris.data.toarray(), iris.target)
        assert_array_almost_equal(dense_clf.support_vectors_,
                                  sparse_clf.support_vectors_.toarray())
        assert_array_almost_equal(dense_clf.dual_coef_,
                                  sparse_clf.dual_coef_.toarray())
        assert_array_almost_equal(
            dense_clf.predict(iris.data.toarray()),
            sparse_clf.predict(iris.data))
        if kern == 'linear':
            assert_array_almost_equal(dense_clf.coef_,
                                      sparse_clf.coef_.toarray())
def test_sparse_decision_function():
    # Test decision_function.
    # Sanity check: the decision function implemented in Python must
    # return the same values as the one in libsvm.
    # multi class:
    clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
    dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
    assert_array_almost_equal(dec, clf.decision_function(iris.data))
    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    # predictions must follow the sign of the decision function
    assert_array_almost_equal(
        prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
    expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
    # Deficient input must raise the proper exceptions, and a valid fit
    # must still work afterwards.
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)  # impossible C

    nu_clf = svm.NuSVC(nu=0.0)  # impossible nu
    assert_raises(ValueError, nu_clf.fit, X_sp, Y)

    bad_labels = Y[:-1]  # wrong dimensions for labels
    assert_raises(ValueError, nu_clf.fit, X_sp, bad_labels)

    clf = svm.SVC()
    clf.fit(X_sp, Y)
    assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
    # Sparse LinearSVC mirrors the dense one on both sample sets.
    dense_clf = svm.LinearSVC(random_state=0).fit(X, Y)
    sparse_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)

    assert_true(sparse_clf.fit_intercept)
    assert_array_almost_equal(dense_clf.coef_, sparse_clf.coef_, decimal=4)
    assert_array_almost_equal(dense_clf.intercept_, sparse_clf.intercept_,
                              decimal=4)
    assert_array_almost_equal(dense_clf.predict(X), sparse_clf.predict(X_sp))

    # Refit on the second sample set and compare again.
    dense_clf.fit(X2, Y2)
    sparse_clf.fit(X2_sp, Y2)
    assert_array_almost_equal(dense_clf.coef_, sparse_clf.coef_, decimal=4)
    assert_array_almost_equal(dense_clf.intercept_, sparse_clf.intercept_,
                              decimal=4)
def test_linearsvc_iris():
    # Sparse LinearSVC on iris, including sparsified coefficients.
    sparse_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
    dense_clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(),
                                                  iris.target)

    assert_equal(dense_clf.fit_intercept, sparse_clf.fit_intercept)
    assert_array_almost_equal(dense_clf.coef_, sparse_clf.coef_, decimal=1)
    assert_array_almost_equal(dense_clf.intercept_, sparse_clf.intercept_,
                              decimal=1)
    assert_array_almost_equal(
        dense_clf.predict(iris.data.toarray()), sparse_clf.predict(iris.data))

    # check decision_function: its argmax must match the predictions
    pred = np.argmax(sparse_clf.decision_function(iris.data), 1)
    assert_array_almost_equal(pred, dense_clf.predict(iris.data.toarray()))

    # sparsifying the coefficients on both models must not change the
    # predictions
    dense_clf.sparsify()
    assert_array_equal(pred, dense_clf.predict(iris.data))
    sparse_clf.sparsify()
    assert_array_equal(pred, sparse_clf.predict(iris.data))
def test_weight():
    # Class weights must be honoured by every sparse-capable classifier.
    X_w, y_w = make_classification(n_samples=200, n_features=100,
                                   weights=[0.833, 0.167], random_state=0)
    X_w = sparse.csr_matrix(X_w)
    for clf in (linear_model.LogisticRegression(),
                svm.LinearSVC(random_state=0),
                svm.SVC()):
        # Up-weight the minority class and check held-out accuracy.
        clf.set_params(class_weight={0: 5})
        clf.fit(X_w[:180], y_w[:180])
        y_pred = clf.predict(X_w[180:])
        assert_true(np.sum(y_pred == y_w[180:]) >= 11)
def test_sample_weights():
    # Heavily weighting the second class must flip the prediction for X[2].
    clf = svm.SVC()
    clf.fit(X_sp, Y)
    assert_array_equal(clf.predict(X[2]), [1.])

    weights = [.1] * 3 + [10] * 3
    clf.fit(X_sp, Y, sample_weight=weights)
    assert_array_equal(clf.predict(X[2]), [2.])


def test_sparse_liblinear_intercept_handling():
    # Sparse liblinear honours intercept_scaling; reuse the dense check.
    test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
    # Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
    # sparse format or weights are not correctly initialized.
    data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
    indices = np.array([6, 5, 35, 31])
    indptr = np.array(
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
         2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
         2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
    # Build the design matrix directly from CSR internals.
    X = sparse.csr_matrix((data, indices, indptr))
    y = np.array(
        [1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
         0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
         0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
         3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
         0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
         3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
         1., 3.])
    clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
    # COO input must be accepted and converted internally.
    sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
    assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
    assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
    # Cloning an SVC with a callable kernel and fitting on sparse input
    # must take the "dense_fit" path and agree with a dense fit.
    proto = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
                    random_state=0)
    cloned = base.clone(proto)
    cloned.fit(X_sp, Y)
    pred = cloned.predict(X_sp)
    cloned.predict_proba(X_sp)

    dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
                        probability=True, random_state=0)
    pred_dense = dense_svm.fit(X, Y).predict(X)
    assert_array_equal(pred_dense, pred)
    # cloned.decision_function(X_sp)  # XXX : should be supported
def test_timeout():
    # max_iter=1 cannot converge and must raise a ConvergenceWarning.
    clf = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
                  random_state=0, max_iter=1)
    assert_warns(ConvergenceWarning, clf.fit, X_sp, Y)


def test_consistent_proba():
    # Two identically-seeded fits must yield identical probabilities.
    first = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_1 = first.fit(X, Y).predict_proba(X)
    second = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_2 = second.fit(X, Y).predict_proba(X)
    assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/quandl/model/merged_dataset.py | 1 | 9646 | from more_itertools import unique_everseen
import pandas as pd
from six import string_types
from .model_base import ModelBase
from quandl.util import Util
from .merged_data_list import MergedDataList
from .data import Data
from quandl.message import Message
from .dataset import Dataset
class MergedDataset(ModelBase):
    """Virtual dataset that merges several Quandl datasets into one.

    ``dataset_codes`` is a list whose entries are either plain code
    strings (e.g. ``'NSE/OIL'``) or tuples of the form
    ``('WIKI/AAPL', {'column_index': [1, 2]})`` restricting the columns
    requested for that dataset.
    """

    def __init__(self, dataset_codes, **options):
        self.dataset_codes = dataset_codes
        self._datasets = None  # lazily-built list of Dataset objects
        self._raw_data = None  # lazily-built merged metadata dict
        self.options = options

    @property
    def column_names(self):
        # Merged, de-duplicated column names across all member datasets.
        return self._merged_column_names_from(self.__dataset_objects__())

    @property
    def oldest_available_date(self):
        # Earliest of the member datasets' oldest dates.
        return min(self._get_dataset_attribute('oldest_available_date'))

    @property
    def newest_available_date(self):
        # Latest of the member datasets' newest dates.
        return max(self._get_dataset_attribute('newest_available_date'))

    def data(self, **options):
        """Fetch every member dataset and outer-join them on the date index.

        Returns a :class:`MergedDataList`; rows are ascending unless the
        caller explicitly requested ``params={'order': 'desc'}``.
        """
        # if there is only one column_index, use the api to fetch
        # else fetch all the data and filter column indexes requested locally
        dataset_data_list = [self._get_dataset_data(dataset, **options)
                             for dataset in self.__dataset_objects__()]
        # build data frames and filter locally when necessary
        data_frames = [dataset_data.to_pandas(
            keep_column_indexes=self._keep_column_indexes(index))
            for index, dataset_data in enumerate(dataset_data_list)]
        merged_data_frame = pd.DataFrame()
        for index, data_frame in enumerate(data_frames):
            metadata = self.__dataset_objects__()[index]
            # use code to prevent metadata api call
            data_frame.rename(
                columns=lambda x: self._rename_columns(metadata.code, x), inplace=True)
            merged_data_frame = pd.merge(
                merged_data_frame, data_frame, right_index=True, left_index=True, how='outer')
        merged_data_metadata = self._build_data_meta(dataset_data_list, merged_data_frame)
        # check if descending was explicitly set
        # if set we need to sort in descending order
        # since panda merged dataframe will
        # by default sort everything in ascending
        return MergedDataList(
            Data, merged_data_frame, merged_data_metadata,
            ascending=self._order_is_ascending(**options))

    # for MergeDataset data calls
    def _get_dataset_data(self, dataset, **options):
        """Fetch one dataset's data, pushing a single-column filter to the API."""
        updated_options = options
        # if we have only one column index, let the api
        # handle the column filtering since the api supports this
        if len(dataset.requested_column_indexes) == 1:
            params = {'column_index': dataset.requested_column_indexes[0]}
            # only change the options per request
            updated_options = options.copy()
            updated_options = Util.merge_options('params', params, **updated_options)
        return dataset.data(**updated_options)

    def _build_data_meta(self, dataset_data_list, df):
        """Derive merged metadata (dates, column names) from the fetched data."""
        merged_data_metadata = {}
        # for sanity check if list has items
        if dataset_data_list:
            # meta should be the same for every individual Dataset
            # request, just take the first one
            merged_data_metadata = dataset_data_list[0].meta.copy()
            # set the start_date and end_date to
            # the actual values we got back from data
            num_rows = len(df.index)
            if num_rows > 0:
                merged_data_metadata['start_date'] = df.index[0].date()
                merged_data_metadata['end_date'] = df.index[num_rows - 1].date()
            # remove column_index if it exists because this would be per request data
            merged_data_metadata.pop('column_index', None)
            # don't use self.column_names to prevent metadata api call
            # instead, get the column_names from the dataset_data_objects
            merged_data_metadata['column_names'] = self._merged_column_names_from(dataset_data_list)
        return merged_data_metadata

    def _keep_column_indexes(self, index):
        """Column indexes to filter locally for dataset number ``index``."""
        # no need to filter if we only have one column_index
        # since leveraged the server to do the filtering
        col_index = self.__dataset_objects__()[index].requested_column_indexes
        if len(self.__dataset_objects__()[index].requested_column_indexes) == 1:
            # empty array for no filtering
            col_index = []
        return col_index

    def _rename_columns(self, code, original_column_name):
        # Prefix every column with its dataset code, e.g. 'WIKI/AAPL - Open'.
        return code + ' - ' + original_column_name

    def _get_dataset_attribute(self, k):
        """Collect raw attribute ``k`` from every member dataset, de-duplicated."""
        elements = []
        for dataset in self.__dataset_objects__():
            elements.append(dataset.__get_raw_data__()[k])
        return list(unique_everseen(elements))

    def _order_is_ascending(self, **options):
        # Ascending unless the caller explicitly passed order='desc'.
        return not (self._in_query_param('order', **options) and
                    options['params']['order'] == 'desc')

    def _in_query_param(self, name, **options):
        # True when options['params'][name] exists.
        return ('params' in options and
                name in options['params'])

    # can take in a list of dataset_objects
    # or a list of dataset_data_objects
    def _merged_column_names_from(self, dataset_list):
        """Merge column names of all datasets, prefixing non-date columns."""
        elements = []
        for idx_dataset, dataset in enumerate(dataset_list):
            # require getting the code from the dataset object always
            code = self.__dataset_objects__()[idx_dataset].code
            for index, column_name in enumerate(dataset.column_names):
                # only include column names that are not filtered out
                # by specification of the column_indexes list
                if self._include_column(dataset, index):
                    # first index is the date, don't modify the date name
                    if index > 0:
                        elements.append(self._rename_columns(code, column_name))
                    else:
                        elements.append(column_name)
        return list(unique_everseen(elements))

    def _include_column(self, dataset_metadata, column_index):
        """Whether ``column_index`` survives the requested column filter."""
        # non-pandas/dataframe:
        # keep column 0 around because we want to keep Date
        if (hasattr(dataset_metadata, 'requested_column_indexes') and
                len(dataset_metadata.requested_column_indexes) > 0 and
                column_index != 0):
            return column_index in dataset_metadata.requested_column_indexes
        return True

    def _initialize_raw_data(self):
        """Build the merged raw-metadata dict keyed like the first dataset's."""
        datasets = self.__dataset_objects__()
        self._raw_data = {}
        if not datasets:
            return self._raw_data
        self._raw_data = datasets[0].__get_raw_data__().copy()
        for k, v in list(self._raw_data.items()):
            # Replace each value with the merged, per-attribute version.
            self._raw_data[k] = getattr(self, k)
        return self._raw_data

    def _build_dataset_object(self, dataset_code, **options):
        """Create the Dataset for one entry of ``dataset_codes``."""
        options_copy = options.copy()
        # data_codes are tuples
        # e.g., ('WIKI/AAPL', {'column_index": [1,2]})
        # or strings
        # e.g., 'NSE/OIL'
        code = self._get_request_dataset_code(dataset_code)
        dataset = Dataset(code, None, **options_copy)
        # save column_index param requested dynamically
        # used later on to determine:
        # if column_index is an array, fetch all data and use locally to filter columns
        # if column_index is an empty array, fetch all data and don't filter columns
        dataset.requested_column_indexes = self._get_req_dataset_col_indexes(dataset_code, code)
        return dataset

    def _get_req_dataset_col_indexes(self, dataset_code, code_str):
        """Validated list of requested column indexes for one dataset code."""
        # ensure if column_index dict is specified, value is a list
        params = self._get_request_params(dataset_code)
        if 'column_index' in params:
            column_index = params['column_index']
            if not isinstance(column_index, list):
                raise ValueError(
                    Message.ERROR_COLUMN_INDEX_LIST % code_str)
            return column_index
        # default, no column indexes to filter
        return []

    def _get_request_dataset_code(self, dataset_code):
        """Extract the code string from a plain string or (code, params) tuple."""
        if isinstance(dataset_code, tuple):
            return dataset_code[0]
        elif isinstance(dataset_code, string_types):
            return dataset_code
        else:
            raise ValueError(Message.ERROR_ARGUMENTS_LIST_FORMAT)

    def _get_request_params(self, dataset_code):
        # Params dict from a (code, params) tuple; empty for plain strings.
        if isinstance(dataset_code, tuple):
            return dataset_code[1]
        return {}

    def __getattr__(self, k):
        # Unknown public attributes are resolved against the member
        # datasets' raw metadata (merged across datasets).
        if k[0] == '_' and k != '_raw_data':
            raise AttributeError(k)
        elif hasattr(MergedDataset, k):
            return super(MergedDataset, self).__getattr__(k)
        elif k in self.__dataset_objects__()[0].__get_raw_data__():
            return self._get_dataset_attribute(k)
        return super(MergedDataset, self).__getattr__(k)

    def __get_raw_data__(self):
        # Lazily initialize and return the merged raw metadata.
        if self._raw_data is None:
            self._initialize_raw_data()
        return ModelBase.__get_raw_data__(self)

    def __dataset_objects__(self):
        # Lazily build the Dataset objects, validating dataset_codes.
        if self._datasets:
            return self._datasets
        if not isinstance(self.dataset_codes, list):
            raise ValueError('dataset codes must be specified in a list')
        # column_index is handled by individual dataset get's
        if 'params' in self.options:
            self.options['params'].pop("column_index", None)
        self._datasets = list([self._build_dataset_object(dataset_code, **self.options)
                               for dataset_code in self.dataset_codes])
        return self._datasets
| mit |
dr-nate/msmbuilder | msmbuilder/commands/implied_timescales.py | 12 | 5214 | # Author: Robert McGibbon <rmcgibbo@gmail.com>
# Contributors:
# Copyright (c) 2014, Stanford University
# All rights reserved.
"""Scan the implied timescales of MarkovStateModels with respect to lag time.
This command will build a series of MarkovStateModels at different lag times,
and save a file to disk containing the relaxation timescales of each of the
models.
A plot of these data can then be used to choose the lag time [1].
References
----------
.. [1] Beauchamp, Kyle A., et al. "MSMBuilder2: modeling conformational
dynamics on the picosecond to millisecond scale." J. Chem. Theory.
Comput. 7.10 (2011): 3412-3419.
"""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import print_function, division, absolute_import
from os.path import splitext
import sys
import json
import pandas as pd
from ..dataset import dataset
from ..cmdline import Command, argument, argument_group, rangetype, FlagAction
from ..msm import MarkovStateModel, implied_timescales
# Command that builds MSMs over a range of lag times and saves each model's
# implied relaxation timescales to disk (csv/json/excel).
# No class docstring on purpose: `description = __doc__` must keep resolving
# to the *module* docstring, which argparse uses as the command help text.
class ImpliedTimescales(Command):
    _group = 'MSM'
    _concrete = True
    description = __doc__

    lag_times = argument('-l', '--lag_times', default='1:10', help='''Range
        of lag times. Specify as 'start:stop' or 'start:stop:step. The
        endpoints are inclusive.''', type=rangetype)
    inp = argument(
        '-i', '--inp', help='''Path to input dataset, a collection of 1D
        integer sequences (such as the output from clustering)''',
        required=True)
    out = argument('--out', help='''Output file''',
                   default='timescales.csv')
    fmt = argument('--fmt', help='Output file format', default='csv',
                   choices=('csv', 'json', 'excel'))
    # Maps each supported --fmt value to the file extension written.
    _extensions = {'csv': '.csv', 'json': '.json', 'excel': '.xlsx'}
    n_jobs = argument('--n_jobs', help='Number of parallel processes',
                      default=1, type=int)

    p = argument_group('MSM parameters')
    n_timescales = p.add_argument('--n_timescales', default=10, help='''
        The number of dynamical timescales to calculate when diagonalizing
        the transition matrix.''', type=int)
    reversible_type = p.add_argument('--reversible_type', help='''
        Method by which the reversibility of the transition matrix
        is enforced. 'mle' uses a maximum likelihood method that is
        solved by numerical optimization, and 'transpose'
        uses a more restrictive (but less computationally complex)
        direct symmetrization of the expected number of counts.''',
        choices=('mle', 'transpose'), default='mle')
    ergodic_cutoff = p.add_argument('--ergodic_cutoff', default=1, help='''
        Only the maximal strongly ergodic subgraph of the data is used to build
        an MSM. Ergodicity is determined by ensuring that each state is
        accessible from each other state via one or more paths involving edges
        with a number of observed directed counts greater than or equal to
        ``ergodic_cutoff``. Not that by setting ``ergodic_cutoff`` to 0, this
        trimming is effectively turned off.''', type=int)
    prior_counts = p.add_argument('--prior_counts', help='''Add a number
        of "pseudo counts" to each entry in the counts matrix. When
        prior_counts == 0 (default), the assigned transition probability
        between two states with no observed transitions will be zero, whereas
        when prior_counts > 0, even this unobserved transitions will be
        given nonzero probability.''', type=float, default=0)
    verbose = p.add_argument('--verbose', default=True,
                             help='Enable verbose printout', action=FlagAction)

    def __init__(self, args):
        # `args` is the parsed argparse namespace for this command.
        self.args = args

    def start(self):
        """Build the models, collect their implied timescales, and save them."""
        kwargs = {
            'n_timescales': self.args.n_timescales,
            'reversible_type': self.args.reversible_type,
            'ergodic_cutoff': self.args.ergodic_cutoff,
            'prior_counts': self.args.prior_counts,
            'verbose': self.args.verbose,
        }
        with dataset(self.args.inp, mode='r') as ds:
            model = MarkovStateModel(**kwargs)
            lines = implied_timescales(
                ds, lag_times=self.args.lag_times,
                n_timescales=self.args.n_timescales,
                msm=model,
                n_jobs=self.args.n_jobs,
                verbose=self.args.verbose)
        # One row per lag time, one column per timescale, sorted by name.
        cols = ['Timescale %d' % (d + 1) for d in range(len(lines[0]))]
        df = pd.DataFrame(data=lines, columns=cols)
        df['Lag Time'] = self.args.lag_times
        df = df.reindex_axis(sorted(df.columns), axis=1)
        self.write_output(df)

    def write_output(self, df):
        """Save ``df`` to ``self.args.out`` in the format ``self.args.fmt``.

        Raises
        ------
        RuntimeError
            If ``self.args.fmt`` is not one of the supported formats
            (argparse `choices` should already prevent this).
        """
        outfile = splitext(self.args.out)[0] + self._extensions[self.args.fmt]
        print('Writing %s' % outfile)
        if self.args.fmt == 'csv':
            df.to_csv(outfile)
        elif self.args.fmt == 'json':
            with open(outfile, 'w') as f:
                json.dump(df.to_dict(orient='records'), f)
        elif self.args.fmt == 'excel':
            df.to_excel(outfile)
        else:
            # BUGFIX: this branch previously referenced an undefined local
            # name `fmt`, raising NameError instead of the intended error.
            raise RuntimeError('unknown fmt: %s' % self.args.fmt)
        print('All done!')
| lgpl-2.1 |
uqyge/combustionML | ode/mlp.py | 1 | 3568 | import autograd.numpy as np
from autograd import grad, jacobian
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
from matplotlib import pyplot, cm
from mpl_toolkits.mplot3d import Axes3D
#%matplotlib inline
# Grid resolution of the unit square on which the PDE is solved.
nx = 10
ny = 10
dx = 1. / nx  # grid spacing (unused below; kept for reference)
dy = 1. / ny
x_space = np.linspace(0, 1, nx)
y_space = np.linspace(0, 1, ny)
def analytic_solution(x):
    """Closed-form solution of the Laplace boundary-value problem at
    the point x = (x0, x1) on the unit square."""
    x0, x1 = x[0], x[1]
    normaliser = np.exp(np.pi) - np.exp(-np.pi)
    return (1 / normaliser) * np.sin(np.pi * x0) * \
        (np.exp(np.pi * x1) - np.exp(-np.pi * x1))
# Tabulate the analytic solution on the grid for later comparison.
# NOTE(review): `i` follows x and `j` follows y while `surface` is
# allocated as (ny, nx); this only lines up because nx == ny — confirm
# the intended axis order.
surface = np.zeros((ny, nx))
for i, x in enumerate(x_space):
    for j, y in enumerate(y_space):
        surface[i][j] = analytic_solution([x, y])
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# X, Y = np.meshgrid(x_space, y_space)
# surf = ax.plot_surface(X, Y, surface, rstride=1, cstride=1, cmap=cm.viridis,
# linewidth=0, antialiased=False)
#
# ax.set_xlim(0, 1)
# ax.set_ylim(0, 1)
# ax.set_zlim(0, 2)
#
# ax.set_xlabel('$x$')
# ax.set_ylabel('$y$')
def f(x):
    """Right-hand side of the PDE; identically zero for the Laplace equation."""
    return 0.0
def sigmoid(x):
    """Logistic activation 1 / (1 + exp(-x)); elementwise on arrays."""
    neg_exp = np.exp(-x)
    return 1. / (1. + neg_exp)
def neural_network(W, x):
    """Two-layer MLP forward pass: logistic hidden layer, linear output.

    ``W`` is a two-element list of weight matrices (input->hidden and
    hidden->output); the sigmoid is inlined so the expression is a single
    autograd-friendly chain.
    """
    hidden = 1. / (1. + np.exp(-np.dot(x, W[0])))
    return np.dot(hidden, W[1])
def neural_network_x(x):
    """Forward pass as a function of the input point only.

    Closes over the module-level weights ``W`` so that autograd's
    ``jacobian`` can differentiate the network output w.r.t. ``x``.
    """
    hidden = 1. / (1. + np.exp(-np.dot(x, W[0])))
    return np.dot(hidden, W[1])
def A(x):
    """Boundary term of the trial solution; matches the Dirichlet data
    on the edges of the unit square."""
    return x[1] * np.sin(np.pi * x[0])
def psy_trial(x, net_out):
    """Trial solution A(x) + x0(1-x0) x1(1-x1) * net_out.

    The damping factor vanishes on the boundary, so the network term only
    modifies the interior while A(x) enforces the boundary conditions.
    """
    damp = x[0] * (1 - x[0]) * x[1] * (1 - x[1])
    return A(x) + damp * net_out
def loss_function(W, x, y):
    """Summed squared PDE residual of the trial solution over the x × y grid.

    For every grid point the trial solution's Hessian w.r.t. the input
    point is evaluated with autograd, and the squared residual of the
    Laplace equation  psy_xx + psy_yy = f  is accumulated.

    Note: the original version also computed ``jacobian(neural_network_x)``,
    its Hessian, ``psy_trial`` itself and ``jacobian(psy_trial)`` per grid
    point without ever using them — those expensive dead computations are
    removed here; the returned value is unchanged.
    """
    # Build the second-derivative function once, outside the double loop.
    psy_hessian_fn = jacobian(jacobian(psy_trial))
    loss_sum = 0.
    for xi in x:
        for yi in y:
            input_point = np.array([xi, yi])
            net_out = neural_network(W, input_point)[0]
            psy_t_hessian = psy_hessian_fn(input_point, net_out)
            gradient_of_trial_d2x = psy_t_hessian[0][0]
            gradient_of_trial_d2y = psy_t_hessian[1][1]
            func = f(input_point)  # PDE right-hand side (zero for Laplace)
            err_sqr = ((gradient_of_trial_d2x + gradient_of_trial_d2y) - func)**2
            loss_sum += err_sqr
    return loss_sum
# Weights: 2 inputs -> 10 hidden units, then 10 -> 1 output.
W = [npr.randn(2, 10), npr.randn(10, 1)]
lmb = 0.001  # gradient-descent step size

# Untrained network output at an arbitrary point (sanity check).
print(neural_network(W, np.array([1, 1])))

# Plain gradient descent on the PDE-residual loss.
# NOTE(review): indentation was lost in this copy; the final loss print
# may originally have sat inside the loop to monitor each iteration.
for i in range(100):
    loss_grad = grad(loss_function)(W, x_space, y_space)
    W[0] = W[0] - lmb * loss_grad[0]
    W[1] = W[1] - lmb * loss_grad[1]
print(loss_function(W, x_space, y_space))
# Re-tabulate the analytic solution and evaluate the trained trial
# solution on the same grid.
surface2 = np.zeros((ny, nx))
surface = np.zeros((ny, nx))
for i, x in enumerate(x_space):
    for j, y in enumerate(y_space):
        surface[i][j] = analytic_solution([x, y])

for i, x in enumerate(x_space):
    for j, y in enumerate(y_space):
        net_outt = neural_network(W, [x, y])[0]
        surface2[i][j] = psy_trial([x, y], net_outt)

# Print one row of each surface for a quick eyeball comparison.
print(surface[2])
print(surface2[2])
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# X, Y = np.meshgrid(x_space, y_space)
# surf = ax.plot_surface(X, Y, surface, rstride=1, cstride=1, cmap=cm.viridis,
# linewidth=0, antialiased=False)
#
# ax.set_xlim(0, 1)
# ax.set_ylim(0, 1)
# ax.set_zlim(0, 3)
#
# ax.set_xlabel('$x$')
# ax.set_ylabel('$y$');
#
#
# fig = plt.figure()
# ax = fig.gca(projection='3d')
# X, Y = np.meshgrid(x_space, y_space)
# surf = ax.plot_surface(X, Y, surface2, rstride=1, cstride=1, cmap=cm.viridis,
# linewidth=0, antialiased=False)
#
# ax.set_xlim(0, 1)
# ax.set_ylim(0, 1)
# ax.set_zlim(0, 3)
#
# ax.set_xlabel('$x$')
# ax.set_ylabel('$y$') | mit |
badlogicmanpreet/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/collections.py | 69 | 39876 | """
Classes for the efficient drawing of large collections of objects that
share most properties, e.g. a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g. you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g. a bunch of solid
line segemnts)
"""
import copy, math, warnings
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as _colors # avoid conflict with kwarg
import matplotlib.cm as cm
import matplotlib.transforms as transforms
import matplotlib.artist as artist
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
import matplotlib.mlab as mlab
class Collection(artist.Artist, cm.ScalarMappable):
    """
    Base class for Collections. Must be subclassed to be usable.

    All properties in a collection must be sequences or scalars;
    if scalars, they will be converted to sequences. The
    property of the ith element of the collection is::

        prop[i % len(props)]

    Keyword arguments and default values:

        * *edgecolors*: None
        * *facecolors*: None
        * *linewidths*: None
        * *antialiaseds*: None
        * *offsets*: None
        * *transOffset*: transforms.IdentityTransform()
        * *norm*: None (optional for
          :class:`matplotlib.cm.ScalarMappable`)
        * *cmap*: None (optional for
          :class:`matplotlib.cm.ScalarMappable`)

    *offsets* and *transOffset* are used to translate the patch after
    rendering (default no offsets).

    If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
    are None, they default to their :data:`matplotlib.rcParams` patch
    setting, in sequence form.

    The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
    the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
    (ie a call to set_array has been made), at draw time a call to
    scalar mappable will be made to set the face colors.
    """
    # Class-level defaults, overridden per instance in __init__ when
    # offsets/transOffset are supplied.
    _offsets = np.array([], np.float_)
    _transOffset = transforms.IdentityTransform()
    _transforms = []
    zorder = 1
    def __init__(self,
                 edgecolors=None,
                 facecolors=None,
                 linewidths=None,
                 linestyles='solid',
                 antialiaseds = None,
                 offsets = None,
                 transOffset = None,
                 norm = None,  # optional for ScalarMappable
                 cmap = None,  # ditto
                 pickradius = 5.0,
                 urls = None,
                 **kwargs
                 ):
        """
        Create a Collection

        %(Collection)s
        """
        artist.Artist.__init__(self)
        cm.ScalarMappable.__init__(self, norm, cmap)
        self.set_edgecolor(edgecolors)
        self.set_facecolor(facecolors)
        self.set_linewidth(linewidths)
        self.set_linestyle(linestyles)
        self.set_antialiased(antialiaseds)
        self.set_urls(urls)
        self._uniform_offsets = None
        self._offsets = np.array([], np.float_)
        if offsets is not None:
            offsets = np.asarray(offsets)
            if len(offsets.shape) == 1:
                offsets = offsets[np.newaxis,:]  # Make it Nx2.
            # With a transOffset the offsets are applied in that transform's
            # space at draw time; without one they are uniform offsets added
            # to the paths themselves (see LineCollection docs).
            if transOffset is not None:
                self._offsets = offsets
                self._transOffset = transOffset
            else:
                self._uniform_offsets = offsets
        self._pickradius = pickradius
        self.update(kwargs)
    def _get_value(self, val):
        # Normalize a scalar-or-sequence float property to a sequence.
        try: return (float(val), )
        except TypeError:
            if cbook.iterable(val) and len(val):
                try: float(val[0])
                except TypeError: pass # raise below
                else: return val
        raise TypeError('val must be a float or nonzero sequence of floats')
    def _get_bool(self, val):
        # Normalize a scalar-or-sequence bool property to a sequence.
        try: return (bool(val), )
        except TypeError:
            if cbook.iterable(val) and len(val):
                try: bool(val[0])
                except TypeError: pass # raise below
                else: return val
        raise TypeError('val must be a bool or nonzero sequence of them')
    def get_paths(self):
        raise NotImplementedError
    def get_transforms(self):
        return self._transforms
    def get_datalim(self, transData):
        """Return the bounding box of the collection in *transData* coordinates."""
        transform = self.get_transform()
        transOffset = self._transOffset
        offsets = self._offsets
        paths = self.get_paths()
        # Apply only the non-affine parts here; the affine remainders are
        # passed on so the extent computation can apply them cheaply.
        if not transform.is_affine:
            paths = [transform.transform_path_non_affine(p) for p in paths]
            transform = transform.get_affine()
        if not transOffset.is_affine:
            offsets = transOffset.transform_non_affine(offsets)
            transOffset = transOffset.get_affine()
        offsets = np.asarray(offsets, np.float_)
        result = mpath.get_path_collection_extents(
            transform.frozen(), paths, self.get_transforms(),
            offsets, transOffset.frozen())
        result = result.inverse_transformed(transData)
        return result
    def get_window_extent(self, renderer):
        bbox = self.get_datalim(transforms.IdentityTransform())
        #TODO:check to ensure that this does not fail for
        #cases other than scatter plot legend
        return bbox
    def _prepare_points(self):
        """Point prep for drawing and hit testing"""
        transform = self.get_transform()
        transOffset = self._transOffset
        offsets = self._offsets
        paths = self.get_paths()
        if self.have_units():
            paths = []
            for path in self.get_paths():
                vertices = path.vertices
                xs, ys = vertices[:, 0], vertices[:, 1]
                xs = self.convert_xunits(xs)
                ys = self.convert_yunits(ys)
                paths.append(mpath.Path(zip(xs, ys), path.codes))
            if len(self._offsets):
                # Fix: index the x and y *columns* of the Nx2 offsets array.
                # The previous code used [:0] and [:1], which are row slices
                # (empty / first row), so unit conversion of offsets was broken.
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
                offsets = zip(xs, ys)
        offsets = np.asarray(offsets, np.float_)
        if not transform.is_affine:
            paths = [transform.transform_path_non_affine(path) for path in paths]
            transform = transform.get_affine()
        if not transOffset.is_affine:
            offsets = transOffset.transform_non_affine(offsets)
            transOffset = transOffset.get_affine()
        return transform, transOffset, offsets, paths
    def draw(self, renderer):
        if not self.get_visible(): return
        renderer.open_group(self.__class__.__name__)
        self.update_scalarmappable()
        clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
        if clippath_trans is not None:
            clippath_trans = clippath_trans.frozen()
        transform, transOffset, offsets, paths = self._prepare_points()
        renderer.draw_path_collection(
            transform.frozen(), self.clipbox, clippath, clippath_trans,
            paths, self.get_transforms(),
            offsets, transOffset,
            self.get_facecolor(), self.get_edgecolor(), self._linewidths,
            self._linestyles, self._antialiaseds, self._urls)
        renderer.close_group(self.__class__.__name__)
    def contains(self, mouseevent):
        """
        Test whether the mouse event occurred in the collection.

        Returns True | False, ``dict(ind=itemlist)``, where every
        item in itemlist contains the event.
        """
        if callable(self._contains): return self._contains(self,mouseevent)
        if not self.get_visible(): return False,{}
        transform, transOffset, offsets, paths = self._prepare_points()
        ind = mpath.point_in_path_collection(
            mouseevent.x, mouseevent.y, self._pickradius,
            transform.frozen(), paths, self.get_transforms(),
            offsets, transOffset, len(self._facecolors)>0)
        return len(ind)>0,dict(ind=ind)
    def set_pickradius(self, pickradius):
        """Set the pick radius (in points) used by :meth:`contains`."""
        # Fix: store the supplied value on the attribute that contains()
        # actually reads.  The old code ignored the argument and assigned a
        # constant 5 to an unrelated attribute (self.pickradius).
        self._pickradius = pickradius
    def get_pickradius(self):
        return self._pickradius
    def set_urls(self, urls):
        if urls is None:
            self._urls = [None,]
        else:
            self._urls = urls
    def get_urls(self): return self._urls
    def set_offsets(self, offsets):
        """
        Set the offsets for the collection.  *offsets* can be a scalar
        or a sequence.

        ACCEPTS: float or sequence of floats
        """
        offsets = np.asarray(offsets, np.float_)
        if len(offsets.shape) == 1:
            offsets = offsets[np.newaxis,:]  # Make it Nx2.
        #This decision is based on how they are initialized above
        if self._uniform_offsets is None:
            self._offsets = offsets
        else:
            self._uniform_offsets = offsets
    def get_offsets(self):
        """
        Return the offsets for the collection.
        """
        #This decision is based on how they are initialized above in __init__()
        if self._uniform_offsets is None:
            return self._offsets
        else:
            return self._uniform_offsets
    def set_linewidth(self, lw):
        """
        Set the linewidth(s) for the collection.  *lw* can be a scalar
        or a sequence; if it is a sequence the patches will cycle
        through the sequence

        ACCEPTS: float or sequence of floats
        """
        if lw is None: lw = mpl.rcParams['patch.linewidth']
        self._linewidths = self._get_value(lw)
    def set_linewidths(self, lw):
        """alias for set_linewidth"""
        return self.set_linewidth(lw)
    def set_lw(self, lw):
        """alias for set_linewidth"""
        return self.set_linewidth(lw)
    def set_linestyle(self, ls):
        """
        Set the linestyle(s) for the collection.

        ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
                  (offset, on-off-dash-seq) ]
        """
        # Accept a style name, a dash tuple, or a sequence mixing either;
        # everything is normalized to a list of dash specifications.
        try:
            dashd = backend_bases.GraphicsContextBase.dashd
            if cbook.is_string_like(ls):
                if ls in dashd:
                    dashes = [dashd[ls]]
                elif ls in cbook.ls_mapper:
                    dashes = [dashd[cbook.ls_mapper[ls]]]
                else:
                    raise ValueError()
            elif cbook.iterable(ls):
                try:
                    dashes = []
                    for x in ls:
                        if cbook.is_string_like(x):
                            if x in dashd:
                                dashes.append(dashd[x])
                            elif x in cbook.ls_mapper:
                                dashes.append(dashd[cbook.ls_mapper[x]])
                            else:
                                raise ValueError()
                        elif cbook.iterable(x) and len(x) == 2:
                            dashes.append(x)
                        else:
                            raise ValueError()
                except ValueError:
                    if len(ls)==2:
                        # A bare (offset, onoffseq) tuple.
                        dashes = ls
                    else:
                        raise ValueError()
            else:
                raise ValueError()
        except ValueError:
            raise ValueError('Do not know how to convert %s to dashes'%ls)
        self._linestyles = dashes
    def set_linestyles(self, ls):
        """alias for set_linestyle"""
        return self.set_linestyle(ls)
    def set_dashes(self, ls):
        """alias for set_linestyle"""
        return self.set_linestyle(ls)
    def set_antialiased(self, aa):
        """
        Set the antialiasing state for rendering.

        ACCEPTS: Boolean or sequence of booleans
        """
        if aa is None:
            aa = mpl.rcParams['patch.antialiased']
        self._antialiaseds = self._get_bool(aa)
    def set_antialiaseds(self, aa):
        """alias for set_antialiased"""
        return self.set_antialiased(aa)
    def set_color(self, c):
        """
        Set both the edgecolor and the facecolor.

        ACCEPTS: matplotlib color arg or sequence of rgba tuples

        .. seealso::
            :meth:`set_facecolor`, :meth:`set_edgecolor`
        """
        self.set_facecolor(c)
        self.set_edgecolor(c)
    def set_facecolor(self, c):
        """
        Set the facecolor(s) of the collection.  *c* can be a
        matplotlib color arg (all patches have same color), or a
        sequence or rgba tuples; if it is a sequence the patches will
        cycle through the sequence

        ACCEPTS: matplotlib color arg or sequence of rgba tuples
        """
        if c is None: c = mpl.rcParams['patch.facecolor']
        # Keep the original spec so set_alpha can re-derive the rgba array.
        self._facecolors_original = c
        self._facecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
    def set_facecolors(self, c):
        """alias for set_facecolor"""
        return self.set_facecolor(c)
    def get_facecolor(self):
        return self._facecolors
    get_facecolors = get_facecolor
    def get_edgecolor(self):
        if self._edgecolors == 'face':
            return self.get_facecolors()
        else:
            return self._edgecolors
    get_edgecolors = get_edgecolor
    def set_edgecolor(self, c):
        """
        Set the edgecolor(s) of the collection. *c* can be a
        matplotlib color arg (all patches have same color), or a
        sequence or rgba tuples; if it is a sequence the patches will
        cycle through the sequence.

        If *c* is 'face', the edge color will always be the same as
        the face color.

        ACCEPTS: matplotlib color arg or sequence of rgba tuples
        """
        if c == 'face':
            self._edgecolors = 'face'
            self._edgecolors_original = 'face'
        else:
            if c is None: c = mpl.rcParams['patch.edgecolor']
            self._edgecolors_original = c
            self._edgecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
    def set_edgecolors(self, c):
        """alias for set_edgecolor"""
        return self.set_edgecolor(c)
    def set_alpha(self, alpha):
        """
        Set the alpha tranparencies of the collection.  *alpha* must be
        a float.

        ACCEPTS: float
        """
        try: float(alpha)
        except TypeError: raise TypeError('alpha must be a float')
        else:
            artist.Artist.set_alpha(self, alpha)
            # Recompute rgba arrays from the original color specs so the new
            # alpha takes effect; tolerate colors that were never set.
            try:
                self._facecolors = _colors.colorConverter.to_rgba_array(
                    self._facecolors_original, self._alpha)
            except (AttributeError, TypeError, IndexError):
                pass
            try:
                if self._edgecolors_original != 'face':
                    self._edgecolors = _colors.colorConverter.to_rgba_array(
                        self._edgecolors_original, self._alpha)
            except (AttributeError, TypeError, IndexError):
                pass
    def get_linewidths(self):
        return self._linewidths
    get_linewidth = get_linewidths
    def get_linestyles(self):
        return self._linestyles
    get_dashes = get_linestyle = get_linestyles
    def update_scalarmappable(self):
        """
        If the scalar mappable array is not none, update colors
        from scalar data
        """
        if self._A is None: return
        if self._A.ndim > 1:
            raise ValueError('Collections can only map rank 1 arrays')
        if len(self._facecolors):
            self._facecolors = self.to_rgba(self._A, self._alpha)
        else:
            self._edgecolors = self.to_rgba(self._A, self._alpha)
    def update_from(self, other):
        'copy properties from other to self'
        artist.Artist.update_from(self, other)
        self._antialiaseds = other._antialiaseds
        self._edgecolors_original = other._edgecolors_original
        self._edgecolors = other._edgecolors
        self._facecolors_original = other._facecolors_original
        self._facecolors = other._facecolors
        self._linewidths = other._linewidths
        self._linestyles = other._linestyles
        self._pickradius = other._pickradius
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Collection'] = """\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
"""
class QuadMesh(Collection):
    """
    Class for the efficient drawing of a quadrilateral mesh.

    A quadrilateral mesh consists of a grid of vertices. The
    dimensions of this array are (*meshWidth* + 1, *meshHeight* +
    1). Each vertex in the mesh has a different set of "mesh
    coordinates" representing its position in the topology of the
    mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
    and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
    (*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
    form one of the quadrilaterals in the mesh. There are thus
    (*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
    need not be regular and the polygons need not be convex.

    A quadrilateral mesh is represented by a (2 x ((*meshWidth* + 1) *
    (*meshHeight* + 1))) numpy array *coordinates*, where each row is
    the *x* and *y* coordinates of one of the vertices. To define the
    function that maps from a data point to its corresponding color,
    use the :meth:`set_cmap` method. Each of these arrays is indexed in
    row-major order by the mesh coordinates of the vertex (or the mesh
    coordinates of the lower left vertex, in the case of the
    colors).

    For example, the first entry in *coordinates* is the
    coordinates of the vertex at mesh coordinates (0, 0), then the one
    at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
    so on.
    """
    def __init__(self, meshWidth, meshHeight, coordinates, showedges, antialiased=True):
        Collection.__init__(self)
        self._meshWidth = meshWidth
        self._meshHeight = meshHeight
        self._coordinates = coordinates
        self._showedges = showedges
        self._antialiased = antialiased
        self._paths = None
        # Cache the data bounding box once; the mesh geometry is fixed.
        self._bbox = transforms.Bbox.unit()
        self._bbox.update_from_data_xy(coordinates.reshape(
            ((meshWidth + 1) * (meshHeight + 1), 2)))
        # By converting to floats now, we can avoid that on every draw.
        self._coordinates = self._coordinates.reshape((meshHeight + 1, meshWidth + 1, 2))
        self._coordinates = np.array(self._coordinates, np.float_)
    def get_paths(self, dataTrans=None):
        # Lazily build the per-quad Path list for backends without native
        # quadmesh support.
        if self._paths is None:
            self._paths = self.convert_mesh_to_paths(
                self._meshWidth, self._meshHeight, self._coordinates)
        return self._paths
    #@staticmethod
    def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
        """
        Converts a given mesh into a sequence of
        :class:`matplotlib.path.Path` objects for easier rendering by
        backends that do not directly support quadmeshes.

        This function is primarily of use to backend implementers.
        """
        Path = mpath.Path
        if ma.isMaskedArray(coordinates):
            c = coordinates.data
        else:
            c = coordinates
        # Each quad is the 5-point closed polygon built from four adjacent
        # grid vertices (last point repeats the first).
        points = np.concatenate((
                    c[0:-1, 0:-1],
                    c[0:-1, 1:  ],
                    c[1:  , 1:  ],
                    c[1:  , 0:-1],
                    c[0:-1, 0:-1]
                    ), axis=2)
        points = points.reshape((meshWidth * meshHeight, 5, 2))
        return [Path(x) for x in points]
    convert_mesh_to_paths = staticmethod(convert_mesh_to_paths)
    def get_datalim(self, transData):
        return self._bbox
    def draw(self, renderer):
        if not self.get_visible(): return
        renderer.open_group(self.__class__.__name__)
        transform = self.get_transform()
        transOffset = self._transOffset
        offsets = self._offsets
        if self.have_units():
            if len(self._offsets):
                # Fix: index the x and y *columns* of the Nx2 offsets array.
                # The previous [:0]/[:1] row slices made unit conversion of
                # offsets a no-op (empty / wrong data).
                xs = self.convert_xunits(self._offsets[:, 0])
                ys = self.convert_yunits(self._offsets[:, 1])
                offsets = zip(xs, ys)
        offsets = np.asarray(offsets, np.float_)
        if self.check_update('array'):
            self.update_scalarmappable()
        clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
        if clippath_trans is not None:
            clippath_trans = clippath_trans.frozen()
        if not transform.is_affine:
            # Pre-apply the non-affine part to the coordinates so the
            # renderer only has to deal with an affine transform.
            coordinates = self._coordinates.reshape(
                (self._coordinates.shape[0] *
                 self._coordinates.shape[1],
                 2))
            coordinates = transform.transform(coordinates)
            coordinates = coordinates.reshape(self._coordinates.shape)
            transform = transforms.IdentityTransform()
        else:
            coordinates = self._coordinates
        if not transOffset.is_affine:
            offsets = transOffset.transform_non_affine(offsets)
            transOffset = transOffset.get_affine()
        renderer.draw_quad_mesh(
            transform.frozen(), self.clipbox, clippath, clippath_trans,
            self._meshWidth, self._meshHeight, coordinates,
            offsets, transOffset, self.get_facecolor(), self._antialiased,
            self._showedges)
        renderer.close_group(self.__class__.__name__)
class PolyCollection(Collection):
    # A collection of filled polygons; each entry of *verts* becomes one Path.
    def __init__(self, verts, sizes = None, closed = True, **kwargs):
        """
        *verts* is a sequence of ( *verts0*, *verts1*, ...) where
        *verts_i* is a sequence of *xy* tuples of vertices, or an
        equivalent :mod:`numpy` array of shape (*nv*, 2).
        *sizes* is *None* (default) or a sequence of floats that
        scale the corresponding *verts_i*. The scaling is applied
        before the Artist master transform; if the latter is an identity
        transform, then the overall scaling is such that if
        *verts_i* specify a unit square, then *sizes_i* is the area
        of that square in points^2.
        If len(*sizes*) < *nv*, the additional values will be
        taken cyclically from the array.
        *closed*, when *True*, will explicitly close the polygon.
        %(Collection)s
        """
        Collection.__init__(self,**kwargs)
        self._sizes = sizes
        self.set_verts(verts, closed)
    # Interpolate the shared Collection kwarg documentation into the docstring.
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def set_verts(self, verts, closed=True):
        '''This allows one to delay initialization of the vertices.'''
        if closed:
            self._paths = []
            for xy in verts:
                # Close each polygon by appending the first vertex, unless it
                # is already closed; masked arrays are concatenated with ma so
                # their masks survive.
                if np.ma.isMaskedArray(xy):
                    if len(xy) and (xy[0] != xy[-1]).any():
                        xy = np.ma.concatenate([xy, [xy[0]]])
                else:
                    xy = np.asarray(xy)
                    if len(xy) and (xy[0] != xy[-1]).any():
                        xy = np.concatenate([xy, [xy[0]]])
                self._paths.append(mpath.Path(xy))
        else:
            self._paths = [mpath.Path(xy) for xy in verts]
    def get_paths(self):
        return self._paths
    def draw(self, renderer):
        # sqrt(size) is the linear scale factor; dpi/72 converts points to
        # pixels. Recomputed each draw because the figure dpi may change.
        if self._sizes is not None:
            self._transforms = [
                transforms.Affine2D().scale(
                    (np.sqrt(x) * self.figure.dpi / 72.0))
                for x in self._sizes]
        return Collection.draw(self, renderer)
class BrokenBarHCollection(PolyCollection):
    """
    A collection of horizontal bars spanning *yrange* with a sequence of
    *xranges*.
    """
    def __init__(self, xranges, yrange, **kwargs):
        """
        *xranges*
            sequence of (*xmin*, *xwidth*)
        *yrange*
            *ymin*, *ywidth*
        %(Collection)s
        """
        ymin, ywidth = yrange
        ymax = ymin + ywidth
        # Build one closed rectangle (counter-clockwise) per (xmin, xwidth).
        verts = [ [(xmin, ymin), (xmin, ymax), (xmin+xwidth, ymax), (xmin+xwidth, ymin), (xmin, ymin)] for xmin, xwidth in xranges]
        PolyCollection.__init__(self, verts, **kwargs)
    # Interpolate the shared Collection kwarg documentation into the docstring.
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    @staticmethod
    def span_where(x, ymin, ymax, where, **kwargs):
        """
        Create a BrokenBarHCollection to plot horizontal bars from
        over the regions in *x* where *where* is True.  The bars range
        on the y-axis from *ymin* to *ymax*
        A :class:`BrokenBarHCollection` is returned.
        *kwargs* are passed on to the collection
        """
        xranges = []
        # mlab.contiguous_regions yields (start, stop) index pairs of the
        # runs where *where* is True.
        for ind0, ind1 in mlab.contiguous_regions(where):
            xslice = x[ind0:ind1]
            if not len(xslice):
                continue
            xranges.append((xslice[0], xslice[-1]-xslice[0]))
        collection = BrokenBarHCollection(xranges, [ymin, ymax-ymin], **kwargs)
        return collection
class RegularPolyCollection(Collection):
    """Draw a collection of regular polygons with *numsides*."""
    # Subclasses (Star/Asterisk) override this path factory.
    _path_generator = mpath.Path.unit_regular_polygon
    def __init__(self,
                 numsides,
                 rotation = 0 ,
                 sizes = (1,),
                 **kwargs):
        """
        *numsides*
            the number of sides of the polygon
        *rotation*
            the rotation of the polygon in radians
        *sizes*
            gives the area of the circle circumscribing the
            regular polygon in points^2
        %(Collection)s
        Example: see :file:`examples/dynamic_collection.py` for
        complete example::
            offsets = np.random.rand(20,2)
            facecolors = [cm.jet(x) for x in np.random.rand(20)]
            black = (0,0,0,1)
            collection = RegularPolyCollection(
                numsides=5, # a pentagon
                rotation=0, sizes=(50,),
                facecolors = facecolors,
                edgecolors = (black,),
                linewidths = (1,),
                offsets = offsets,
                transOffset = ax.transData,
                )
        """
        Collection.__init__(self,**kwargs)
        self._sizes = sizes
        self._numsides = numsides
        # A single unit path, scaled/rotated per instance at draw time.
        self._paths = [self._path_generator(numsides)]
        self._rotation = rotation
        self.set_transform(transforms.IdentityTransform())
    # Interpolate the shared Collection kwarg documentation into the docstring.
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def draw(self, renderer):
        # Scale so each polygon's circumscribing circle has the requested
        # area in points^2 (hence the extra sqrt(pi) divisor).
        self._transforms = [
            transforms.Affine2D().rotate(-self._rotation).scale(
                (np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
            for x in self._sizes]
        return Collection.draw(self, renderer)
    def get_paths(self):
        return self._paths
    def get_numsides(self):
        return self._numsides
    def get_rotation(self):
        return self._rotation
    def get_sizes(self):
        return self._sizes
class StarPolygonCollection(RegularPolyCollection):
    """
    Draw a collection of regular stars with *numsides* points."""
    # Same machinery as RegularPolyCollection, but with a star-shaped path.
    _path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
    """
    Draw a collection of regular asterisks with *numsides* points."""
    # Same machinery as RegularPolyCollection, but with an asterisk path.
    _path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
    """
    All parameters must be sequences or scalars; if scalars, they will
    be converted to sequences.  The property of the ith line
    segment is::
       prop[i % len(props)]
    i.e., the properties cycle if the ``len`` of props is less than the
    number of segments.
    """
    zorder = 2
    def __init__(self, segments,     # Can be None.
                 linewidths    = None,
                 colors       = None,
                 antialiaseds  = None,
                 linestyles = 'solid',
                 offsets = None,
                 transOffset = None,
                 norm = None,
                 cmap = None,
                 pickradius = 5,
                 **kwargs
                 ):
        """
        *segments*
            a sequence of (*line0*, *line1*, *line2*), where::
                linen = (x0, y0), (x1, y1), ... (xm, ym)
            or the equivalent numpy array with two columns. Each line
            can be a different length.
        *colors*
            must be a sequence of RGBA tuples (eg arbitrary color
            strings, etc, not allowed).
        *antialiaseds*
            must be a sequence of ones or zeros
        *linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
            a string or dash tuple. The dash tuple is::
                (offset, onoffseq),
            where *onoffseq* is an even length tuple of on and off ink
            in points.
        If *linewidths*, *colors*, or *antialiaseds* is None, they
        default to their rcParams setting, in sequence form.
        If *offsets* and *transOffset* are not None, then
        *offsets* are transformed by *transOffset* and applied after
        the segments have been transformed to display coordinates.
        If *offsets* is not None but *transOffset* is None, then the
        *offsets* are added to the segments before any transformation.
        In this case, a single offset can be specified as::
            offsets=(xo,yo)
        and this value will be added cumulatively to each successive
        segment, so as to produce a set of successively offset curves.
        *norm*
            None (optional for :class:`matplotlib.cm.ScalarMappable`)
        *cmap*
            None (optional for :class:`matplotlib.cm.ScalarMappable`)
        *pickradius* is the tolerance for mouse clicks picking a line.
        The default is 5 pt.
        The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
        If the :class:`~matplotlib.cm.ScalarMappable` matrix
        :attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to
        :meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
        draw time a call to scalar mappable will be made to set the colors.
        """
        if colors is None: colors = mpl.rcParams['lines.color']
        if linewidths is None: linewidths = (mpl.rcParams['lines.linewidth'],)
        if antialiaseds is None: antialiaseds = (mpl.rcParams['lines.antialiased'],)
        # Normalize linestyles before the base __init__ runs (which sets
        # them again from the same argument).
        self.set_linestyles(linestyles)
        colors = _colors.colorConverter.to_rgba_array(colors)
        # Lines are drawn as unfilled paths: colors become *edge* colors and
        # facecolors are set empty below.
        Collection.__init__(
            self,
            edgecolors=colors,
            linewidths=linewidths,
            linestyles=linestyles,
            antialiaseds=antialiaseds,
            offsets=offsets,
            transOffset=transOffset,
            norm=norm,
            cmap=cmap,
            pickradius=pickradius,
            **kwargs)
        self.set_facecolors([])
        self.set_segments(segments)
    def get_paths(self):
        return self._paths
    def set_segments(self, segments):
        if segments is None: return
        _segments = []
        for seg in segments:
            if not np.ma.isMaskedArray(seg):
                seg = np.asarray(seg, np.float_)
            _segments.append(seg)
        if self._uniform_offsets is not None:
            _segments = self._add_offsets(_segments)
        self._paths = [mpath.Path(seg) for seg in _segments]
    set_verts = set_segments # for compatibility with PolyCollection
    def _add_offsets(self, segs):
        # Apply uniform offsets in data space: a single offset accumulates
        # (segment i shifted by i*offset); multiple offsets cycle.
        offsets = self._uniform_offsets
        Nsegs = len(segs)
        Noffs = offsets.shape[0]
        if Noffs == 1:
            for i in range(Nsegs):
                segs[i] = segs[i] + i * offsets
        else:
            for i in range(Nsegs):
                io = i%Noffs
                segs[i] = segs[i] + offsets[io:io+1]
        return segs
    def set_color(self, c):
        """
        Set the color(s) of the line collection.  *c* can be a
        matplotlib color arg (all patches have same color), or a
        sequence or rgba tuples; if it is a sequence the patches will
        cycle through the sequence

        ACCEPTS: matplotlib color arg or sequence of rgba tuples
        """
        self._edgecolors = _colors.colorConverter.to_rgba_array(c)
    def color(self, c):
        """
        Set the color(s) of the line collection.  *c* can be a
        matplotlib color arg (all patches have same color), or a
        sequence or rgba tuples; if it is a sequence the patches will
        cycle through the sequence

        ACCEPTS: matplotlib color arg or sequence of rgba tuples
        """
        warnings.warn('LineCollection.color deprecated; use set_color instead')
        return self.set_color(c)
    def get_color(self):
        return self._edgecolors
    get_colors = get_color  # for compatibility with old versions
class CircleCollection(Collection):
    """
    A collection of circles, drawn using splines.
    """
    def __init__(self, sizes, **kwargs):
        """
        *sizes*
            Gives the area of the circle in points^2
        %(Collection)s
        """
        Collection.__init__(self,**kwargs)
        self._sizes = sizes
        self.set_transform(transforms.IdentityTransform())
        # One shared unit-circle path, scaled per instance at draw time.
        self._paths = [mpath.Path.unit_circle()]
    # Interpolate the shared Collection kwarg documentation into the docstring.
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def draw(self, renderer):
        # sizes is the area of the circle circumscribing the polygon
        # in points^2
        self._transforms = [
            transforms.Affine2D().scale(
                (np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
            for x in self._sizes]
        return Collection.draw(self, renderer)
    def get_paths(self):
        return self._paths
class EllipseCollection(Collection):
    """
    A collection of ellipses, drawn using splines.
    """
    def __init__(self, widths, heights, angles, units='points', **kwargs):
        """
        *widths*: sequence
            half-lengths of first axes (e.g., semi-major axis lengths)
        *heights*: sequence
            half-lengths of second axes
        *angles*: sequence
            angles of first axes, degrees CCW from the X-axis
        *units*: ['points' | 'inches' | 'dots' | 'width' | 'height' | 'x' | 'y']
            units in which majors and minors are given; 'width' and 'height'
            refer to the dimensions of the axes, while 'x' and 'y'
            refer to the *offsets* data units.
        Additional kwargs inherited from the base :class:`Collection`:
        %(Collection)s
        """
        Collection.__init__(self,**kwargs)
        self._widths = np.asarray(widths).ravel()
        self._heights = np.asarray(heights).ravel()
        # Angles are given in degrees but stored in radians.
        self._angles = np.asarray(angles).ravel() *(np.pi/180.0)
        self._units = units
        self.set_transform(transforms.IdentityTransform())
        self._transforms = []
        self._paths = [mpath.Path.unit_circle()]
        self._initialized = False
    # Interpolate the shared Collection kwarg documentation into the docstring.
    __init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
    def _init(self):
        # Deferred to first draw, when self.figure is guaranteed to be set:
        # invalidate the cached transforms whenever the figure dpi changes.
        def on_dpi_change(fig):
            self._transforms = []
        self.figure.callbacks.connect('dpi_changed', on_dpi_change)
        self._initialized = True
    def set_transforms(self):
        # Rebuild one scale+rotate transform per ellipse, converting the
        # half-axis lengths from self._units to pixels.
        if not self._initialized:
            self._init()
        self._transforms = []
        ax = self.axes
        fig = self.figure
        if self._units in ('x', 'y'):
            if self._units == 'x':
                dx0 = ax.viewLim.width
                dx1 = ax.bbox.width
            else:
                dx0 = ax.viewLim.height
                dx1 = ax.bbox.height
            sc = dx1/dx0
        else:
            if self._units == 'inches':
                sc = fig.dpi
            elif self._units == 'points':
                sc = fig.dpi / 72.0
            elif self._units == 'width':
                sc = ax.bbox.width
            elif self._units == 'height':
                sc = ax.bbox.height
            elif self._units == 'dots':
                sc = 1.0
            else:
                raise ValueError('unrecognized units: %s' % self._units)
        _affine = transforms.Affine2D
        for x, y, a in zip(self._widths, self._heights, self._angles):
            trans = _affine().scale(x * sc, y * sc).rotate(a)
            self._transforms.append(trans)
    def draw(self, renderer):
        # NOTE(review): the `if True:` below disables the commented-out
        # caching condition, so transforms are recomputed on every draw.
        # Presumably deliberate (axes limits can change between draws) --
        # confirm before restoring the cache.
        if True: ###not self._transforms:
            self.set_transforms()
        return Collection.draw(self, renderer)
    def get_paths(self):
        return self._paths
class PatchCollection(Collection):
    """
    A generic collection of patches.

    This makes it easier to assign a color map to a heterogeneous
    collection of patches.

    This also may improve plotting speed, since PatchCollection will
    draw faster than a large number of patches.
    """
    def __init__(self, patches, match_original=False, **kwargs):
        """
        *patches*
            a sequence of Patch objects.  This list may include
            a heterogeneous assortment of different patch types.
        *match_original*
            If True, use the colors and linewidths of the original
            patches.  If False, new colors may be assigned by
            providing the standard collection arguments, facecolor,
            edgecolor, linewidths, norm or cmap.
        If any of *edgecolors*, *facecolors*, *linewidths*,
        *antialiaseds* are None, they default to their
        :data:`matplotlib.rcParams` patch setting, in sequence form.
        The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
        If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
        None (ie a call to set_array has been made), at draw time a
        call to scalar mappable will be made to set the face colors.
        """
        if match_original:
            # NOTE(review): in this branch **kwargs is silently ignored, and
            # p.get_linewidths() (plural) is called on each Patch -- verify
            # that the Patch API of this matplotlib version provides it.
            def determine_facecolor(patch):
                if patch.fill:
                    return patch.get_facecolor()
                # Unfilled patches map to fully transparent rgba.
                return [0, 0, 0, 0]
            facecolors   = [determine_facecolor(p) for p in patches]
            edgecolors   = [p.get_edgecolor() for p in patches]
            linewidths   = [p.get_linewidths() for p in patches]
            antialiaseds = [p.get_antialiased() for p in patches]
            Collection.__init__(
                self,
                edgecolors=edgecolors,
                facecolors=facecolors,
                linewidths=linewidths,
                linestyles='solid',
                antialiaseds = antialiaseds)
        else:
            Collection.__init__(self, **kwargs)
        # Bake each patch's own transform into its path so all paths live in
        # a common coordinate system.
        paths = [p.get_transform().transform_path(p.get_path())
                 for p in patches]
        self._paths = paths
    def get_paths(self):
        return self._paths
# Install the real kwarg documentation (overriding the placeholder defined
# above) now that the classes exist.
artist.kwdocd['Collection'] = patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'PolyCollection', 'BrokenBarHCollection', 'RegularPolyCollection',
          'StarPolygonCollection', 'PatchCollection', 'CircleCollection'):
    artist.kwdocd[k] = patchstr
artist.kwdocd['LineCollection'] = artist.kwdoc(LineCollection)
| agpl-3.0 |
roselleebarle04/opencog | opencog/python/spatiotemporal/temporal_events/composition/emperical_distribution.py | 34 | 6615 | import csv
import numpy
from spatiotemporal.temporal_events.relation_formulas import TemporalRelation
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium, generate_random_events
from spatiotemporal.time_intervals import TimeInterval
__author__ = 'keyvan'
def trim_float(float_object, no_digits=12):
    """Truncate *float_object* to *no_digits* digits after the decimal point.

    Truncation (not rounding) is performed toward zero, mirroring int().
    """
    scale = 10 ** no_digits
    truncated = int(float_object * scale)
    return truncated / float(scale)
def overlaps(bounds_1, bounds_2):
    """Return whether two (start, end) intervals overlap or share an endpoint.

    Endpoints are truncated to 12 decimal digits first, so that float noise
    does not produce spurious strict inequalities.  Intervals that merely
    touch (b_1 == a_2 with distinct endpoints) do NOT count as overlapping.
    """
    def _trim(value, scale=10 ** 12):
        # Inline of trim_float(value, 12): truncate toward zero.
        return int(value * scale) / float(scale)

    a_1, b_1 = [_trim(v) for v in bounds_1]
    a_2, b_2 = [_trim(v) for v in bounds_2]
    if a_1 == a_2 or b_1 == b_2:
        return True
    return (a_1 < a_2 < b_1 or a_1 < b_2 < b_1 or
            a_2 < a_1 < b_2 or a_2 < b_1 < b_2)
def generate_random_relations(size=1000):
    """Build *size* relation vectors between a fixed event and sliding events.

    A reference trapezium event is compared against events whose start time
    sweeps uniformly over [991, 1008]; each comparison's relation vector is
    collected via to_list().
    """
    reference = TemporalEventTrapezium(1000, 1008, 1002, 1004)
    relations = []
    for start in TimeInterval(991, 1008, size):
        moving = TemporalEventTrapezium(start, start + 9, start + 3, start + 8)
        relations.append((reference * moving).to_list())
    return relations
def generate_random_relations_file(size=20):
    # Write `size + 1` random event-pair relation vectors to relations.csv~.
    # Pairs whose begin/end phases do not overlap at all are rejected and
    # re-drawn, so only comparable events are recorded.
    from datetime import datetime
    from spatiotemporal.time_intervals import TimeInterval
    csv_writer = csv.writer(open('relations.csv~', 'w'))
    year_2010 = TimeInterval(datetime(2010, 1, 1), datetime(2011, 1, 1))
    i = size
    specifications = [None, None]
    while i >= 0:
        # Draw two random trapezium specs (a <= beg <= end <= b) within 2010.
        for j in xrange(2):
            a = year_2010.random_time()
            beg = year_2010.random_time(start=a)
            end = year_2010.random_time(start=beg) #(start=a)
            b = year_2010.random_time(start=end) #(start=max(end, beg))
            specifications[j] = (a, b, beg, end)
        # Rising phase (a, beg) and falling phase (end, b) of each event.
        a_beg, a_end = (specifications[0][0], specifications[0][2]), (specifications[0][3], specifications[0][1])
        b_beg, b_end = (specifications[1][0], specifications[1][2]), (specifications[1][3], specifications[1][1])
        valid = False
        for bounds_1, bounds_2 in [
            (a_beg, b_beg), (a_beg, b_end), (a_end, b_beg), (a_end, b_end)
        ]:
            if overlaps(bounds_1, bounds_2):
                valid = True
                break
        if not valid:
            continue
        event_1, event_2 = TemporalEventTrapezium(*specifications[0]), TemporalEventTrapezium(*specifications[1])
        csv_writer.writerow((event_1 * event_2).to_list())
        # Progress report every 1000 accepted rows.
        percentage = (size - i + 1) / float(size) * 100
        if (size - i + 1) % 10**3 == 0:
            print '%' + str(int(percentage))
        i -= 1
def read_data(size=1000):
    # Read up to `size + 1` rows from relations.csv~ and scatter-plot the
    # first three relation components (p, m, o) in 3D.
    #
    # NOTE(review): `relations` is created but never filled or returned, so
    # this function returns None -- yet learn_all() below does
    # `relations = read_data(size)` and indexes the result.  One of the two
    # needs fixing; confirm intended contract before changing either.
    csv_reader = csv.reader(open('relations.csv~', 'r'))
    relations = []
    i = size
    # `os` here shadows nothing imported, but the name is unfortunate.
    ps, ms, os = [], [], []
    for row in csv_reader:
        p, m, o = row[0:3]
        p, m, o = float(p), float(m), float(o)
        if i < 0:
            break
        ps.append(p)
        ms.append(m)
        os.append(o)
        i -= 1
    from matplotlib import pylab as plt
    # Axes3D import registers the '3d' projection used below.
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(ps, ms, os)
    ax.set_xlabel('p')
    ax.set_ylabel('m')
    ax.set_zlabel('o')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_zlim(0, 1)
    plt.show()
def classify(size=10000, iterable=None):
csv_reader = iterable
if iterable is None:
csv_reader = csv.reader(open('relations.csv~', 'r'))
classes = {}
for i, row in enumerate(csv_reader):
if i > size - 1:
print 'number of classes:', len(classes)
for class_type in classes:
print classes[class_type][0].type, len(classes[class_type])
return classes
relation = TemporalRelation.from_list(row)
if relation.type not in classes:
classes[relation.type] = [relation]
else:
classes[relation.type].append(relation)
print 'number of classes:', len(classes)
for class_type in classes:
print classes[class_type][0].type, len(classes[class_type])
return classes
def learn(size=10000):
    """Fit a k-NN regressor predicting P from (O, M) for DSOMP relations.

    Python 2 code: `size / 4` relies on integer floor division. Prints a few
    sanity-check predictions on train and test samples instead of returning
    a score.
    """
    classes = classify(size)
    relations = classes['DSOMP']
    size = len(relations)
    # 3/4 train, 1/4 test split (integer division under Python 2).
    train_size = size - size / 4
    train_data = relations[0:train_size]
    test_data = relations[train_size:]
    train_x, train_y = [], []
    for relation in train_data:
        train_x.append([relation['O'], relation['M']])
        train_y.append(relation['P'])
    train_x = numpy.array(train_x)
    train_y = numpy.array(train_y).T
    # Leftover imports from model experiments; only KNeighborsRegressor is used.
    from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LassoLars, BayesianRidge, ElasticNetCV, SGDRegressor
    from sklearn.svm import SVR
    from sklearn.neighbors import KNeighborsRegressor
    from random import randrange
    clf = KNeighborsRegressor(8)#alpha=0.000001)
    clf.fit(train_x, train_y)
    test_x, test_y = [], []
    for relation in test_data:
        test_x.append([relation['O'], relation['M']])
        test_y.append(relation['P'])
    print '\n', '///////// tests ////////'
    for i in xrange(5):
        # NOTE(review): predicting on a single bare sample (not wrapped in a
        # list) is deprecated/rejected by modern sklearn -- confirm version.
        print 'learning', clf.predict(train_x[i])
        print 'actual', train_y[i], '\n-------------\n'
    print '***************************'
    for i in xrange(5):
        print 'learning', clf.predict(test_x[i])
        print 'actual', test_y[i], '\n-------------\n'
def learn_all(size=10000):
    """Fit a k-NN regressor predicting 'o' from 'F' over all relation types.

    Python 2 code; mirrors learn() but reads rows via read_data().
    NOTE(review): this requires read_data() to return the parsed relation
    list -- verify that contract holds.
    """
    relations = read_data(size)
    size = len(relations)
    # 3/4 train, 1/4 test split (integer division under Python 2).
    train_size = size - size / 4
    train_data = relations[0:train_size]
    test_data = relations[train_size:]
    train_x, train_y = [], []
    for relation in train_data:
        train_x.append(numpy.array([relation['F']]))
        train_y.append(numpy.array([relation['o']]))
    train_x = numpy.array(train_x)
    train_y = numpy.array(train_y)
    # Leftover imports from model experiments; only KNeighborsRegressor is used.
    from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LassoLars, BayesianRidge, ElasticNetCV, SGDRegressor
    from sklearn.svm import SVR
    from sklearn.neighbors import KNeighborsRegressor
    from random import randrange
    clf = KNeighborsRegressor()#alpha=0.000001)
    clf.fit(train_x, train_y)
    test_x, test_y = [], []
    for relation in test_data:
        test_x.append(numpy.array([relation['F']]))
        test_y.append(numpy.array([relation['o']]))
    print '\n', '///////// tests ////////'
    for i in xrange(5):
        print 'F:', train_x[i]
        print 'learning', clf.predict(train_x[i])
        print 'actual', train_y[i], '\n-------------\n'
    print '***************************'
    for i in xrange(5):
        print 'F:', test_x[i]
        print 'learning', clf.predict(test_x[i])
        print 'actual', test_y[i], '\n-------------\n'
| agpl-3.0 |
chandupatilgithub/ml_lab_ecsc_306 | labwork/lab2/sci-learn/non_linear_regression.py | 120 | 1520 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data: 40 sorted points on [0, 5) with a sine target
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to every fifth target
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit one SVR per kernel and predict back on the training inputs
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
# NOTE: `plt.hold('on')` was removed here -- it was deprecated in matplotlib
# 2.0 and removed in 3.0; successive plot calls are additive by default.
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| apache-2.0 |
RamaneekGill/Emotion-Recognition- | svm.py | 1 | 7136 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores results from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the value number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
# print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import time
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.cross_validation import check_cv
from sklearn.grid_search import GridSearchCV
from sklearn.decomposition import RandomizedPCA
# Colormap normalizer that pins a chosen midpoint value to the centre of the
# colormap (used below to spread out the interesting score range).
class MidpointNormalize(Normalize):
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        """Like Normalize, but the value `midpoint` maps to the colormap centre."""
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)
    def __call__(self, value, clip=None):
        # Piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1.
        anchor_values = [self.vmin, self.midpoint, self.vmax]
        anchor_positions = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchor_values, anchor_positions))
##############################################################################
# Constants
PCA_COMPONENTS = 150  # dimensionality kept after PCA
GRID_SIZE = 13        # points per axis of the C/gamma search grid
start = time.time()
# Load the dataset (project-local loader; imported here so the heavy read
# only happens when this script actually runs)
import input_data_svm
datasets = input_data_svm.read_data_sets()
# Train on train + validation combined; grid search cross-validates internally.
X = np.vstack((datasets.train_set.inputs(), datasets.validation_set.inputs()))
y = np.hstack((datasets.train_set.targets(), datasets.validation_set.targets()))
# X = datasets.train_set.inputs()
# y = datasets.train_set.targets()
X = X[:]
y = y[:]
# Reduce the dimensionality of the dataset.
# NOTE(review): RandomizedPCA, cross_validation and grid_search are pre-0.20
# sklearn APIs; this script requires an old sklearn release.
print("Applying PCA to reduce dimensions")
pca = RandomizedPCA(n_components=PCA_COMPONENTS, whiten=True).fit(X)
# eigenfaces = pca.components_.reshape((PCA_COMPONENTS, h, w))
X = pca.transform(X)
print("Finished PCA preprocessing")
# Normalize the data (zero mean, unit variance per feature)
scaler = StandardScaler()
X = scaler.fit_transform(X)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
print('Start training the model')
C_range = np.logspace(-2, 10, GRID_SIZE)
gamma_range = np.logspace(-9, 3, GRID_SIZE)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.1, random_state=2015)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
      % (grid.best_params_, grid.best_score_))
print("This took {} minutes to run".format((time.time() - start)/60))
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
           norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| apache-2.0 |
Loisel/tmr3 | cartesian.py | 1 | 1488 | import numpy as np
def cartesian(arrays, out=None):
    """
    Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray, optional
        Array to place the cartesian product in.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.

    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(x) for x in arrays]
    # All output rows take the dtype of the first array.
    dtype = arrays[0].dtype
    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)
    # Each value of the first array heads m rows, one per combination of the
    # remaining arrays. `//` (floor division) keeps m an integer on Python 3;
    # the original `/` true-divides there and breaks the slicing below.
    m = n // arrays[0].size
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        # Fill the first stripe recursively, then copy it for the remaining
        # values of the first array. (`range` replaces Python-2-only `xrange`.)
        cartesian(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]
    return out
| gpl-3.0 |
rvswift/EB | EB/builder/postanalysis/postanalysis.py | 1 | 10848 | __author__ = 'robswift'
import os
import sys
import copy
import EB.builder.utilities.csv_interface as csv_interface
import EB.builder.utilities.classification as classification
import EB.builder.utilities.output as output
def run(itf):
    """
    Run the post-analysis workflow.

    Parses the command-line interface object, validates it, loads the input
    files, then dispatches to compare() (exactly two ensembles) or
    evaluate_list() (a list of ensembles).

    :param itf: command-line interface object; falsy means nothing to do
    :return: 1 on failure, None on success
    """
    if not itf:
        return 1
    # access user input
    options = SplitInput(itf)
    # check input args (exits the process on an invalid combination)
    error_check(options)
    # read input files; ReadFiles signals failure by returning 1, which makes
    # the tuple unpacking raise TypeError and lands us in the except branch.
    try:
        molecules, ensemble_lookup = ReadFiles(options)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit from
        # ReadFiles' sys.exit(1); now SystemExit propagates as intended.
        return 1
    if options.compare:
        compare(molecules, ensemble_lookup, options)
    else:
        evaluate_list(molecules, ensemble_lookup, options)
def compare(molecules, ensemble_lookup, options):
    """
    Compare the virtual-screening performance of exactly two ensembles.

    Computes the AUC difference (with 95% CI and p-value) and, where defined,
    enrichment-factor differences at each false-positive fraction, then writes
    a summary table plus optional ROC data files and plots.

    :param molecules: molecule objects parsed from the input csv
    :param ensemble_lookup: {ensemble file basename: [query names]}; must hold
        exactly two entries (enforced earlier by error_check)
    :param options: SplitInput instance with the parsed command line
    :return: None (results go to disk / stdout)
    """
    print(" Analyzing differences ... ")
    print('')
    sort_order = classification.get_sort_order(molecules)
    # The two ensembles are taken in sorted-by-filename order.
    ensemble1 = sorted(ensemble_lookup.keys())[0]
    ensemble2 = sorted(ensemble_lookup.keys())[1]
    # Summary table header: blank corner cell, both ensemble names (without
    # the .csv extension), then the statistics columns.
    stats = {}
    stats['header'] = [' ']
    name = os.path.basename(ensemble1).replace('.csv', '')
    stats['header'].append(name)
    name = os.path.basename(ensemble2).replace('.csv', '')
    stats['header'].append(name)
    stats['header'].append('Difference')
    stats['header'].append('95% CI')
    stats['header'].append('p-value')
    # Deep-copy the molecules per ensemble; presumably make_score_structure
    # mutates its input -- NOTE(review): confirm, otherwise the copies are
    # unnecessary.
    molecules1 = copy.deepcopy(molecules)
    molecules2 = copy.deepcopy(molecules)
    score_structure1 = classification.make_score_structure(molecules1, ensemble_lookup[ensemble1])
    score_structure2 = classification.make_score_structure(molecules2, ensemble_lookup[ensemble2])
    auc_structure_1 = classification.make_auc_structure(score_structure1)
    auc_structure_2 = classification.make_auc_structure(score_structure2)
    # calculate auc value differences
    auc_diff = classification.calculate_auc_diff(auc_structure_1, auc_structure_2, sort_order)
    stats['AUC'] = auc_diff
    # calculate enrichment factor differences at each false-positive fraction
    fpfList = make_fpfList(options)
    for fpf in fpfList:
        fpf = float(fpf)
        ef_structure1 = classification.make_ef_structure(score_structure1, fpf, sort_order)
        ef_structure2 = classification.make_ef_structure(score_structure2, fpf, sort_order)
        if ef_structure1 and ef_structure2:
            ef_diff = classification.calculate_ef_diff(ef_structure1, ef_structure2, fpf)
            title = 'E%s' % fpf
            stats[title] = ef_diff
    # write results summary
    output.write_diff_summary(stats, options)
    # write roc curves, one file per ensemble, when requested
    if options.write_roc:
        print(" Writing ROC data ... ")
        print('')
        output.write_roc(auc_structure_1, ensemble1, options)
        output.write_roc(auc_structure_2, ensemble2, options)
    # plot ROC curves when requested
    if options.plot:
        print(" Making plots ... ")
        print('')
        plotter(molecules, ensemble_lookup, options)
def evaluate_list(molecules, ensemble_lookup, options):
    """
    Score every ensemble in ensemble_lookup against the molecule set, write a
    results summary (and optionally ROC data), and plot when requested.

    :param molecules: molecule objects parsed from the input csv
    :param ensemble_lookup: {ensemble file basename: [query names]}
    :param options: SplitInput instance with the parsed command line
    :return: None (results go to disk / stdout)
    """
    # results per ensemble file: {file name: [auc_tuple, ef_tuple, ...]}
    stats = {}
    # print progress messages
    if options.write_roc:
        print(" Determining virtual screening performance and writing ROC data ... ")
        print('')
    else:
        print(" Determining virtual screening performance ...")
        print('')
    for filename in sorted(ensemble_lookup.keys()):
        metric_List = calculate_metrics(molecules, ensemble_lookup, filename, options)
        stats[filename] = metric_List
    # write results summary
    output.write_summary(stats, options, fw_type = None)
    # plot
    if options.plot:
        print(" Making plots ... ")
        # Fix: this was a bare `print` expression -- a no-op in Python 3 --
        # where every sibling code path prints a blank line.
        print('')
        plotter(molecules, ensemble_lookup, options)
def calculate_metrics(molecules, ensemble_lookup, filename, options):
    """
    Score one ensemble's virtual-screening performance.

    :param molecules: molecule objects parsed from the input csv
    :param ensemble_lookup: {ensemble file basename: [query names]}
    :param filename: key into ensemble_lookup selecting the ensemble to score
    :param options: SplitInput instance with the parsed command line
    :return: [(auc, auclow, auchigh), (fpf, ef, eflow, efhigh), ...]
    """
    sort_order = 'asc'
    # Build the score structure for this ensemble, then derive AUC data.
    scores = classification.make_score_structure(molecules, ensemble_lookup[filename])
    auc_data = classification.make_auc_structure(scores)
    metrics = [classification.calculate_auc(auc_data, sort_order)]
    # Enrichment factors (with confidence intervals) at each defined
    # false-positive fraction; undefined fractions are skipped.
    for raw_fpf in make_fpfList(options):
        threshold = float(raw_fpf)
        ef_data = classification.make_ef_structure(scores, threshold, sort_order)
        if ef_data:
            metrics.append(
                classification.calculate_ef(ef_data, threshold, None, 'include_intervals'))
    if options.write_roc:
        output.write_roc(auc_data, filename, options)
    return metrics
def make_fpfList(options):
    """
    Merge the default false-positive fractions with any user-supplied values.

    :param options: SplitInput object; options.fpf is a list of fpf strings
        or None
    :return: sorted list of unique fpf strings, always containing the defaults
    """
    merged = ['0.0001', '0.001', '0.01', '0.05']
    for fpf in options.fpf or []:
        if fpf not in merged:
            merged.append(fpf)
    # Lexicographic sort -- adequate for the usual '0.xxx' style values.
    merged.sort()
    return merged
def error_check(options):
    """
    Validate the parsed command line.

    Exits the process with status 1 when --compare is requested with more
    than two ensemble files; otherwise returns None.
    """
    if options.compare and len(options.ensemble_paths) > 2:
        count = len(options.ensemble_paths)
        print("\n Only 2 ensembles can be compared, {d} were specified\n".format(d=count))
        sys.exit(1)
def plotter(molecules, ensemble_lookup, options):
    """
    Write one ROC figure per ensemble: per-query curves on the left subplot,
    the whole-ensemble curve on the right. Files are saved as
    <outname>_<ensemble>.pdf in the current working directory.

    :param molecules: molecule objects parsed from the input csv
    :param ensemble_lookup: {ensemble file basename: [query names]}
    :param options: SplitInput instance with the parsed command line
    :return: None
    """
    try:
        import matplotlib
        matplotlib.use('Agg')  # headless backend; figures go to files only
        import matplotlib.pyplot as plt
    except ImportError:
        print("\n Plotting requires matplotlib to be installed\n")
        sys.exit(1)
    for ensemble in ensemble_lookup.keys():
        # create figure
        fig = plt.figure()
        # left subplot: one ROC curve per individual query
        ax1 = fig.add_subplot(121)
        for query in sorted(ensemble_lookup[ensemble]):
            query_list = []
            query_list.append(query)
            score_structure = classification.make_score_structure(molecules, query_list)
            auc_structure = classification.make_auc_structure(score_structure)
            # columns 4/5 of each row hold the cumulative FPF/TPF values
            tpf = []
            fpf = []
            for mol in auc_structure:
                fpf.append(mol[4])
                tpf.append(mol[5])
            # add axis-labels and a title
            ax1.set_xlabel('FPF')
            ax1.set_ylabel('TPF')
            title = 'query performance'
            ax1.set_title(title)
            # add plot data and labels for the legend
            lbl = query
            ax1.plot(fpf, tpf, lw=3, label=lbl)
        # add the legend (the original also drew a reversed-order legend first,
        # which this call immediately replaced -- removed as dead code)
        handles, labels = ax1.get_legend_handles_labels()
        ax1.legend(handles, labels, loc='best')
        # right subplot: ROC curve for the ensemble as a whole
        score_structure = classification.make_score_structure(molecules, ensemble_lookup[ensemble])
        auc_structure = classification.make_auc_structure(score_structure)
        tpf = []
        fpf = []
        for mol in auc_structure:
            fpf.append(mol[4])
            tpf.append(mol[5])
        ax2 = fig.add_subplot(122)
        # add axis-labels and a title
        ax2.set_xlabel('FPF')
        ax2.set_ylabel('TPF')
        title = 'ensemble performance'
        ax2.set_title(title)
        # add plot data and a label for the legend
        lbl = 'ensemble'
        ax2.plot(fpf, tpf, lw=3, label=lbl)
        # add the legend (same dead reversed-legend call removed here)
        handles, labels = ax2.get_legend_handles_labels()
        ax2.legend(handles, labels, loc='best')
        # save figure; fix: the original passed the misspelled kwarg
        # `bbobx='tight'`, so the intended tight bounding box never applied
        figurename = options.outname + '_' + ensemble.replace('.csv', '') + '.pdf'
        filename = os.path.join(os.getcwd(), figurename)
        plt.savefig(filename, bbox_inches='tight', format='pdf')
def ReadFiles(options):
    """
    Load the ensemble definition files and the input scores csv.

    Returns (molecules, ensemble_lookup) on success. On an unreadable or
    empty ensemble file it prints a message and returns 1 (run() relies on
    the resulting unpacking TypeError); an unparsable csv exits the process.

    :param options: SplitInput instance with the parsed command line
    """
    # read each ensemble file into {file basename: [query names]}
    ensemble_lookup = {}
    for ensemble_path in options.ensemble_paths:
        try:
            # `with` guarantees the handle closes even if read() fails
            # (the original leaked it on that path)
            with open(ensemble_path, 'r') as ensemble_file:
                ensemble_queries = [query.strip() for query in ensemble_file.read().split(',')]
        except IOError:
            print("\nUnable to open ensemble_list: {l}\n".format(l=ensemble_path))
            return 1
        if len(ensemble_queries) == 0 or '' in ensemble_queries:
            print("\n{l} is empty\n".format(l=ensemble_path))
            return 1
        key = os.path.basename(ensemble_path)
        ensemble_lookup[key] = ensemble_queries
    # collect the unique query names across all ensembles, preserving order
    uniq = []
    for ensemble in ensemble_lookup.keys():
        for unique_query in [query for query in ensemble_lookup[ensemble] if query not in uniq]:
            uniq.append(unique_query)
    # read input csv
    inputpath = os.path.abspath(options.inputpath)
    print('')
    print(" Reading input file ...")
    print('')
    molecules = csv_interface.read_csv(inputpath, options, uniq)
    if not molecules:
        # fix: the original printed the literal text "%s" because it never
        # supplied the formatting argument
        print("\n '%s' was unable to be parsed\n" % inputpath)
        sys.exit(1)
    return molecules, ensemble_lookup
class SplitInput:
    """
    Snapshot of the parsed command-line options used by this module.

    Pulls each flag off the interface object once so the rest of the code
    can use plain attribute access.
    """
    def __init__(self, itf):
        self.inputpath = itf.get_string('--input')        # path to the scores csv
        self.outname = itf.get_string('--outname')        # prefix for output files
        self.fpf = itf.get_list('--fpf')                  # extra false-positive fractions (or None)
        self.plot = itf.get_boolean('--plot')             # write ROC figures
        self.write_roc = itf.get_boolean('--write_roc')   # write ROC data files
        self.compare = itf.get_boolean('--compare')       # two-ensemble comparison mode
        self.ensemble_paths = itf.get_list('--ensemble_list')  # ensemble definition files
        # csv conventions: column name holding activity status and its labels
        self.status_field = 'status'
        self.active_label = '1'
        self.decoy_label = '0'
mjudsp/Tsallis | sklearn/ensemble/tests/test_weight_boosting.py | 58 | 17158 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state shared by the fixtures below
rng = np.random.RandomState(0)
# Toy sample: two well-separated 2-D clusters
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1]    # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
# Held-out points and their expected labels/targets
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
# NOTE(review): `perm` is unused in this chunk -- shuffle() below does the
# permuting; confirm it is not referenced elsewhere before removing.
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
                                     random_state=rng)
def test_samme_proba():
    """_samme_proba must stay finite and order-preserving on bad inputs."""
    # Define some example (bad) `predict_proba` output: zeros, negatives and
    # tiny values that could blow up the log-based SAMME.R transform.
    probs = np.array([[1, 1e-6, 0],
                      [0.19, 0.6, 0.2],
                      [-999, 0.51, 0.5],
                      [1e-6, 1, 1e-9]])
    probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
    # _samme_proba calls estimator.predict_proba.
    # Make a mock object so I can control what gets returned.
    class MockEstimator(object):
        def predict_proba(self, X):
            assert_array_equal(X.shape, probs.shape)
            return probs
    mock = MockEstimator()
    samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
    assert_array_equal(samme_proba.shape, probs.shape)
    assert_true(np.isfinite(samme_proba).all())
    # Make sure that the correct elements come out as smallest --
    # `_samme_proba` should preserve the ordering in each example.
    assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
    assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
    """Both boosting variants must perfectly classify the toy problem."""
    for algorithm in ('SAMME', 'SAMME.R'):
        model = AdaBoostClassifier(algorithm=algorithm, random_state=0)
        model.fit(X, y_class)
        assert_array_equal(model.predict(T), y_t_class)
        assert_array_equal(np.unique(np.asarray(y_t_class)), model.classes_)
        assert_equal(model.predict_proba(T).shape, (len(T), 2))
        assert_equal(model.decision_function(T).shape, (len(T),))
def test_regression_toy():
    """AdaBoost regression must reproduce the toy targets exactly."""
    regressor = AdaBoostRegressor(random_state=0)
    regressor.fit(X, y_regr)
    assert_array_equal(regressor.predict(T), y_t_regr)
def test_iris():
    """Both algorithms must score > 0.9 on iris; SAMME probabilities must
    actually differ from SAMME.R's."""
    classes = np.unique(iris.target)
    clf_samme = prob_samme = None
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg)
        clf.fit(iris.data, iris.target)
        assert_array_equal(classes, clf.classes_)
        proba = clf.predict_proba(iris.data)
        if alg == "SAMME":
            # keep the SAMME fit and probabilities for the regression check below
            clf_samme = clf
            prob_samme = proba
        assert_equal(proba.shape[1], len(classes))
        assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
        score = clf.score(iris.data, iris.target)
        assert score > 0.9, "Failed with algorithm %s and score = %f" % \
            (alg, score)
    # Somewhat hacky regression test: prior to
    # ae7adc880d624615a34bafdb1d75ef67051b8200,
    # predict_proba returned SAMME.R values for SAMME.
    clf_samme.algorithm = "SAMME.R"
    assert_array_less(0,
                      np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
    """AdaBoost regression must fit the Boston housing data reasonably well."""
    regressor = AdaBoostRegressor(random_state=0)
    regressor.fit(boston.data, boston.target)
    assert regressor.score(boston.data, boston.target) > 0.85
def test_staged_predict():
    """Staged predictions must have one entry per estimator and their last
    stage must match the final model's output."""
    rng = np.random.RandomState(0)
    iris_weights = rng.randint(10, size=iris.target.shape)
    boston_weights = rng.randint(10, size=boston.target.shape)
    # AdaBoost classification: both algorithms, 10 boosting stages each
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
        clf.fit(iris.data, iris.target, sample_weight=iris_weights)
        predictions = clf.predict(iris.data)
        staged_predictions = [p for p in clf.staged_predict(iris.data)]
        proba = clf.predict_proba(iris.data)
        staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
        score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
        staged_scores = [
            s for s in clf.staged_score(
                iris.data, iris.target, sample_weight=iris_weights)]
        assert_equal(len(staged_predictions), 10)
        assert_array_almost_equal(predictions, staged_predictions[-1])
        assert_equal(len(staged_probas), 10)
        assert_array_almost_equal(proba, staged_probas[-1])
        assert_equal(len(staged_scores), 10)
        assert_array_almost_equal(score, staged_scores[-1])
    # AdaBoost regression: same contract on the boston data
    clf = AdaBoostRegressor(n_estimators=10, random_state=0)
    clf.fit(boston.data, boston.target, sample_weight=boston_weights)
    predictions = clf.predict(boston.data)
    staged_predictions = [p for p in clf.staged_predict(boston.data)]
    score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
    staged_scores = [
        s for s in clf.staged_score(
            boston.data, boston.target, sample_weight=boston_weights)]
    assert_equal(len(staged_predictions), 10)
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert_equal(len(staged_scores), 10)
    assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
    """Base-estimator hyper-parameters must be reachable through GridSearchCV."""
    # Classification: search over ensemble size, tree depth and algorithm.
    booster = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
    search_space = {'n_estimators': (1, 2),
                    'base_estimator__max_depth': (1, 2),
                    'algorithm': ('SAMME', 'SAMME.R')}
    search = GridSearchCV(booster, search_space)
    search.fit(iris.data, iris.target)
    # Regression: same idea, without the algorithm choice.
    booster = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
                                random_state=0)
    search_space = {'n_estimators': (1, 2),
                    'base_estimator__max_depth': (1, 2)}
    search = GridSearchCV(booster, search_space)
    search.fit(boston.data, boston.target)
def test_pickle():
    """Fitted boosters must survive a pickle round-trip with the same score."""
    import pickle
    # Adaboost classifier: both boosting algorithms
    for alg in ['SAMME', 'SAMME.R']:
        obj = AdaBoostClassifier(algorithm=alg)
        obj.fit(iris.data, iris.target)
        score = obj.score(iris.data, iris.target)
        s = pickle.dumps(obj)
        obj2 = pickle.loads(s)
        assert_equal(type(obj2), obj.__class__)
        score2 = obj2.score(iris.data, iris.target)
        assert_equal(score, score2)
    # Adaboost regressor
    obj = AdaBoostRegressor(random_state=0)
    obj.fit(boston.data, boston.target)
    score = obj.score(boston.data, boston.target)
    s = pickle.dumps(obj)
    obj2 = pickle.loads(s)
    assert_equal(type(obj2), obj.__class__)
    score2 = obj2.score(boston.data, boston.target)
    assert_equal(score, score2)
def test_importances():
    """Informative features (the first 3, shuffle=False) must dominate the
    feature importances for both algorithms."""
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=1)
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg)
        clf.fit(X, y)
        importances = clf.feature_importances_
        assert_equal(importances.shape[0], 10)
        # every informative importance >= every non-informative one
        assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
                     True)
def test_error():
    """Deficient constructor arguments or sample weights must raise ValueError."""
    invalid_fits = [
        # negative learning rate
        (AdaBoostClassifier(learning_rate=-1).fit, (X, y_class), {}),
        # unknown boosting algorithm
        (AdaBoostClassifier(algorithm="foo").fit, (X, y_class), {}),
        # sample_weight with the wrong length / negative entries
        (AdaBoostClassifier().fit, (X, y_class),
         {'sample_weight': np.asarray([-1])}),
    ]
    for fit, args, kwargs in invalid_fits:
        assert_raises(ValueError, fit, *args, **kwargs)
def test_base_estimator():
    """Non-default base estimators (forests, SVMs) must be accepted."""
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.svm import SVC
    # XXX doesn't work with y_class because RF doesn't support classes_
    # Shouldn't AdaBoost run a LabelBinarizer?
    clf = AdaBoostClassifier(RandomForestClassifier())
    clf.fit(X, y_regr)
    clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
    clf.fit(X, y_class)
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.svm import SVR
    clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
    clf.fit(X, y_regr)
    clf = AdaBoostRegressor(SVR(), random_state=0)
    clf.fit(X, y_regr)
    # Check that an empty discrete ensemble fails in fit, not predict.
    X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
    y_fail = ["foo", "bar", 1, 2]
    clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
    assert_raises_regexp(ValueError, "worse than random",
                         clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
    """Base estimators without sample_weight support are rejected at fit time."""
    from sklearn.linear_model import LogisticRegression
    from sklearn.cluster import KMeans
    # KMeans.fit takes no sample_weight, so boosting over it must fail.
    assert_raises(ValueError,
                  AdaBoostClassifier(KMeans(), algorithm="SAMME").fit, X, y_regr)
    assert_raises(ValueError, AdaBoostRegressor(KMeans()).fit, X, y_regr)
def test_sparse_classification():
    # Check classification with sparse input.

    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
            super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
            # Remember the container type seen at fit time so the test can
            # later verify that sparsity was preserved inside the ensemble.
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
                                                   n_features=5,
                                                   random_state=42)
    # Flatten y to a 1d array
    y = np.ravel(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Every sparse container must produce results identical to the dense fit.
    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Trained on sparse format
        sparse_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train_sparse, y_train)

        # Trained on dense format
        dense_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train, y_train)

        # predict
        sparse_results = sparse_classifier.predict(X_test_sparse)
        dense_results = dense_classifier.predict(X_test)
        assert_array_equal(sparse_results, dense_results)

        # decision_function
        sparse_results = sparse_classifier.decision_function(X_test_sparse)
        dense_results = dense_classifier.decision_function(X_test)
        assert_array_equal(sparse_results, dense_results)

        # predict_log_proba
        sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
        dense_results = dense_classifier.predict_log_proba(X_test)
        assert_array_equal(sparse_results, dense_results)

        # predict_proba
        sparse_results = sparse_classifier.predict_proba(X_test_sparse)
        dense_results = dense_classifier.predict_proba(X_test)
        assert_array_equal(sparse_results, dense_results)

        # score
        sparse_results = sparse_classifier.score(X_test_sparse, y_test)
        dense_results = dense_classifier.score(X_test, y_test)
        assert_array_equal(sparse_results, dense_results)

        # staged_decision_function (generator: compare stage by stage)
        sparse_results = sparse_classifier.staged_decision_function(
            X_test_sparse)
        dense_results = dense_classifier.staged_decision_function(X_test)
        for sprase_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sprase_res, dense_res)

        # staged_predict
        sparse_results = sparse_classifier.staged_predict(X_test_sparse)
        dense_results = dense_classifier.staged_predict(X_test)
        for sprase_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sprase_res, dense_res)

        # staged_predict_proba
        sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
        dense_results = dense_classifier.staged_predict_proba(X_test)
        for sprase_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sprase_res, dense_res)

        # staged_score
        sparse_results = sparse_classifier.staged_score(X_test_sparse,
                                                        y_test)
        dense_results = dense_classifier.staged_score(X_test, y_test)
        for sprase_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sprase_res, dense_res)

        # Verify sparsity of data is maintained during training
        types = [i.data_type_ for i in sparse_classifier.estimators_]

        assert all([(t == csc_matrix or t == csr_matrix)
                    for t in types])
def test_sparse_regression():
    # Check regression with sparse input.

    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
            super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
            # Remember the container type seen at fit time so the test can
            # later verify that sparsity was preserved inside the ensemble.
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
                                    random_state=42)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Trained on sparse format
        sparse_classifier = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train_sparse, y_train)

        # Trained on dense format
        # NOTE(review): the extra ``dense_results`` alias is redundant -- it
        # is immediately reassigned in the predict step below.
        dense_classifier = dense_results = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train, y_train)

        # predict
        sparse_results = sparse_classifier.predict(X_test_sparse)
        dense_results = dense_classifier.predict(X_test)
        assert_array_equal(sparse_results, dense_results)

        # staged_predict (generator: compare stage by stage)
        sparse_results = sparse_classifier.staged_predict(X_test_sparse)
        dense_results = dense_classifier.staged_predict(X_test)
        for sprase_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sprase_res, dense_res)

        # Verify sparsity of data is maintained during training
        types = [i.data_type_ for i in sparse_classifier.estimators_]

        assert all([(t == csc_matrix or t == csr_matrix)
                    for t in types])
def test_sample_weight_adaboost_regressor():
    """
    AdaBoostRegressor should work without sample_weights in the base estimator
    The random weighted sampling is done internally in the _boost method in
    AdaBoostRegressor.
    """
    class NoSampleWeightEstimator(BaseEstimator):
        # Minimal regressor API: fit() accepts no sample_weight argument and
        # predict() returns one zero per sample.
        def fit(self, X, y):
            pass

        def predict(self, X):
            return np.zeros(X.shape[0])

    booster = AdaBoostRegressor(NoSampleWeightEstimator(), n_estimators=3)
    booster.fit(X, y_regr)
    assert_equal(len(booster.estimator_weights_),
                 len(booster.estimator_errors_))
| bsd-3-clause |
nhmc/xastropy | xastropy/spec/analysis.py | 6 | 4784 | """
#;+
#; NAME:
#; analysis
#; Version 1.0
#;
#; PURPOSE:
#; Module for Analysis of Spectra
#; 07-Sep-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import xastropy
import numpy as np
import matplotlib.pyplot as plt
import pdb
from astropy import constants as const
import xastropy.atomic as xatom
from xastropy.xutils import xdebug as xdb
#class Spectral_Line(object):
#def pixminmax(spec, zabs, wrest, vmnx):
#def x_contifit(specfil, outfil=None, savfil=None, redshift=0., divmult=1, forest_divmult=1):
# Class for Ionic columns of a given line
class Spectral_Line(object):
    """Class for analysis of a given spectral line

    Attributes:
        wrest: float
            Rest wavelength of the spectral feature
        atomic: dict
            Atomic data for the transition, filled by fill()
        analy: dict
            Analysis inputs (from .clm file or AbsID)
        measure: dict
            Measured quantities (e.g. column, EW, centroid)
    """

    # Initialize with wavelength
    def __init__(self, wrest, clm_file=None):
        # NOTE(review): clm_file is accepted but never used in this
        # initializer -- presumably intended to seed self.analy; confirm.
        self.wrest = wrest
        self.atomic = {}  # Atomic Data
        self.analy = {}  # Analysis inputs (from .clm file or AbsID)
        self.measure = {}  # Measured quantities (e.g. column, EW, centroid)

        # Fill
        self.fill()

    # Fill Analy
    def fill(self):
        """Populate atomic data and default analysis flags for self.wrest."""
        import xastropy.spec.abs_line as xspa

        # Data
        self.atomic = xspa.abs_line_data(self.wrest)

        # Default analysis settings; flags are integer on/off switches.
        self.analy['VLIM'] = [0., 0.]  # km/s
        self.analy['FLG_ANLY'] = 1  # Analyze
        self.analy['FLG_EYE'] = 0
        self.analy['FLG_LIMIT'] = 0  # No limit
        self.analy['DATFIL'] = ''
        self.analy['IONNM'] = self.atomic['name']

    # Output
    def __repr__(self):
        """Short machine-readable representation, e.g. [Spectral_Line: wrest=1215.67]."""
        return ('[{:s}: wrest={:g}]'.format(
            self.__class__.__name__, self.wrest))
#### ###############################
def pixminmax(*args):
    ''' Soon to be deprecated..
    Use Spectrum1D.pix_minmax()

    Drops straight into the debugger so any remaining caller is flagged.
    '''
    xdb.set_trace()
#### ###############################
# Calls plotvel (Crighton)
# Adapted from N. Tejos scripts
#
def velplt(specfil):
    ''' Soon to be deprecated..

    Generate a velocity plot of `specfil` via the plotspec package,
    creating a dummy .f26 file and a default transition list first.
    '''
    # Imports
    from plotspec import plotvel_util as pspv
    reload(pspv)
    import xastropy as xa
    from subprocess import Popen

    # Initialize
    # NOTE(review): this 'in locals()' check is always True here (f26_fil is
    # never defined before this point), so the dummy file is made every call.
    if 'f26_fil' not in locals():
        f26_fil = 'tmp.f26'
        command = ['touch',f26_fil]
        print(Popen(command))
        print('xa.spec.analysis.velplt: Generated a dummy f26 file -- ', f26_fil)
    if 'transfil' not in locals():
        # Default transition list shipped with the package.
        path = xa.__path__
        transfil = path[0]+'/spec/Data/initial_search.lines'

    # Call
    pspv.main([specfil, 'f26='+f26_fil, 'transitions='+transfil])
'''
#### ###############################
# Calls Barak routines to fit the continuum
# Stolen from N. Tejos by JXP
#
def x_contifit(specfil, outfil=None, savfil=None, redshift=0., divmult=1, forest_divmult=1):
import os
import barak.fitcont as bf
from barak.spec import read
from barak.io import saveobj, loadobj
import xastropy.spec.readwrite as xsr
reload(xsr)
reload(bf)
# Initialize
if savfil == None:
savfil = 'conti.sav'
if outfil == None:
outfil = 'conti.fits'
# Read spectrum + convert to Barak format
sp = xsr.readspec(specfil)
# Fit spline continuum:
if os.path.lexists(savfil): #'contfit_' + name + '.sav'):
option = raw_input('Adjust old continuum? (y)/n: ')
if option.lower() != 'n':
co_old, knots_old = loadobj(savfil) #'contfit_' + name + '.sav')
co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
oldco=co_old, knots=knots_old,
divmult=divmult,
forest_divmult=forest_divmult)
else:
co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
divmult=divmult,
forest_divmult=forest_divmult)
else:
co, knots = bf.fitqsocont(sp.wa, sp.fl, sp.er, redshift,
divmult=divmult,
forest_divmult=forest_divmult)
os.remove('_knots.sav')
# Save continuum:
saveobj(savfil, (co, knots), overwrite=1)
# Check continuum:
print('Plotting new continuum')
plt.clf()
plt.plot(sp.wa, sp.fl, drawstyle='steps-mid')
plt.plot(sp.wa, sp.co, color='r')
plt.show()
# Repeat?
confirm = raw_input('Keep continuum? (y)/n: ')
if confirm == 'y':
fits.writeto(outfil, sp, clobber=True)
else:
print('Writing to tmp.fits anyhow!')
fits.writeto('tmp.fits', sp, clobber=True)
#print name
## Output
# Data file with continuum
'''
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 20 | 4271 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
    # Test estimate_bandwidth: the estimate for this blob data must land in
    # a known plausible window.
    assert_true(0.9 <= estimate_bandwidth(X, n_samples=200) <= 1.5)
def test_estimate_bandwidth_1sample():
    # Test estimate_bandwidth when n_samples=1 and quantile<1, so that
    # n_neighbors is set to 1.
    bandwidth = estimate_bandwidth(X, n_samples=1, quantile=0.3)
    # A single sample has no neighbors, so the estimated bandwidth is zero.
    assert_equal(bandwidth, 0.)
def test_mean_shift():
    # Test MeanShift algorithm through both the estimator API and the
    # functional API; both must recover the expected number of clusters.
    bandwidth = 1.2

    def count_clusters(labels):
        # Number of distinct cluster labels discovered.
        return len(np.unique(labels))

    ms = MeanShift(bandwidth=bandwidth)
    assert_equal(count_clusters(ms.fit(X).labels_), n_clusters)

    cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
    assert_equal(count_clusters(labels), n_clusters)
def test_estimate_bandwidth_with_sparse_matrix():
    # Test estimate_bandwidth with sparse matrix
    X = sparse.lil_matrix((1000, 1000))
    msg = "A sparse matrix was passed, but dense data is required."
    # estimate_bandwidth only accepts dense input and must fail loudly.
    assert_raise_message(TypeError, msg, estimate_bandwidth, X, 200)
def test_parallel():
    # Fitting with n_jobs=2 must produce the same model as the serial fit.
    ms1 = MeanShift(n_jobs=2)
    ms1.fit(X)

    ms2 = MeanShift()
    ms2.fit(X)

    assert_array_almost_equal(ms1.cluster_centers_, ms2.cluster_centers_)
    assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
    # Test MeanShift.predict: predicting on the training data must agree
    # with the labels produced by fit_predict.
    model = MeanShift(bandwidth=1.2)
    fitted_labels = model.fit_predict(X)
    predicted_labels = model.predict(X)
    assert_array_equal(fitted_labels, predicted_labels)
def test_meanshift_all_orphans():
    # init away from the data, crash with a sensible warning
    # (seeds are far from every sample, so no point falls within bandwidth)
    ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
    msg = "No point was within bandwidth=0.1"
    assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
    # Non-regression: before fit, there should be not fitted attributes.
    ms = MeanShift()
    for fitted_attr in ("cluster_centers_", "labels_"):
        assert_false(hasattr(ms, fitted_attr))
def test_bin_seeds():
    # Test the bin seeding technique which can be used in the mean shift
    # algorithm
    # Data is just 6 points in the plane
    X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
                  [2., 1.], [2.1, 1.1], [0., 0.]])

    # With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
    # found
    ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
    test_bins = get_bin_seeds(X, 1, 1)
    test_result = set([tuple(p) for p in test_bins])
    # Empty symmetric difference <=> the two sets are identical.
    assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)

    # With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
    # found
    ground_truth = set([(1., 1.), (2., 1.)])
    test_bins = get_bin_seeds(X, 1, 2)
    test_result = set([tuple(p) for p in test_bins])
    assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)

    # With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
    # we bail and use the whole data here.
    with warnings.catch_warnings(record=True):
        test_bins = get_bin_seeds(X, 0.01, 1)
    assert_array_almost_equal(test_bins, X)

    # tight clusters around [0, 0] and [1, 1], only get two bins
    X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
                      cluster_std=0.1, random_state=0)
    test_bins = get_bin_seeds(X, 1)
    assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
Shinichi-Nakagawa/no-ball-db-server | site-cookbooks/sean_lahman/files/default/script/sabr_metrics.py | 2 | 4890 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Shinichi Nakagawa'
from script.tables import Team
class SabrMetrics(object):
    """Sabermetric team statistics (Pythagorean expectation and win rate)
    computed from Lahman-style ``Team`` rows via a SQLAlchemy session."""

    # Supported output formats for the query helpers.
    OUTPUT_DATA_TYPE_JSON = 'json'
    OUTPUT_DATA_TYPE_FLAME = 'flame'

    def __init__(self, session=None):
        # SQLAlchemy session used to run all queries.
        self.session = session

    def get_pytagorian__filter_by_league(self, year, lg, data_type=OUTPUT_DATA_TYPE_JSON):
        """
        Pythagorean expectation for every team in a league.

        :param year: season year (required)
        :param lg: league id (required)
        :param data_type: output data type (default: json)
        :return: list of per-team result dicts
        """
        return self._get_pytagorian(
            self.session.query(Team).filter(
                and_(
                    Team.yearID == year,
                    Team.lgID == lg,
                )
            ),
            # BUG FIX: data_type was previously accepted but never forwarded,
            # so the caller's choice of output format was silently ignored.
            data_type=data_type,
        )

    def get_pytagorian__filter_by_division(self, year, lg, div, data_type=OUTPUT_DATA_TYPE_JSON):
        """
        Pythagorean expectation for every team in a division.

        :param year: season year (required)
        :param lg: league id (required)
        :param div: division id (required)
        :param data_type: output data type (default: json)
        :return: list of per-team result dicts
        """
        return self._get_pytagorian(
            self.session.query(Team).filter(
                and_(
                    Team.yearID == year,
                    Team.lgID == lg,
                    Team.divID == div
                )
            ),
            # BUG FIX: forward the caller's requested output format.
            data_type=data_type,
        )

    def get_pytagorian__filter_by_team(self, year, lg, team, data_type=OUTPUT_DATA_TYPE_JSON):
        """
        Pythagorean expectation for a single team.

        :param year: season year (required)
        :param lg: league id (required)
        :param team: team id (required)
        :param data_type: output data type (default: json)
        :return: list of per-team result dicts
        """
        return self._get_pytagorian(
            self.session.query(Team).filter(
                and_(
                    Team.yearID == year,
                    Team.lgID == lg,
                    Team.teamID == team,
                )
            ),
            # BUG FIX: forward the caller's requested output format.
            data_type=data_type,
        )

    def _get_pytagorian(self, query, data_type=OUTPUT_DATA_TYPE_JSON):
        """
        Compute the Pythagorean expectation for every row of ``query``.

        :param query: SQLAlchemy query over Team (required)
        :param data_type: output data type (default: json)
        :return: list of dicts for 'json'; [] for 'flame' (not implemented)
        """
        values = []
        for row in query.order_by(
                Team.yearID.asc(),
                Team.lgID.asc(),
                Team.divID.asc(),
                Team.Rank.asc()
        ).all():
            # Basic team information plus the two derived metrics.
            values.append(
                {
                    'year': row.yearID,
                    'team': row.teamID,
                    'W': row.W,
                    'L': row.L,
                    'R': row.R,
                    'ER': row.ER,
                    'pytagorian': SabrMetrics._calc_pytagorian(row.R, row.ER),
                    'win_percent': SabrMetrics._calc_win_percent(row.G, row.W),
                }
            )
        if data_type == SabrMetrics.OUTPUT_DATA_TYPE_JSON:
            return values
        elif data_type == SabrMetrics.OUTPUT_DATA_TYPE_FLAME:
            # 'flame' output is not implemented yet.
            return []
        else:
            # Unknown format: fall back to the json-style list.
            return values

    @classmethod
    def _calc_win_percent(cls, g, w):
        """
        Winning percentage.

        :param g: games played (must be non-zero)
        :param w: wins
        :return: w / g as float
        """
        return w / g

    @classmethod
    def _calc_pytagorian(cls, r, er):
        """
        Pythagorean expectation.

        :param r: runs scored
        :param er: runs against (this schema uses the ER column -- earned
            runs -- TODO confirm whether RA was intended)
        :return: r^2 / (r^2 + er^2) as float
        """
        return (r ** 2) / ((r ** 2) + (er ** 2))
from sqlalchemy import *
from sqlalchemy.orm import *
from script.database_config import CONNECTION_TEXT, ENCODING
import matplotlib.pyplot as plt
def main():
    """Plot 2013 AL Pythagorean expectation against actual winning percentage."""
    # Wire up a SQLAlchemy session against the configured database.
    engine = create_engine(CONNECTION_TEXT, encoding=ENCODING)
    Session = sessionmaker(bind=engine, autoflush=True)
    Session.configure(bind=engine)
    lh = SabrMetrics(session=Session())
    values = lh.get_pytagorian__filter_by_league(2013, 'AL')
    print(values)
    x, y, labels = [], [], []
    for value in values:
        x.append(value['win_percent'])
        y.append(value['pytagorian'])
        # Label each point with the team id, nudged slightly left for legibility.
        labels.append({'x': value['win_percent']-0.01, 'y':value['pytagorian'], 'text': "{team}".format(**value)})
    print(x)
    print(y)
    plt.title('Pytagorean expectation & Winning percentage')
    plt.xlabel('Winning percentage')
    plt.ylabel('Pythagorean expectation')
    for label in labels:
        plt.text(label['x'], label['y'], label['text'])
    plt.plot(x, y, 'o')
    plt.show()
    # values = lh.get_pytagorian__filter_by_division(2013, 'NL', 'W')
    # print(values)
    # values = lh.get_pytagorian__filter_by_team(2011, 'AL', 'OAK')
    # print(values)
if __name__ == '__main__':
main() | mit |
nuclear-wizard/moose | python/peacock/tests/postprocessor_tab/test_LineGroupWidgetPostprocessor.py | 12 | 6298 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
import os
import unittest
import shutil
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.PostprocessorDataWidget import PostprocessorDataWidget
from peacock.PostprocessorViewer.plugins.LineGroupWidget import main
from peacock.utils import Testing
import mooseutils
class TestLineGroupWidgetPostprocessor(Testing.PeacockImageTestCase):
    """
    Test class for the ArtistToggleWidget which toggles postprocessor lines.
    """

    #: QApplication: The main App for QT, this must be static to work correctly.
    qapp = QtWidgets.QApplication(sys.argv)

    def copyfiles(self):
        """
        Copy the data file to a local temporary.
        """
        src = os.path.abspath(os.path.join(__file__, '../../input/white_elephant_jan_2016.csv'))
        shutil.copyfile(src, self._filename)

    def create(self, timer=False):
        """
        Creates the widgets for testing.

        This is done here rather than in setUp to allow for testing of delayed loading.
        """
        self._reader = mooseutils.PostprocessorReader(self._filename)
        self._data = PostprocessorDataWidget(self._reader, timer=timer)

        # Build the widgets
        self._control, self._widget, self._window = main(self._data)
        # Fix the figure size so the rendered images compare pixel-for-pixel.
        self._widget.currentWidget().FigurePlugin.setFixedSize(QtCore.QSize(625, 625))

    def setUp(self):
        """
        Creates the GUI containing the ArtistGroupWidget and the matplotlib figure axes.
        """
        # Per-class CSV name so test classes do not clobber each other's files.
        self._filename = '{}_{}'.format(self.__class__.__name__, 'test.csv')

    def tearDown(self):
        """
        Clean up.
        """
        if os.path.exists(self._filename):
            os.remove(self._filename)

    def testEmpty(self):
        """
        Test that an empty plot is possible.
        """
        self.copyfiles()
        self.create()
        self.assertImage('testEmpty.png')

        # Test that controls are initialized and disabled correctly
        self.assertEqual(self._control.AxisVariable.currentText(), "time")
        self.assertFalse(self._control._toggles['time'].isEnabled(), "Time toggle should be disabled.")

    def testSelect(self):
        """
        Test that selecting variables works.
        """
        self.copyfiles()
        self.create()
        vars = ['air_temp_set_1', 'precip_accum_set_1']
        for var in vars:
            self._control._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
            self._control._toggles[var].clicked.emit()
        self.assertImage('testSelect.png')
        self.assertEqual('; '.join(vars), self._window.axes()[0].get_yaxis().get_label().get_text())
        self.assertEqual('time', self._window.axes()[0].get_xaxis().get_label().get_text())

        # Switch axis: move the first variable to the secondary (right) axis.
        self._control._toggles[vars[0]].PlotAxis.setCurrentIndex(1)
        self._control._toggles[vars[0]].clicked.emit()
        self.assertImage('testSelect2.png')
        self.assertEqual(vars[0], self._window.axes()[1].get_yaxis().get_label().get_text())
        self.assertEqual(vars[1], self._window.axes()[0].get_yaxis().get_label().get_text())
        self.assertEqual('time', self._window.axes()[0].get_xaxis().get_label().get_text())

    def testChangePrimaryVariable(self):
        """
        Test that the primary variable may be modified.
        """
        self.copyfiles()
        self.create()

        # Plot something
        x_var = 'snow_water_equiv_set_1'
        y_var = 'precip_accum_set_1'
        self._control._toggles[y_var].CheckBox.setCheckState(QtCore.Qt.Checked)
        self._control._toggles[y_var].clicked.emit()
        self.assertImage('testChangePrimaryVariable0.png')

        # Change the primary variable; the new x-variable's own toggle must
        # become disabled, and 'time' becomes selectable again.
        self._control.AxisVariable.setCurrentIndex(5)
        self._control.AxisVariable.currentIndexChanged.emit(5)
        self.assertEqual(self._control.AxisVariable.currentText(), x_var)
        self.assertFalse(self._control._toggles[x_var].isEnabled(), "Toggle should be disabled.")
        self.assertTrue(self._control._toggles['time'].isEnabled(), "Toggle should be enabled.")
        self.assertImage('testChangePrimaryVariable1.png')

    def testDelayLoadAndUnload(self):
        """
        Test that delayed loading and removal of files works.
        """
        self.create()

        # Plot should be empty and the message should be visible.
        self.assertImage('testEmpty.png')
        self.assertTrue(self._control.NoDataMessage.isVisible())

        # Load data
        self.copyfiles()
        self._data.load()
        self.assertFalse(self._control.NoDataMessage.isVisible())

        # Plot something
        var = 'air_temp_set_1'
        self._control._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
        self._control._toggles[var].clicked.emit()
        self.assertImage('testDelayLoadPlot.png')

        # Remove data
        os.remove(self._filename)
        self._data.load()
        self.assertTrue(self._control.NoDataMessage.isVisible())
        self.assertImage('testEmpty.png')

        # Re-load data
        self.copyfiles()
        self._data.load()
        self.assertFalse(self._control.NoDataMessage.isVisible())
        self._control._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
        self._control._toggles[var].clicked.emit()
        self.assertImage('testDelayLoadPlot2.png', allowed=0.98) # The line color/style is different because the cycle keeps going

    def testRepr(self):
        """
        Test script creation.
        """
        self.copyfiles()
        self.create()
        var = 'air_temp_set_1'
        self._control._toggles[var].CheckBox.setCheckState(QtCore.Qt.Checked)
        self._control._toggles[var].clicked.emit()

        output, imports = self._control.repr()
        self.assertIn("x = data('time')", output)
        self.assertIn("y = data('air_temp_set_1')", output)
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
| lgpl-2.1 |
pdamodaran/yellowbrick | yellowbrick/text/tsne.py | 1 | 14415 | # yellowbrick.text.tsne
# Implements TSNE visualizations of documents in 2D space.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Author: Rebecca Bilbro <bilbro@gmail.com>
# Created: Mon Feb 20 06:33:29 2017 -0500
#
# Copyright (C) 2016 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: tsne.py [6aa9198] benjamin@bengfort.com $
"""
Implements TSNE visualizations of documents in 2D space.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from collections import defaultdict
from yellowbrick.draw import manual_legend
from yellowbrick.text.base import TextVisualizer
from yellowbrick.style.colors import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError
from sklearn.manifold import TSNE
from sklearn.pipeline import Pipeline
from sklearn.decomposition import TruncatedSVD, PCA
##########################################################################
## Quick Methods
##########################################################################
def tsne(X, y=None, ax=None, decompose='svd', decompose_by=50, classes=None,
         colors=None, colormap=None, alpha=0.7, **kwargs):
    """
    Display a projection of a vectorized corpus in two dimensions using TSNE,
    a nonlinear dimensionality reduction method that is particularly well
    suited to embedding in two or three dimensions for visualization as a
    scatter plot. TSNE is widely used in text analysis to show clusters or
    groups of documents or utterances and their relative proximities.

    Parameters
    ----------

    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features representing the corpus of
        vectorized documents to visualize with tsne.

    y : ndarray or Series of length n
        An optional array or series of target or class values for instances.
        If this is specified, then the points will be colored according to
        their class. Often cluster labels are passed in to color the documents
        in cluster space, so this method is used both for classification and
        clustering methods.

    ax : matplotlib axes
        The axes to plot the figure on.

    decompose : string or None
        A preliminary decomposition is often used prior to TSNE to make the
        projection faster. Specify `"svd"` for sparse data or `"pca"` for
        dense data. If decompose is None, the original data set will be used.

    decompose_by : int
        Specify the number of components for preliminary decomposition, by
        default this is 50; the more components, the slower TSNE will be.

    classes : list of strings
        The names of the classes in the target, used to create a legend.

    colors : list or tuple of colors
        Specify the colors for each individual class

    colormap : string or matplotlib cmap
        Sequential colormap for continuous target

    alpha : float, default: 0.7
        Specify a transparency where 1 is completely opaque and 0 is completely
        transparent. This property makes densely clustered points more visible.

    kwargs : dict
        Pass any additional keyword arguments to the TSNE transformer.

    Returns
    -------
    ax : matplotlib axes
        Returns the axes that the parallel coordinates were drawn on.
    """
    # Instantiate the visualizer.
    #
    # BUG FIX: the options were previously passed positionally, but the
    # TSNEVisualizer signature is (ax, decompose, decompose_by, labels,
    # classes, colors, colormap, random_state, alpha) -- so ``colors`` landed
    # on ``classes``, ``colormap`` on ``colors`` and ``alpha`` on
    # ``colormap``. Bind each option by keyword instead; ``classes`` feeds
    # ``labels``, which is what names the legend entries.
    visualizer = TSNEVisualizer(
        ax=ax, decompose=decompose, decompose_by=decompose_by,
        labels=classes, colors=colors, colormap=colormap, alpha=alpha,
        **kwargs
    )

    # Fit and transform the visualizer (calls draw)
    visualizer.fit(X, y, **kwargs)
    visualizer.transform(X)

    # Return the axes object on the visualizer
    return visualizer.ax
##########################################################################
## TSNEVisualizer
##########################################################################
class TSNEVisualizer(TextVisualizer):
"""
Display a projection of a vectorized corpus in two dimensions using TSNE,
a nonlinear dimensionality reduction method that is particularly well
suited to embedding in two or three dimensions for visualization as a
scatter plot. TSNE is widely used in text analysis to show clusters or
groups of documents or utterances and their relative proximities.
TSNE will return a scatter plot of the vectorized corpus, such that each
point represents a document or utterance. The distance between two points
in the visual space is embedded using the probability distribution of
pairwise similarities in the higher dimensionality; thus TSNE shows
clusters of similar documents and the relationships between groups of
documents as a scatter plot.
TSNE can be used with either clustering or classification; by specifying
the ``classes`` argument, points will be colored based on their similar
traits. For example, by passing ``cluster.labels_`` as ``y`` in ``fit()``, all
points in the same cluster will be grouped together. This extends the
neighbor embedding with more information about similarity, and can allow
better interpretation of both clusters and classes.
For more, see https://lvdmaaten.github.io/tsne/
Parameters
----------
ax : matplotlib axes
The axes to plot the figure on.
decompose : string or None, default: ``'svd'``
A preliminary decomposition is often used prior to TSNE to make the
projection faster. Specify ``"svd"`` for sparse data or ``"pca"`` for
dense data. If None, the original data set will be used.
decompose_by : int, default: 50
Specify the number of components for preliminary decomposition, by
default this is 50; the more components, the slower TSNE will be.
labels : list of strings
The names of the classes in the target, used to create a legend.
Labels must match names of classes in sorted order.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Sequential colormap for continuous target
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. The random state is applied to the preliminary
decomposition as well as tSNE.
alpha : float, default: 0.7
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
kwargs : dict
Pass any additional keyword arguments to the TSNE transformer.
"""
# NOTE: cannot be np.nan
NULL_CLASS = None
    def __init__(self, ax=None, decompose='svd', decompose_by=50,
                 labels=None, classes=None, colors=None, colormap=None,
                 random_state=None, alpha=0.7, **kwargs):
        # NOTE(review): ``classes`` is accepted here but never stored or
        # used by this initializer -- confirm whether it should feed
        # ``labels`` or be removed.

        # Visual Parameters
        self.alpha = alpha
        self.labels = labels
        self.colors = colors
        self.colormap = colormap
        self.random_state = random_state

        # Fetch TSNE kwargs from kwargs by popping only keys belonging to TSNE params
        tsne_kwargs = {
            key: kwargs.pop(key)
            for key in TSNE().get_params()
            if key in kwargs
        }
        self.transformer_ = self.make_transformer(decompose, decompose_by, tsne_kwargs)

        # Call super at the end so that size and title are set correctly
        super(TSNEVisualizer, self).__init__(ax=ax, **kwargs)
def make_transformer(self, decompose='svd', decompose_by=50, tsne_kwargs={}):
"""
Creates an internal transformer pipeline to project the data set into
2D space using TSNE, applying an pre-decomposition technique ahead of
embedding if necessary. This method will reset the transformer on the
class, and can be used to explore different decompositions.
Parameters
----------
decompose : string or None, default: ``'svd'``
A preliminary decomposition is often used prior to TSNE to make
the projection faster. Specify ``"svd"`` for sparse data or ``"pca"``
for dense data. If decompose is None, the original data set will
be used.
decompose_by : int, default: 50
Specify the number of components for preliminary decomposition, by
default this is 50; the more components, the slower TSNE will be.
Returns
-------
transformer : Pipeline
Pipelined transformer for TSNE projections
"""
# TODO: detect decompose by inferring from sparse matrix or dense or
# If number of features > 50 etc.
decompositions = {
'svd': TruncatedSVD,
'pca': PCA,
}
if decompose and decompose.lower() not in decompositions:
raise YellowbrickValueError(
"'{}' is not a valid decomposition, use {}, or None".format(
decompose, ", ".join(decompositions.keys())
)
)
# Create the pipeline steps
steps = []
# Add the pre-decomposition
if decompose:
klass = decompositions[decompose]
steps.append((decompose, klass(
n_components=decompose_by, random_state=self.random_state)))
# Add the TSNE manifold
steps.append(('tsne', TSNE(
n_components=2, random_state=self.random_state, **tsne_kwargs)))
# return the pipeline
return Pipeline(steps)
    def fit(self, X, y=None, **kwargs):
        """
        The fit method is the primary drawing input for the TSNE projection
        since the visualization requires both X and an optional y value. The
        fit method expects an array of numeric vectors, so text documents must
        be vectorized before passing them to this method.

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features representing the corpus of
            vectorized documents to visualize with tsne.

        y : ndarray or Series of length n
            An optional array or series of target or class values for
            instances. If this is specified, then the points will be colored
            according to their class. Often cluster labels are passed in to
            color the documents in cluster space, so this method is used both
            for classification and clustering methods.

        kwargs : dict
            Pass generic arguments to the drawing method

        Returns
        -------
        self : instance
            Returns the instance of the transformer/visualizer
        """
        # Store the classes we observed in y
        if y is not None:
            self.classes_ = np.unique(y)
        elif y is None and self.labels is not None:
            # No target but user-supplied labels: treat the corpus as a
            # single class named by the first label.
            self.classes_ = np.array([self.labels[0]])
        else:
            # No target and no labels: a single unnamed (null) class.
            self.classes_ = np.array([self.NULL_CLASS])

        # Fit our internal transformer and transform the data.
        vecs = self.transformer_.fit_transform(X)
        self.n_instances_ = vecs.shape[0]

        # Draw the vectors
        self.draw(vecs, y, **kwargs)

        # Fit always returns self.
        return self
def draw(self, points, target=None, **kwargs):
    """Scatter the 2D projected points, colored by class.

    ``points`` are the decomposed coordinates produced in ``fit``;
    ``target`` optionally assigns each point to a class so that points
    sharing a class share a color.  Without a target, everything is
    plotted as one cloud under the single observed class.
    """
    # Resolve display labels against the observed classes.
    labels = self.labels if self.labels is not None else self.classes_
    if len(labels) != len(self.classes_):
        raise YellowbrickValueError((
            "number of supplied labels ({}) does not "
            "match the number of classes ({})"
        ).format(len(labels), len(self.classes_)))

    # One color per label, then a class -> label lookup table.
    self.color_values_ = resolve_colors(
        n_colors=len(labels), colormap=self.colormap, colors=self.colors)
    colors = dict(zip(labels, self.color_values_))
    labels = dict(zip(self.classes_, labels))

    # Bucket point coordinates by label for per-class scatter calls.
    series = defaultdict(lambda: {'x': [], 'y': []})
    if target is None:
        only = self.classes_[0]
        for px, py in points:
            series[only]['x'].append(px)
            series[only]['y'].append(py)
    else:
        for t, point in zip(target, points):
            bucket = series[labels[t]]
            bucket['x'].append(point[0])
            bucket['y'].append(point[1])

    for name, coords in series.items():
        self.ax.scatter(
            coords['x'], coords['y'], c=colors[name],
            alpha=self.alpha, label=name
        )
def finalize(self, **kwargs):
    """Add the title, strip the uninformative axes, and place the legend.

    TSNE coordinates carry no axis meaning, so tick marks are removed;
    the legend is drawn outside the axes box unless only the null class
    was observed.
    """
    self.set_title(
        "TSNE Projection of {} Documents".format(self.n_instances_)
    )

    # TSNE axes have no interpretable scale -- drop the ticks entirely.
    self.ax.set_xticks([])
    self.ax.set_yticks([])

    # Only show a legend when real classes exist; shrink the axes to
    # make room for it outside the plot area.
    if not all(self.classes_ == np.array([self.NULL_CLASS])):
        box = self.ax.get_position()
        self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        manual_legend(
            self, self.classes_, self.color_values_,
            loc='center left', bbox_to_anchor=(1, 0.5)
        )
| apache-2.0 |
karthikvadla16/spark-tk | regression-tests/sparktkregtests/testcases/models/linear_regression_test.py | 10 | 6006 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests Linear Regression Model against known values"""
import unittest
from sparktkregtests.lib import sparktk_test
class LinearRegression(sparktk_test.SparkTKTestCase):
    """Exercises sparktk linear regression against known coefficients."""

    # Feature columns shared by every test case.
    OBS_COLS = ['c1', 'c2', 'c3', 'c4']

    def setUp(self):
        """Import the generated regression csv into a frame."""
        super(LinearRegression, self).setUp()
        dataset = self.get_file("linear_regression_gen.csv")
        schema = [("c1", float),
                  ("c2", float),
                  ("c3", float),
                  ("c4", float),
                  ("label", float)]
        self.frame = self.context.frame.import_csv(
            dataset, schema=schema)

    def _train(self, **train_args):
        """Train a model on the shared frame with optional extra arguments."""
        return self.context.models.regression.linear_regression.train(
            self.frame, self.OBS_COLS, "label", **train_args)

    def _train_and_validate(self, **train_args):
        """Train, predict, and validate against the known solution."""
        model = self._train(**train_args)
        predict = model.predict(self.frame, self.OBS_COLS)
        self._validate_results(model, predict)

    def test_model_save(self):
        """Test saving a linear regression model"""
        model = self._train()
        model_path = self.get_name("LinearRegression")
        model.save(model_path)
        reloaded_model = self.context.load(model_path)
        output = reloaded_model.test(self.frame, label_column="label")
        self.assertAlmostEqual(
            reloaded_model.mean_squared_error, output.mean_squared_error)

    def test_model_test(self):
        """Test test functionality"""
        model = self._train()
        output = model.test(self.frame, label_column="label")
        for metric in ("mean_squared_error", "root_mean_squared_error",
                       "mean_absolute_error", "explained_variance"):
            self.assertAlmostEqual(getattr(model, metric),
                                   getattr(output, metric))

    def test_model_predict_output(self):
        """Test output format of predict"""
        self._train_and_validate()

    def test_model_elastic_net(self):
        """Test elastic net argument"""
        self._train_and_validate(elastic_net_parameter=0.3)

    def test_model_fix_intercept(self):
        """Test fix intercept argument"""
        self._train_and_validate(fit_intercept=False)

    def test_model_max_iterations(self):
        """Test max iterations argument"""
        self._train_and_validate(max_iterations=70)

    def test_model_reg_param(self):
        """Test regularization parameter argument"""
        self._train_and_validate(reg_param=0.000000002)

    def test_model_standardization(self):
        """Test test non-standardized data"""
        self._train_and_validate(standardization=False)

    def test_model_tolerance(self):
        """Test test a different model tolerance"""
        self._train_and_validate(convergence_tolerance=0.0000000000000000001)

    def _validate_results(self, res, predict):
        """Validate metrics, weights, and per-row predictions of *res*."""
        self.assertAlmostEqual(res.mean_absolute_error, 0.0)
        self.assertAlmostEqual(res.root_mean_squared_error, 0.0)
        self.assertAlmostEqual(res.mean_squared_error, 0.0)
        self.assertAlmostEqual(res.intercept, 0.0)
        self.assertEqual(res.label_column, "label")
        self.assertItemsEqual(res.observation_columns, self.OBS_COLS)
        self.assertLess(res.iterations, 150)
        # Expected solution of the generated dataset.
        for expected, actual in zip([0.5, -0.7, -0.24, 0.4], res.weights):
            self.assertAlmostEqual(expected, actual, places=4)
        pd_res = predict.to_pandas(predict.count())
        for index, row in pd_res.iterrows():
            self.assertAlmostEqual(row["label"], row["predicted_value"],
                                   places=4)


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
accosmin/nanocv | exp/plotter.py | 1 | 4570 | import os
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# Shared font sizes for every figure produced by this module.
SMALL_SIZE = 6
MEDIUM_SIZE = 8
BIGGER_SIZE = 10

# Apply the sizes globally through matplotlib's rc machinery.
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
def load_trial_csv(path, delimiter = ";"):
    """Load the csv file of one configuration trial.

    Expected columns: (epoch, [train|valid|test] x [loss|error],
    xnorm, gnorm, seconds)+.  Returns ``(name, data)`` where *name*
    is the file's base name without the ``.csv`` suffix.
    """
    name = os.path.basename(path).replace(".csv", "")
    return name, mlab.csv2rec(path, delimiter=delimiter, names=None)
def load_config_csv(path, delimiter = ";"):
    """Load the csv summary file of a configuration.

    Expected columns: (trial, optimum epoch, [train|valid|test] x
    [loss|error], xnorm, gnorm, seconds, speed)+.  Returns
    ``(name, data)`` where *name* is the base name without ``.csv``.
    """
    name = os.path.basename(path).replace(".csv", "")
    return name, mlab.csv2rec(path, delimiter=delimiter, names=None)
def load_csvs(paths, loader, delimiter = ";"):
    """Apply *loader* to every path, collecting names and data separately."""
    loaded = [loader(path, delimiter) for path in paths]
    names = [name for name, _ in loaded]
    datas = [data for _, data in loaded]
    return names, datas
def plot_trial(spath, ppath):
    """Plot the training evolution of one model into the pdf *ppath*.

    Produces one page for the losses and one for the errors, each with
    the train/valid/test curves overlaid.
    """
    title, data = load_trial_csv(spath)
    with PdfPages(ppath) as pdf:
        for kind in ("loss", "error"):
            xname = "epoch"
            curves = ["train_" + kind, "valid_" + kind, "test_" + kind]
            for yname, style in zip(curves, ("r-", "g-", "b-")):
                plt.plot(data[xname], data[yname], style, label = yname)
            plt.title(title, weight = "bold")
            plt.xlabel(xname, fontsize = "smaller")
            plt.ylabel(kind, fontsize = "smaller")
            plt.legend(fontsize = "smaller")
            plt.grid(True, linestyle='--')
            pdf.savefig()
            plt.close()
def plot_trials(spaths, ppath):
    """Overlay the training evolution of several models in one pdf.

    One page per tracked metric, with one labelled curve per model.
    """
    names, datas = load_csvs(spaths, load_trial_csv)
    metrics = ("train_loss", "train_error", "valid_loss", "valid_error",
               "test_loss", "test_error", "seconds")
    with PdfPages(ppath) as pdf:
        for yname in metrics:
            for name, data in zip(names, datas):
                plt.plot(data["epoch"], data[yname], label = name)
            plt.xlabel("epoch", fontsize = "smaller")
            # Strip the dataset prefix so e.g. train_loss is labelled "loss".
            ylabel = yname
            for prefix in ("train_", "valid_", "test_"):
                ylabel = ylabel.replace(prefix, "")
            plt.ylabel(ylabel, fontsize = "smaller")
            plt.title(yname, weight = "bold")
            plt.legend(fontsize = "smaller")
            plt.grid(True, linestyle='--')
            pdf.savefig()
            plt.close()
def plot_configs(spaths, ppath, names):
    """Box-plot summary statistics of several models in one pdf.

    One page per summary column, each showing one box per model labelled
    with the corresponding entry of *names*.
    """
    _, datas = load_csvs(spaths, load_config_csv)
    assert(len(names) == len(datas))
    title = os.path.basename(ppath).replace(".pdf", "").replace("result_", "")
    with PdfPages(ppath) as pdf:
        for yname in ("epoch", "train_loss", "train_error", "valid_loss",
                      "valid_error", "test_loss", "test_error", "seconds",
                      "speed"):
            plt.boxplot([data[yname] for data in datas], labels = names)
            plt.title(title, weight = "bold")
            plt.ylabel(yname, fontsize = "smaller")
            plt.grid(True, linestyle='--')
            pdf.savefig()
            plt.close()
| mit |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/util/decorators.py | 9 | 9665 | from pandas.compat import StringIO, callable
from pandas.lib import cache_readonly
import sys
import warnings
from functools import wraps
def deprecate(name, alternative, alt_name=None):
    """Return a wrapper around *alternative* that warns *name* is deprecated.

    The FutureWarning names *alt_name* (defaulting to the alternative's
    ``__name__``) as the replacement, then forwards the call unchanged.
    """
    replacement = alt_name or alternative.__name__

    def wrapper(*args, **kwargs):
        warnings.warn("%s is deprecated. Use %s instead" % (name, replacement),
                      FutureWarning, stacklevel=2)
        return alternative(*args, **kwargs)
    return wrapper
def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
    """Decorator factory that deprecates a keyword argument of a function.

    Parameters
    ----------
    old_arg_name : str
        Name of the argument to deprecate.
    new_arg_name : str
        Name of the preferred replacement argument.
    mapping : dict or callable, optional
        Translates old argument values into new ones.  A callable must do
        its own value checking; values not found in a dict are forwarded
        unchanged.
    stacklevel : int, default 2
        Passed through to ``warnings.warn``.

    Examples
    --------
    >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
    ... def f(columns=''):
    ...     print(columns)
    """
    if mapping is not None and not hasattr(mapping, 'get') and \
            not callable(mapping):
        raise TypeError("mapping from old to new argument values "
                        "must be dict or callable!")

    def _deprecate_kwarg(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            old_value = kwargs.pop(old_arg_name, None)
            if old_value is None:
                # Old keyword not supplied -- nothing to translate or warn.
                return func(*args, **kwargs)

            if mapping is None:
                new_value = old_value
                msg = "the '%s' keyword is deprecated, " \
                      "use '%s' instead" % (old_arg_name, new_arg_name)
            else:
                if hasattr(mapping, 'get'):
                    new_value = mapping.get(old_value, old_value)
                else:
                    new_value = mapping(old_value)
                msg = "the %s=%r keyword is deprecated, " \
                      "use %s=%r instead" % \
                      (old_arg_name, old_value, new_arg_name, new_value)

            warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
            # Supplying both the old and the new keyword is ambiguous.
            if kwargs.get(new_arg_name, None) is not None:
                raise TypeError("Can only specify '%s' or '%s', not both" %
                                (old_arg_name, new_arg_name))
            kwargs[new_arg_name] = new_value
            return func(*args, **kwargs)
        return wrapper
    return _deprecate_kwarg
# Substitution and Appender are derived from matplotlib.docstring (1.1.0)
# module http://matplotlib.sourceforge.net/users/license.html
class Substitution(object):
    """Decorator performing %-substitution on a function's docstring.

    Construct with either positional or keyword substitution values
    (never both), then decorate the target function; its docstring is
    formatted with ``doc % params``.  Robust to ``func.__doc__`` being
    None (e.g. under ``-OO``).

    Examples
    --------
    >>> @Substitution(author='Jason')
    ... def some_function(x):
    ...     "%(author)s wrote this function"
    """

    def __init__(self, *args, **kwargs):
        if args and kwargs:
            raise AssertionError( "Only positional or keyword args are allowed")
        self.params = args or kwargs

    def __call__(self, func):
        if func.__doc__:
            func.__doc__ = func.__doc__ % self.params
        return func

    def update(self, *args, **kwargs):
        """Update ``self.params`` (assumed to be a dict) in place."""
        self.params.update(*args, **kwargs)

    @classmethod
    def from_params(cls, params):
        """Build a Substitution holding a *reference* to *params*.

        Useful when *params* is mutable and may change after this class
        is constructed; unlike ``*args``/``**kwargs`` the values are not
        copied.
        """
        instance = cls()
        instance.params = params
        return instance
class Appender(object):
    """Decorator appending an addendum to a function's docstring.

    The addendum is joined to the original docstring with *join*; with
    ``indents > 0`` the addendum is indented first.  Robust to
    ``func.__doc__`` being None (e.g. under ``-OO``).

    Examples
    --------
    >>> @Appender("Copyright (c) 2009", join='\\n')
    ... def my_dog(has='fleas'):
    ...     "This docstring will have a copyright below"
    """

    def __init__(self, addendum, join='', indents=0):
        self.addendum = indent(addendum, indents=indents) if indents > 0 else addendum
        self.join = join

    def __call__(self, func):
        # Normalize missing docstring/addendum to the empty string.
        if not func.__doc__:
            func.__doc__ = ''
        if not self.addendum:
            self.addendum = ''
        func.__doc__ = self.join.join([func.__doc__, self.addendum])
        return func
def indent(text, indents=1):
    """Re-join the lines of *text* with *indents* pad units after each newline.

    Returns '' for empty or non-string input.
    """
    if not text or not isinstance(text, str):
        return ''
    sep = ''.join(['\n'] + [' '] * indents)
    return sep.join(text.split('\n'))
def suppress_stdout(f):
    """Decorator that silences anything *f* prints to stdout.

    ``sys.stdout`` is swapped for an in-memory buffer while *f* runs and
    restored afterwards, even if *f* raises.

    Fixes over the previous version: the wrapped call now propagates
    *f*'s return value (it was silently discarded before), and
    ``functools.wraps`` preserves *f*'s name and docstring.
    """
    @wraps(f)
    def wrapped(*args, **kwargs):
        try:
            sys.stdout = StringIO()
            return f(*args, **kwargs)
        finally:
            # Always restore the real stream, even on error.
            sys.stdout = sys.__stdout__
    return wrapped
class KnownFailureTest(Exception):
    """Raised to mark a test as a known, expected failure."""
    pass
def knownfailureif(fail_condition, msg=None):
    """Make the decorated function raise KnownFailureTest when the condition holds.

    Parameters
    ----------
    fail_condition : bool or callable
        Whether the decorated test is a known failure.  A callable is
        evaluated at call time, which lets expensive checks (e.g. costly
        imports) be deferred until the suite actually runs.
    msg : str, optional
        Message for the raised KnownFailureTest.

    Returns
    -------
    decorator : function
        Applies ``nose.tools.make_decorator`` so the wrapped function
        keeps its name and metadata.
    """
    if msg is None:
        msg = 'Test skipped due to known failure'

    # Normalize to a zero-argument callable evaluated at test time.
    fail_val = fail_condition if callable(fail_condition) \
        else (lambda: fail_condition)

    def knownfail_decorator(f):
        # Local import: only incur the nose dependency when decorating.
        import nose

        def knownfailer(*args, **kwargs):
            if not fail_val():
                return f(*args, **kwargs)
            raise KnownFailureTest(msg)
        return nose.tools.make_decorator(f)(knownfailer)
    return knownfail_decorator
def make_signature(func):
    """Return a string representation of *func*'s argument list.

    Returns a tuple ``(args, spec_args)`` where *args* renders each
    argument together with its default value (plus ``*varargs`` and
    ``**keywords`` entries when present) and *spec_args* is the plain
    list of positional argument names.

    Examples
    --------
    >>> def f(a, b, c=2):
    ...     return a * b * c
    >>> make_signature(f)[0]
    ['a', 'b', 'c=2']

    Fixes over the previous version: ``inspect.getargspec`` was removed
    in Python 3.11, so ``getfullargspec`` is preferred when available.
    """
    try:
        from inspect import getfullargspec as _getargspec
    except ImportError:  # Python 2 fallback
        from inspect import getargspec as _getargspec
    spec = _getargspec(func)
    if spec.defaults is None:
        defaults = ('',) * len(spec.args)
    else:
        n_wo_defaults = len(spec.args) - len(spec.defaults)
        defaults = ('',) * n_wo_defaults + tuple(spec.defaults)
    args = []
    for var, default in zip(spec.args, defaults):
        args.append(var if default == '' else var + '=' + repr(default))
    if spec.varargs:
        args.append('*' + spec.varargs)
    # getfullargspec names the **kwargs slot 'varkw'; legacy getargspec
    # called it 'keywords'.
    varkw = getattr(spec, 'varkw', None) or getattr(spec, 'keywords', None)
    if varkw:
        args.append('**' + varkw)
    return args, spec.args
| apache-2.0 |
untom/scikit-learn | sklearn/metrics/__init__.py | 52 | 3394 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
# Public names exported by ``from sklearn.metrics import *``.
# Fix: 'pairwise_distances_argmin_min' was listed twice; the duplicate
# entry has been removed.
__all__ = [
    'accuracy_score',
    'adjusted_mutual_info_score',
    'adjusted_rand_score',
    'auc',
    'average_precision_score',
    'classification_report',
    'cluster',
    'completeness_score',
    'confusion_matrix',
    'consensus_score',
    'coverage_error',
    'euclidean_distances',
    'explained_variance_score',
    'f1_score',
    'fbeta_score',
    'get_scorer',
    'hamming_loss',
    'hinge_loss',
    'homogeneity_completeness_v_measure',
    'homogeneity_score',
    'jaccard_similarity_score',
    'label_ranking_average_precision_score',
    'label_ranking_loss',
    'log_loss',
    'make_scorer',
    'matthews_corrcoef',
    'mean_absolute_error',
    'mean_squared_error',
    'median_absolute_error',
    'mutual_info_score',
    'normalized_mutual_info_score',
    'pairwise_distances',
    'pairwise_distances_argmin',
    'pairwise_distances_argmin_min',
    'pairwise_kernels',
    'precision_recall_curve',
    'precision_recall_fscore_support',
    'precision_score',
    'r2_score',
    'recall_score',
    'roc_auc_score',
    'roc_curve',
    'SCORERS',
    'silhouette_samples',
    'silhouette_score',
    'v_measure_score',
    'zero_one_loss',
    'brier_score_loss',
]
| bsd-3-clause |
Biles430/FPF_PIV | FPF_PIV_CODE/Drummonds_Scripts/PIV_readin.py | 1 | 11334 | #create movie
#from __future__ import print_function
#import os
import pandas as pd
import numpy as np
import PIV
import time
import sys
import h5py
from scipy.signal import medfilt
import matplotlib.pyplot as plt
import hotwire as hw
################################################################
# PURPOSE
# 1. Readin in PIV data_sets
# 2. Find Mask
# 3. Resize
# 4. Determine mean profiles and HO quantities
################################################################
#Print iterations progress
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
    """Render or refresh an in-place terminal progress bar.

    Call once per loop iteration.  Prints a carriage-returned bar line
    and emits a final newline when ``iteration == total``.

    @params:
        iteration - Required : current iteration (Int)
        total     - Required : total iterations (Int)
        prefix    - Optional : prefix string (Str)
        suffix    - Optional : suffix string (Str)
        decimals  - Optional : positive number of decimals in percent complete (Int)
        length    - Optional : character length of bar (Int)
        fill      - Optional : bar fill character (Str)
    """
    fmt = "{0:." + str(decimals) + "f}"
    percent = fmt.format(100 * (iteration / float(total)))
    filled = int(length * iteration // total)
    bar = fill * filled + '-' * (length - filled)
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
    if iteration == total:
        print()
def piv_readin_mod(base_name_input, data_sets, sizex, sizey):
    """Read a sequence of DaVis PIV txt exports into velocity arrays.

    Files are expected at ``<base_name_input>/B00001.txt`` ...
    ``B<data_sets>.txt``, each tab-separated with one header row and the
    columns (x, y, u, v).  The x/y grids of the last file provide the
    returned axes (all files are assumed to share the same grid).

    Returns (x_axis, y_axis, u, v, indices) where u and v have shape
    (data_sets, sizex, sizey).
    """
    indices = np.arange(1, data_sets + 1)
    u_all = np.ndarray([len(indices), sizex, sizey])
    v_all = np.ndarray([len(indices), sizex, sizey])
    printProgressBar(0, len(indices), prefix = 'Reading In:', suffix = 'Complete', length = 50)
    for count, i in enumerate(indices):
        # Build the zero-padded per-snapshot file name.
        loc = base_name_input + '/B' + str('{0:05}'.format(i)) + '.txt'
        frame = pd.read_csv(loc, sep='\t', skiprows=1, header=None)
        # Rename columns to the designated DaVis output.
        frame.columns = ['Xlocation (mm)', 'Ylocation (mm)', 'U (m/sec)', 'V (m/sec)']
        grid_x = np.array(np.reshape(frame['Xlocation (mm)'], (sizex, sizey)))
        grid_y = np.array(np.reshape(frame['Ylocation (mm)'], (sizex, sizey)))
        u_all[count] = np.array(np.reshape(frame['U (m/sec)'], (sizex, sizey)))
        v_all[count] = np.array(np.reshape(frame['V (m/sec)'], (sizex, sizey)))
        printProgressBar(i, len(indices), prefix = 'Reading In:', suffix = 'Complete', length = 50)
    x_axis = grid_x[0]
    y_axis = grid_y[:, 0]
    print('Done Read in!')
    return (x_axis, y_axis, u_all, v_all, indices)
def filt_images(u_vel, v_vel, Uinfinity, sizey):
    """Flag bad / non-physical PIV snapshots by overwriting them with NaN.

    A snapshot is rejected when the mean streamwise velocity over the top
    third of the frame is (a) below ``Uinfinity/10`` (essentially a dropped
    frame), or (b) more than one standard deviation away from the ensemble
    mean -- the latter applied iteratively over 4 passes, recomputing the
    ensemble statistics each time.

    Bug fixes over the previous version: the loops iterated
    ``range(len(u_vel[0, :]))`` (the per-image row count) instead of the
    number of images, and rejected images via ``u_vel[0, j] = nan`` --
    which NaN'd row *j* of the *first* image -- instead of NaN'ing image
    *j* as a whole.

    Parameters
    ----------
    u_vel, v_vel : ndarray, shape (n_images, sizey, sizex)
        Instantaneous velocity fields; modified in place.
    Uinfinity : float
        Free-stream velocity used for the near-zero rejection threshold.
    sizey : int
        Rows per image; defines the "top third" free-stream region.

    Returns
    -------
    (u_vel, v_vel, count1) : the filtered arrays and the number of
    rejected snapshots.
    """
    count1 = 0
    n_images = len(u_vel)
    # First row of the top third of the frame (free-stream region).
    top = int(2 * (sizey / 3))

    def _top_means():
        # Per-image mean over the top-third rows.  Already-rejected
        # images yield NaN here, and NaN comparisons below are always
        # False, so an image is never counted twice.
        return np.array([np.mean(u_vel[j, top:-1]) for j in range(n_images)])

    # Pass 1: drop images whose free-stream mean is essentially zero so
    # they do not skew the ensemble statistics toward zero.
    means = _top_means()
    for j in range(n_images):
        if means[j] < Uinfinity / 10:
            u_vel[j] = np.nan
            v_vel[j] = np.nan
            count1 += 1

    # Pass 2: iterative one-standard-deviation filter on the survivors.
    num_loops = 4      # number of filter iterations
    filter_width = 1   # width of the filter in standard deviations
    for _ in range(num_loops):
        means = _top_means()
        center = np.nanmean(means)
        spread = filter_width * np.sqrt(np.nanvar(means))
        for j in range(n_images):
            if means[j] < center - spread or means[j] > center + spread:
                u_vel[j] = np.nan
                v_vel[j] = np.nan
                count1 += 1
    return (u_vel, v_vel, count1)
## to work on ##
################
# -work to read in top line of PIV file such that size and units and be predetermined
# determine better way to store PIV datasets
## INITIAL CODE USED FOR READING IN
#run air_prop.py
#Parameter set
date = '072117'
filter_width = 21    # NOTE(review): unused below -- presumably intended for medfilt; confirm
num_images = 10911   # snapshots per data set
sizex = 129          # vectors per row in the DaVis export
sizey = 129          # vectors per column in the DaVis export
walloffset = 7.25    #mm, wall position seen in the images
side_error = 5       # columns trimmed from each side of the mask
u_infinity = 4.5     # free-stream velocity used for image rejection
#list name of data set folders
base_name = dict()
#List the base name for each test to be read in and analyzed, names taken directly from folder
base_name[0] = '../../../../../../../Local_files/FPF/test_' + date + '/data/Cam_Date=170721_Time=160651_TR_SeqPIV_MP(2x16x16_50ov_ImgCorr)=unknown_02'

#Initialize variables
num_tests = len(base_name)
u = np.ndarray([num_tests, num_images, sizey, sizex])
v = np.ndarray([num_tests, num_images, sizey, sizex])
v_filt = np.ndarray([num_tests, num_images, sizey, sizex])
u_filt = np.ndarray([num_tests, num_images, sizey, sizex])
umean = np.ndarray([num_tests, sizey, sizex])
#vmean1 = np.ndarray([num_tests, sizey, sizex])
#mask = np.zeros([num_tests, 3])
umean_profile = dict()
vmean_profile = dict()
urms_profile = dict()
vrms_profile = dict()
uvprime_profile = dict()

# Read, filter, and average each data set; the mask found on the last
# set is reused for the cropping below.
for j in base_name:
    #Read in
    [x, y, u[j], v[j], x_range] = piv_readin_mod(base_name[j], num_images, sizey, sizex)
    #Filter Images
    [u_filt[j], v_filt[j], bad_im_count] = filt_images(u[j], v[j], u_infinity, sizey)
    #Obtain mean vector field
    umean[j] = np.nanmean(u_filt[j, :], axis=0)
    #vmean1[j] = np.nanmean(v_filt[j, :], axis=0)
    #determine mask position
    tempmask = PIV.mask_loc(umean[j])
    mask = list(tempmask)
#use this to find the mean vel in each image, and look for bad images

## Resize vector field to crop out masked areas and
# create new vectors which take out the masked areas and any side errors
sizex_mask = mask[3] - mask[2] - side_error*2
sizey_mask = mask[1] - mask[0]
Umask = np.ndarray([num_tests, num_images, sizey_mask, sizex_mask])
Vmask = np.ndarray([num_tests, num_images, sizey_mask, sizex_mask])
umean = np.ndarray([num_tests, sizey_mask, sizex_mask])
vmean = np.ndarray([num_tests, sizey_mask, sizex_mask])
for j in base_name:
    Umask[j] = u_filt[j][:, mask[0]:mask[1], int(mask[2]+side_error):int(mask[3]-side_error)]
    Vmask[j] = v_filt[j][:, mask[0]:mask[1], int(mask[2]+side_error):int(mask[3]-side_error)]
    umean[j] = np.nanmean(Umask[j], axis=0)
    vmean[j] = np.nanmean(Vmask[j], axis=0)

## Determine RMS quantities ##
uprime = np.ndarray([num_tests, num_images, sizey_mask, sizex_mask])
vprime = np.ndarray([num_tests, num_images, sizey_mask, sizex_mask])
uvprime = np.ndarray([num_tests, num_images, sizey_mask, sizex_mask])
uvprime_mean = np.ndarray([num_tests, sizey_mask, sizex_mask])
urms = np.ndarray([num_tests, sizey_mask, sizex_mask])
vrms = np.ndarray([num_tests, sizey_mask, sizex_mask])
for j in range(0, num_tests):
    for jj in range(0, num_images):
        # Fluctuating components relative to the ensemble mean field.
        uprime[j, jj] = ((Umask[j][jj]-umean[j]))
        vprime[j, jj] = ((Vmask[j][jj]-vmean[j]))
        uvprime[j, jj] = uprime[j, jj]*vprime[j, jj]
    uvprime_mean[j] = np.nanmean(uvprime[j], axis=0)
    urms[j] = np.nanmean(uprime[j]**2, axis=0)**(1/2)
    vrms[j] = np.nanmean(vprime[j]**2, axis=0)**(1/2)

## wall position adjustment ###########
#convert to m and take off wall position as seen in images
x = (x)/1000
y = (y-walloffset)/1000
xmask = x[ (mask[2]+side_error):(mask[3]-side_error) ]
ymask = y[ mask[0]:mask[1] ]

## Create Mean Profiles for each data set#######
# Collapse the streamwise (axis=1) direction to one wall-normal profile.
for j in range(0, num_tests):
    umean_profile[j] = np.mean(umean[j], axis=1)
    vmean_profile[j] = np.mean(vmean[j], axis=1)
    urms_profile[j] = np.mean(urms[j], axis=1)
    vrms_profile[j] = np.mean(vrms[j], axis=1)
    uvprime_profile[j] = np.mean(uvprime_mean[j], axis=1)

## Average multiple profiles together
#use this if multiple tests are performed at the same condition
umean_profile_avg = np.zeros(len(umean_profile[0]))
vmean_profile_avg = np.zeros(len(umean_profile[0]))
urms_profile_avg = np.zeros(len(umean_profile[0]))
vrms_profile_avg = np.zeros(len(umean_profile[0]))
uvprime_profile_avg = np.zeros(len(umean_profile[0]))
#average datasets together
for j in range(0, num_tests):
    umean_profile_avg = umean_profile_avg + umean_profile[j]
    vmean_profile_avg = vmean_profile_avg + vmean_profile[j]
    urms_profile_avg = urms_profile_avg + urms_profile[j]
    vrms_profile_avg = vrms_profile_avg + vrms_profile[j]
    uvprime_profile_avg = uvprime_profile_avg + uvprime_profile[j]
#divide profiles by number of tests which were combined
umean_profile_avg = umean_profile_avg / num_tests
vmean_profile_avg = vmean_profile_avg / num_tests
urms_profile_avg = urms_profile_avg / num_tests
vrms_profile_avg = vrms_profile_avg / num_tests
uvprime_profile_avg = uvprime_profile_avg / num_tests

##calculate conf interval
# Neff is the assumed number of effectively independent samples.
# NOTE(review): confirm 75 against the sampling rate / integral time scale.
conf = dict()
Neff = 75
conf['u'] = (np.nanmean(np.nanmean(np.nanvar(Umask, axis=1), axis=0), axis=1))**(1/2) * (1/Neff)**(1/2)
conf['v'] = (np.nanmean(np.nanmean(np.nanvar(Vmask, axis=1), axis=0), axis=1))**(1/2) * (1/Neff)**(1/2)
conf['urms'] = (np.nanmean(np.nanvar(urms, axis=0), axis=1))**(1/2) * (1/(2*Neff-1))**(1/2)
conf['vrms'] = (np.nanmean(np.nanvar(vrms, axis=0), axis=1))**(1/2) * (1/(2*Neff-1))**(1/2)
sigma_u = (np.nanmean(np.nanvar(Umask, axis=1), axis=0))**(1/2)
sigma_v = (np.nanmean(np.nanvar(Vmask, axis=1), axis=0))**(1/2)
conf['uvprime'] = np.nanmean(sigma_u * sigma_v * (1+ (np.nanmean(uvprime_mean, axis=0)/(sigma_u * sigma_v))**2 / (Neff - 1))**(1/2), axis=1)

### WRITE OUT DATA
####################
#open hdf5 file
hdf = pd.HDFStore('data/PIV_' + date + '.h5')
hdf.put('umean', pd.DataFrame(umean[0]))
hdf.put('vmean', pd.DataFrame(vmean[0]))
hdf.put('umean_profile_avg', pd.DataFrame(umean_profile_avg))
hdf.put('vmean_profile_avg', pd.DataFrame(vmean_profile_avg))
hdf.put('urms_profile_avg', pd.DataFrame(urms_profile_avg))
hdf.put('vrms_profile_avg', pd.DataFrame(vrms_profile_avg))
hdf.put('uvprime_profile_avg', pd.DataFrame(uvprime_profile_avg))
hdf.put('confidence', pd.DataFrame(conf))
hdf.put('xaxis', pd.Series(xmask))
hdf.put('yaxis', pd.Series(ymask))
hdf.put('mask', pd.DataFrame(mask))
hdf.close()
print('Data Saved!')
#files = []
#fig, ax = plt.subplots(figsize=(5,5))
#for i in range(0, num_images):
# plt.cla()
# plt.imshow(Umask[0, i])
# plt.colorbar()
# fname = '_tmp%03d.png'%i
# print('Saving frame', fname)
# plt.savefig(fname)
# files.append(fname)
#print('Making movie animation.mpg - this make take a while')
#os.system("mencoder 'mf://_tmp*.png' -mf type=png:fps=10 -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o animation.mpg")
#os.system("convert _tmp*.png animation.mng")
# cleanup
#for fname in files: os.remove(fname)
| mit |
bigswitch/snac-nox | src/scripts/buildtest/performance.py | 1 | 10895 | #!/usr/bin/python
#
# TARP
# 3/13/08
# Prototype to print out results of running a couple million packets
# through various configurations of NOX and packet handling
#
# TARP
# 3/14/08
# Corrected resource consumption, switched from time() to getrusage(),
# print out a few more statistics to get a feel for the ranges expected.
#
# TARP
# 3/17/08
# Added graphing via matplotlib/pylab, lines and such, classes to hold data
#
# TARP
# 3/18/08
# Included bar plot support, automatically used for discrete variables
#
# TARP
# 3/19/08
# Restructuring so that data control smoother, seperating locating points
# from plotting them, adding standard deviation support for viewability
# creating dataset class
# ... 3/20/08
# Sleep the wee hours. Total reformat complete, old version left in for final
# reviewing, soon to be trimmed, probably only keep a barplot method.
# More minor changes with legend, visual aspects.
#
# TARP
# 4/24/08
# Returning after directory rehash, only change should hopefully just be
# trivial command change for testing purposes.
#
# TARP
# 5/13/08
# Beginning effort to incorporate performance into build testing
#
# TARP
# 5/14/08
# Stripped Info() and derived classes into info.py. Additionally placed
# all of the graphing functions into graph.py.
#
# Fixes silly pylab dependency error
import matplotlib
matplotlib.use('Agg')
import os
import pwd
import pickle
import info
import graph
from utilities import name_test, Notary, Command
from datetime import datetime
class Benchmarker:
    """Runs nox_core packet-generator benchmarks over several build
    configurations, archives the timing results with pickle, and renders
    them as graphs via :mod:`graph`.
    """

    user = pwd.getpwuid(os.getuid())[0]
    build_path = "/tmp/build/%s/" % user
    build_default = "default/"
    src_directory = "/home/%s/base_directory/asena/" % user # Only for gitinfo
    # This is probably incorrect, check against builder
    #? Should we need once integrated?
    oxide = Command("nox_core -i pgen:","./src/","oxide pgen")
    #T For now... leaving old version
    test_command = oxide.command
    test_subdir = oxide.directory[len('./'):]
    test_finish = 'exit'
    # Benchmarks run for every configuration.
    test_all = ["noop", "switch"]
    # Extra benchmarks enabled per configure option.
    test_options = {"twisted":["pynoop", "sepl routing"], "ndebug":[]}
    #test_options = {"twisted":["pynoop"], "ndebug":[]}
    notary = Notary() # No individual log file
    note = notary.note # This is a function

    def __init__(self, input=False, output=None):
        """input: path of a pickle archive of prior results to preload.
        output: path to archive results to; defaults to ``input``.
        """
        if output == None: output = input
        self.retrieving = input
        self.storing = output
        self.raw_points = []
        self.test_packets = 100000
        self.control_packets = 1000
        # Create an empty archive up front so later dumps always succeed.
        if self.storing and not os.access(self.storing,os.F_OK):
            self.note("Creating new archive at %s" % self.storing)
            pickle.dump([],open(self.storing,'w'))
        if self.retrieving:
            try:
                self.raw_points = pickle.load(open(self.retrieving,'r'))
            except:
                # NOTE(review): bare except also hides KeyboardInterrupt and
                # programming errors; consider narrowing to
                # (IOError, pickle.PickleError).
                self.note("Error unpickling %s, continuing..." % \
                          self.retrieving,'shallow')

    def run_tests(self, logged_call, option_set, make_total_time):
        """Run every benchmark applicable to ``option_set`` (a collection of
        configure options), timing each via getrusage of child processes,
        reporting pass/fail through ``logged_call`` and appending
        info.RawData points to self.raw_points (archived if self.storing).
        ``make_total_time`` is the build's make duration, or None/falsy when
        the build did not complete.
        """
        from subprocess import Popen, PIPE
        from resource import getrusage, RUSAGE_CHILDREN
        start_use = getrusage(RUSAGE_CHILDREN)[0:2]
        testing = info.Test()
        testing.configuration = option_set
        result = info.Result()
        build = info.Build()
        # Is passed 'None' when not fully built
        if make_total_time:
            result.total = make_total_time
            testing.command = 'make'
            # NOTE(review): the same Test/Result instances are reused (and
            # mutated) for every RawData appended below; if info.RawData
            # stores references rather than copies, earlier points get
            # clobbered — verify against info.py.
            self.raw_points.append(info.RawData(test=testing, \
                                                result=result, \
                                                build=build))
        testing.packets = self.test_packets
        subpath = self.build_path + name_test(option_set)
        self.note("%s starting ..." % subpath)
        for tests in [self.test_all] + \
                [self.test_options[opt] for opt in option_set]:
            # This does not take advantage of multiple processors
            for test in tests:
                testing.command = test
                run = self.test_command + str(self.test_packets)+" "+test
                # NOTE(review): self.test_command is the constant
                # "nox_core -i pgen:" and never contains 'sepl' — was
                # ``test`` intended here?
                if 'sepl' in self.test_command:
                    run += " --verbose='sepl:ANY:ERR'"
                self.note("Executing: ./%s ... " % run,'deep')
                f = open('/dev/null','w')
                # Correct for start-up time somewhat
                older = getrusage(RUSAGE_CHILDREN)[0:2]
                ctl = self.test_command + str(self.control_packets)+" "+test
                # Short control run: its duration approximates start-up
                # overhead and is subtracted from the full run below.
                p = Popen(subpath+self.test_subdir+ctl+" "+self.test_finish,\
                          stdout=f, stderr=f, \
                          shell=True, cwd=subpath + self.test_subdir).wait()
                old = getrusage(RUSAGE_CHILDREN)[0:2]
                p = Popen(subpath+self.test_subdir+run+" "+self.test_finish,\
                          stdout=f, stderr=f, \
                          #? Have to send this somewhere else (than PIPE), sepl
                          # routing gets massive amounts of data, better off
                          # going to /dev/null if I don't use anyway
                          # sepl seems to crash if PIPE used even with 1M pkt
                          shell=True, cwd=subpath + self.test_subdir).wait()
                new = getrusage(RUSAGE_CHILDREN)[0:2]
                # Full-run time minus the control-run time (start-up cost).
                result.user = (new[0] - old[0]) - (old[0] - older[0])
                result.system = (new[1] - old[1]) - (old[1] - older[1])
                result.total = result.user + result.system
                retval = p and 'false' or 'true' # i.e.: the unix command
                c = Command(retval, '/bin/', '%s [performance] (%.2f)' \
                            % (test, result.total))
                c.logdir = False
                logged_call(c)
                if p != 0:
                    #! This doesn't catch pynoop's problem
                    self.note("Execution failed ... continuing",'shallow')
                    continue
                outcome =info.RawData(test=testing,result=result,build=build)
                self.raw_points.append(outcome)
                self.note("Took: %ss" % result.total,'deep')
                if(outcome.pps):
                    self.note("Pkt/sec: %d" % \
                              (testing.packets/result.total), 'deep')
                if(outcome.spmp):
                    self.note("sec/MPkt: %.2f" % \
                              (10**6*result.total/testing.packets),'deep')
        end_use = getrusage(RUSAGE_CHILDREN)[0:2]
        elapsed = (end_use[0] + end_use[1]) - (start_use[0] + start_use[1])
        if self.storing:
            self.note("Archiving result in %s" % self.storing, 'deep')
            pickle.dump(self.raw_points,open(self.storing,'w'))
        #! This could be simplified readily, particularly if the
        # significant digits of seconds stay constant throughout.
        if elapsed > 60*60:
            self.note("Total time: %dh %dm %ds" % \
                      (elapsed / (60*60), (elapsed % (60*60)) /60, elapsed % 60))
        elif elapsed > 60:
            self.note("Total time: %dm %.2fs" % (elapsed / 60, elapsed % 60))
        else:
            self.note("Total time: %.4fs" % elapsed)
        self.note("%s finished" % subpath)

    def show_results(self):
        """Render the accumulated raw_points as one image per benchmark
        command (sepl routing, switch, noop, pynoop, make), plotting
        packets-per-second (or make total time) against commit date.
        """
        # Graphing seconds per mega-packet vs commit date
        search_profile = info.Profile()
        search_build = info.Build()
        search_test = info.Test()
        search_result = info.Result()
        # Independent Variable
        search_build.build_date = True
        # Not considered:
        # NOTE(review): these rebind search_profile/search_result to False,
        # discarding the Profile()/Result() instances created above —
        # presumably False means "ignore this field" in the search.
        search_profile = False
        search_build.commit = False
        search_build.last_author = False
        search_test.rules = False
        search_test.policies = False
        search_result = False
        # All values of:
        search_test.configuration = None
        search_test.packets = None
        # By not recreating grapher each time, expose self to the
        # xx-small font bug if too much to show for any one of these
        # This is deliberate, good incentive to fix =)
        g = graph.Grapher(self.raw_points,'placeholder') # The name of the image
        #######################
        # pgen:1000000 sepl routing [audit exit]
        # Exact match:
        search_test.command = 'sepl routing'
        search = info.RawData(profile = search_profile, \
                              build = search_build, \
                              test = search_test, \
                              result = search_result)
        g.image_dst = 'sepl_routing_cmd'
        g.graph('build.build_date', 'pps', search)
        #######################
        # pgen:10000000 switch
        # Exact match:
        search_test.command = 'switch'
        search = info.RawData(profile=search_profile,
                              build=search_build,
                              test=search_test,
                              result=search_result)
        #! Write a function for this assignment
        g.image_dst = 'switch_cmd'
        g.graph('build.build_date', 'pps', search)
        #######################
        # pgen:10000000 noop
        # exact match:
        search_test.command = 'noop'
        search = info.RawData(profile=search_profile,
                              build=search_build,
                              test=search_test,
                              result=search_result)
        #! write a function for this assignment
        g.image_dst = 'noop_cmd'
        g.graph('build.build_date', 'pps', search)
        #######################
        # pgen:10000000 pynoop
        # exact match:
        search_test.command = 'pynoop'
        search = info.RawData(profile=search_profile,
                              build=search_build,
                              test=search_test,
                              result=search_result)
        #! write a function for this assignment
        g.image_dst = 'pynoop_cmd'
        g.graph('build.build_date', 'pps', search)
        #######################
        # build time
        # Exact match:
        search_test.command = 'make'
        search_test.packets = False
        search = info.RawData(profile=search_profile,
                              build=search_build,
                              test=search_test,
                              result=search_result)
        #! Write a function for this assignment
        search.pps = False
        search.spmp = False
        g.image_dst = 'make_cmd'
        g.graph('build.build_date', 'result.total', search)
#def main():
# b = Benchmarker('zz_performance_archive')
## Fake make times are the integers
# b.run_tests(['twisted'],314)
# b.run_tests(['ndebug'],213)
# b.run_tests([],218)
# b.run_tests(['ndebug','twisted'],227)
# b.show_results()
#
#if __name__ == '__main__':
# main()
| gpl-3.0 |
wdwvt1/scikit-bio | skbio/stats/distance/tests/test_bioenv.py | 13 | 9972 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
import pandas as pd
from skbio import DistanceMatrix
from skbio.stats.distance import bioenv
from skbio.stats.distance._bioenv import _scale
from skbio.util import get_data_path, assert_data_frame_almost_equal
class BIOENVTests(TestCase):
    """Results were verified with R 3.0.2 and vegan 2.0-10 (vegan::bioenv)."""

    def setUp(self):
        """Load the fixture distance matrices, metadata frames and the
        expected result tables used by the tests below."""
        # The test dataset used here is a subset of the Lauber et al. 2009
        # "88 Soils" dataset. It has been altered to exercise various aspects
        # of the code, including (but not limited to):
        #
        # - order of distance matrix IDs and IDs in data frame (metadata) are
        #   not exactly the same
        # - data frame has an extra sample that is not in the distance matrix
        # - this extra sample has non-numeric and missing values in some of
        #   its cells
        #
        # Additional variations of the distance matrix and data frame are used
        # to test different orderings of rows/columns, extra non-numeric data
        # frame columns, etc.
        #
        # This dataset is also useful because it is non-trivial in size (6
        # samples, 11 environment variables) and it includes positive/negative
        # floats and integers in the data frame.
        self.dm = DistanceMatrix.read(get_data_path('dm.txt'))
        # Reordered rows and columns (i.e., different ID order). Still
        # conceptually the same distance matrix.
        self.dm_reordered = DistanceMatrix.read(
            get_data_path('dm_reordered.txt'))
        self.df = pd.read_csv(get_data_path('df.txt'), sep='\t', index_col=0)
        # Similar to the above data frame, except that it has an extra
        # non-numeric column, and some of the other rows and columns have been
        # reordered.
        self.df_extra_column = pd.read_csv(
            get_data_path('df_extra_column.txt'), sep='\t', index_col=0)
        # All columns in the original data frame (these are all numeric
        # columns).
        self.cols = self.df.columns.tolist()
        # This second dataset is derived from vegan::bioenv's example dataset
        # (varespec and varechem). The original dataset includes a site x
        # species table (e.g., OTU table) and a data frame of environmental
        # variables. Since the bioenv function defined here accepts a distance
        # matrix, we use a Bray-Curtis distance matrix that is derived from
        # the site x species table (this matches what is done by vegan::bioenv
        # when provided an OTU table, using their default distance measure).
        # The data frame only includes the numeric environmental variables
        # we're interested in for these tests: log(N), P, K, Ca, pH, Al
        self.dm_vegan = DistanceMatrix.read(
            get_data_path('bioenv_dm_vegan.txt'))
        self.df_vegan = pd.read_csv(
            get_data_path('bioenv_df_vegan.txt'), sep='\t',
            converters={0: str})
        self.df_vegan.set_index('#SampleID', inplace=True)

        # Load expected results.
        self.exp_results = pd.read_csv(get_data_path('exp_results.txt'),
                                       sep='\t', index_col=0)
        self.exp_results_single_column = pd.read_csv(
            get_data_path('exp_results_single_column.txt'), sep='\t',
            index_col=0)
        self.exp_results_different_column_order = pd.read_csv(
            get_data_path('exp_results_different_column_order.txt'), sep='\t',
            index_col=0)
        self.exp_results_vegan = pd.read_csv(
            get_data_path('bioenv_exp_results_vegan.txt'), sep='\t',
            index_col=0)

    def test_bioenv_all_columns_implicit(self):
        # Test with all columns in data frame (implicitly).
        obs = bioenv(self.dm, self.df)
        assert_data_frame_almost_equal(obs, self.exp_results)

        # Should get the same results if order of rows/cols in distance matrix
        # is changed.
        obs = bioenv(self.dm_reordered, self.df)
        assert_data_frame_almost_equal(obs, self.exp_results)

    def test_bioenv_all_columns_explicit(self):
        # Test with all columns being specified.
        obs = bioenv(self.dm, self.df, columns=self.cols)
        assert_data_frame_almost_equal(obs, self.exp_results)

        # Test against a data frame that has an extra non-numeric column and
        # some of the rows and columns reordered (we should get the same
        # result since we're specifying the same columns in the same order).
        obs = bioenv(self.dm, self.df_extra_column, columns=self.cols)
        assert_data_frame_almost_equal(obs, self.exp_results)

    def test_bioenv_single_column(self):
        """A single-variable subset should still produce a valid result."""
        obs = bioenv(self.dm, self.df, columns=['PH'])
        assert_data_frame_almost_equal(obs, self.exp_results_single_column)

    def test_bioenv_different_column_order(self):
        # Specifying columns in a different order will change the row labels
        # in the results data frame as the column subsets will be reordered,
        # but the actual results (e.g., correlation coefficients) shouldn't
        # change.
        obs = bioenv(self.dm, self.df, columns=self.cols[::-1])
        assert_data_frame_almost_equal(
            obs,
            self.exp_results_different_column_order)

    def test_bioenv_no_side_effects(self):
        """bioenv must not mutate its distance matrix or data frame inputs."""
        # Deep copies of both primary inputs.
        dm_copy = self.dm.copy()
        df_copy = self.df.copy(deep=True)

        bioenv(self.dm, self.df)

        # Make sure we haven't modified the primary input in some way (e.g.,
        # with scaling, type conversions, etc.).
        self.assertEqual(self.dm, dm_copy)
        assert_data_frame_almost_equal(self.df, df_copy)

    def test_bioenv_vegan_example(self):
        # The correlation coefficient in the first row of the
        # results (rho=0.2516) is different from the correlation coefficient
        # computed by vegan (rho=0.2513). This seems to occur due to
        # differences in numerical precision when calculating the Euclidean
        # distances, which affects the rank calculations in Spearman
        # (specifically, dealing with ties). The ranked distances end up being
        # slightly different between vegan and our implementation because some
        # distances are treated as ties in vegan but treated as distinct
        # values in our implementation. This explains the difference in rho
        # values. I verified that using Pearson correlation instead of
        # Spearman on the same distances yields *very* similar results. Thus,
        # the discrepancy seems to stem from differences when computing
        # ranks/ties.
        obs = bioenv(self.dm_vegan, self.df_vegan)
        assert_data_frame_almost_equal(obs, self.exp_results_vegan)

    def test_bioenv_no_distance_matrix(self):
        """A non-DistanceMatrix first argument is rejected."""
        with self.assertRaises(TypeError):
            bioenv('breh', self.df)

    def test_bioenv_no_data_frame(self):
        """A missing (None) data frame is rejected."""
        with self.assertRaises(TypeError):
            bioenv(self.dm, None)

    def test_bioenv_duplicate_columns(self):
        """Repeating a column name in ``columns`` is rejected."""
        with self.assertRaises(ValueError):
            bioenv(self.dm, self.df, columns=self.cols + ['PH'])

    def test_bioenv_no_columns(self):
        """An empty ``columns`` list is rejected."""
        with self.assertRaises(ValueError):
            bioenv(self.dm, self.df, columns=[])

    def test_bioenv_missing_columns(self):
        """A column name absent from the data frame is rejected."""
        with self.assertRaises(ValueError):
            bioenv(self.dm, self.df, columns=self.cols + ['brofist'])

    def test_bioenv_missing_distance_matrix_ids(self):
        """Data frame missing an ID present in the distance matrix errors."""
        df = self.df[1:]
        with self.assertRaises(ValueError):
            bioenv(self.dm, df)

    def test_bioenv_nans(self):
        """NaN cells in the metadata are rejected."""
        df = self.df.replace(53.9, np.nan)
        with self.assertRaises(ValueError):
            bioenv(self.dm, df)

    def test_bioenv_nonnumeric_columns(self):
        """Non-numeric cells/columns in the metadata are rejected."""
        df = self.df.replace(2400, 'no cog yay')
        with self.assertRaises(TypeError):
            bioenv(self.dm, df)

        with self.assertRaises(TypeError):
            bioenv(self.dm, self.df_extra_column)

    def test_scale_single_column(self):
        """_scale standardizes a single column to zero mean, unit variance."""
        df = pd.DataFrame([[1], [0], [2]], index=['A', 'B', 'C'],
                          columns=['foo'])
        exp = pd.DataFrame([[0.0], [-1.0], [1.0]], index=['A', 'B', 'C'],
                           columns=['foo'])
        obs = _scale(df)
        assert_data_frame_almost_equal(obs, exp)

    def test_scale_multiple_columns(self):
        # Floats and ints, including positives and negatives.
        df = pd.DataFrame([[7.0, 400, -1],
                           [8.0, 530, -5],
                           [7.5, 450, 1],
                           [8.5, 810, -4]],
                          index=['A', 'B', 'C', 'D'],
                          columns=['pH', 'Elevation', 'negatives'])
        exp = pd.DataFrame([[-1.161895, -0.805979, 0.453921],
                            [0.387298, -0.095625, -0.998625],
                            [-0.387298, -0.532766, 1.180194],
                            [1.161895, 1.434369, -0.635489]],
                           index=['A', 'B', 'C', 'D'],
                           columns=['pH', 'Elevation', 'negatives'])
        obs = _scale(df)
        assert_data_frame_almost_equal(obs, exp)

    def test_scale_no_variance(self):
        """_scale rejects a column with zero variance (division by zero)."""
        df = pd.DataFrame([[-7.0, -1.2], [6.2, -1.2], [2.9, -1.2]],
                          index=['A', 'B', 'C'], columns=['foo', 'bar'])
        with self.assertRaises(ValueError):
            _scale(df)
if __name__ == '__main__':
main()
| bsd-3-clause |
pyamg/pyamg | pyamg/tests/aggviz.py | 1 | 3165 | import matplotlib.pyplot as plt
import numpy as np
import scipy.sparse as sparse
import pyamg
import shapely.geometry as sg
from shapely.ops import cascaded_union
def plotaggs(AggOp, V, E, G, ax, **kwargs):
    """Draw each aggregate as a filled, smoothed patch on ``ax``.

    For every aggregate, the union of all triangles/edges whose vertices lie
    entirely inside the aggregate (as connected in ``G``) is built with
    shapely, smoothed by buffering out then in, and filled.

    Parameters
    ----------
    AggOp : CSR sparse matrix
        n x nagg encoding of the aggregates; AggOp[i, j] == 1 means node i is
        in aggregate j.
    V : ndarray
        n x 2 coordinate array of the mesh.
    E : ndarray
        nel x 3 index array of the mesh elements (currently unused; kept for
        interface compatibility).
    G : CSR sparse matrix
        n x n connectivity matrix for the vertices.
    ax : axis
        matplotlib axis to draw into.
    **kwargs
        Forwarded to ``ax.fill`` (e.g. color, alpha).
    """
    for agg in AggOp.T:                      # for each aggregate (a column)
        aggids = agg.indices                 # vertex ids in this aggregate
        todraw = []                          # collect geometry to union

        # A singleton aggregate has no edges; represent it as a point so the
        # buffering below still produces a small visible patch.
        if len(aggids) == 1:
            i = aggids[0]
            todraw.append(sg.Point((V[i, 0], V[i, 1])))

        for i in aggids:                     # for each vertex in the aggregate
            nbrs = G.getrow(i).indices       # its neighbors in the graph
            for j1 in nbrs:
                found = False                # did (i, j1) close a triangle?
                for j2 in nbrs:
                    # i - j1 - j2 form a triangle fully inside the aggregate.
                    if (j1 != j2 and i != j1 and i != j2 and
                            j1 in aggids and j2 in aggids and
                            G[j1, j2]):
                        found = True
                        coords = list(zip(V[[i, j1, j2], 0], V[[i, j1, j2], 1]))
                        todraw.append(sg.Polygon(coords))
                # No triangle through (i, j1): keep the bare edge so thin
                # (chain-like) aggregates still get an outline.
                if not found and i != j1 and j1 in aggids:
                    coords = list(zip(V[[i, j1], 0], V[[i, j1], 1]))
                    todraw.append(sg.LineString(coords))

        # NOTE: cascaded_union is deprecated in recent shapely; unary_union
        # is the drop-in replacement if the import is updated.
        todraw = cascaded_union(todraw)      # union all objects in the aggregate
        todraw = todraw.buffer(0.1)          # expand to smooth
        todraw = todraw.buffer(-0.05)        # then contract
        try:
            xs, ys = todraw.exterior.xy      # exterior ring of the patch
            ax.fill(xs, ys, **kwargs, clip_on=False)
        except AttributeError:
            # A disconnected aggregate unions to a MultiPolygon, which has no
            # single ``exterior`` — skip it rather than crash. (Previously a
            # bare ``except`` that also swallowed KeyboardInterrupt and
            # genuine bugs.)
            pass

    ax.set_aspect('equal')
| mit |
CompPhysics/ComputationalPhysics2 | doc/src/NeuralNet/figures/plotEnergies.py | 10 | 1487 | import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# Parse the data-file path from the command line.
try:
    dataFileName = sys.argv[1]
except IndexError:
    print("USAGE: python plotEnergies.py 'filename'")
    sys.exit(0)

# Reference Hartree-Fock ground-state energies (atomic units) for the two
# particle counts we have reference values for.
HFEnergy3 = 3.161921401722216    # N = 2 particles
HFEnergy6 = 20.71924844033019    # N = 6 particles

# The particle count is encoded in the file name between 'N' and 'E',
# e.g. "...N6_E...".
numParticles = \
    int(dataFileName[dataFileName.find('N')+1:dataFileName.find('E')-1])

# Only draw the HF reference line when we actually have a value for it.
hfenergyFound = False
if numParticles == 2:
    HFEnergy = HFEnergy3
    hfenergyFound = True
elif numParticles == 6:
    HFEnergy = HFEnergy6
    hfenergyFound = True

# Column 0: energy per iteration; column 1: variance (converted to std dev).
data = np.loadtxt(dataFileName, dtype=np.float64)
data[:, 1] = np.sqrt(data[:, 1])
n = len(data[:, 0])
x = np.arange(0, n)

fig = plt.figure()
if hfenergyFound:
    yline = np.zeros(n)
    yline.fill(HFEnergy)
    plt.plot(x, yline, 'r--', label="HF Energy")

msize = 1.0
ax = fig.add_subplot(111)
plt.errorbar(x, data[:, 0], yerr=data[:, 1], fmt='bo', markersize=msize,
             label="VMC Energy")
plt.fill_between(x, data[:, 0]-data[:, 1], data[:, 0]+data[:, 1])
plt.xlim(0, n)
plt.xlabel('Iteration')
plt.ylabel('$E_0[a.u]$')
plt.legend(loc='best')

# Zoomed inset over iterations [minSub, maxSub).
minSub = 80
maxSub = 120
inset_axes(ax, width="50%", height=1.0, loc='right')
plt.errorbar(x[minSub:maxSub], data[minSub:maxSub, 0],
             yerr=data[minSub:maxSub, 1], fmt='bo', markersize=msize,
             label="VMC "
             "Energy")
# BUG FIX: previously this plot was unconditional, raising NameError on
# ``yline`` for any particle count other than 2 or 6.
if hfenergyFound:
    plt.plot(x[minSub:maxSub], yline[minSub:maxSub], 'r--', label="HF Energy")
plt.show()
| cc0-1.0 |
joernhees/scikit-learn | sklearn/metrics/cluster/__init__.py | 91 | 1468 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import fowlkes_mallows_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .unsupervised import calinski_harabaz_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"fowlkes_mallows_score", "entropy", "silhouette_samples",
"silhouette_score", "calinski_harabaz_score", "consensus_score"]
| bsd-3-clause |
dgwakeman/mne-python | examples/time_frequency/plot_source_power_spectrum.py | 19 | 1929 | """
=========================================================
Compute power spectrum densities of the sources with dSPM
=========================================================
Returns an STC file containing the PSD (in dB) of each of the sources.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd
print(__doc__)
###############################################################################
# Set parameters
# Paths into the MNE "sample" dataset: raw recording, precomputed inverse
# operator, and the left auditory cortex label.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'

# Setup for reading the raw data
raw = io.Raw(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']

# picks MEG gradiometers
# NOTE(review): ``picks`` and ``events`` are computed but never passed to
# compute_source_psd below — confirm whether they are needed.
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
                       stim=False, exclude='bads')

tmin, tmax = 0, 120  # use the first 120s of data
fmin, fmax = 4, 100  # look at frequencies between 4 and 100Hz
n_fft = 2048  # the FFT size (n_fft). Ideally a power of 2
label = mne.read_label(fname_label)

# PSD of the label's sources via dSPM; lambda2 = 1/SNR**2 with SNR = 3.
stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
                         tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                         pick_ori="normal", n_fft=n_fft, label=label)

stc.save('psd_dSPM')

###############################################################################
# View PSD of sources in label
# NOTE(review): stc.times appears to hold frequencies here (scaled by 1e3 to
# match the Hz axis label) — confirm against compute_source_psd's docs.
plt.plot(1e3 * stc.times, stc.data.T)
plt.xlabel('Frequency (Hz)')
plt.ylabel('PSD (dB)')
plt.title('Source Power Spectrum (PSD)')
plt.show()
| bsd-3-clause |
HolgerPeters/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
# Metric name -> callable(y_true, y_pred) returning a scalar score.
METRICS = {
    'f1': partial(f1_score, average='micro'),
    'f1-by-sample': partial(f1_score, average='samples'),
    'accuracy': accuracy_score,
    'hamming': hamming_loss,
    'jaccard': jaccard_similarity_score,
}

# Format name -> converter from a dense label-indicator matrix to that
# multilabel representation (list-of-label-lists, dense, CSR, CSC).
FORMATS = {
    'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
    'dense': lambda y: y,
    'csr': lambda y: sp.csr_matrix(y),
    'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
              formats=tuple(v for k, v in sorted(FORMATS.items())),
              samples=1000, classes=4, density=.2,
              n_times=5):
    """Times metric calculations for a number of inputs

    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.

    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.

    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.

    classes : array-like of ints (1d or 0d)
        The number of classes in the input.

    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.

    n_times : int
        Time calling the metric n_times times.

    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    # Normalize all parameters to 1-d arrays so scalars and sequences are
    # handled uniformly.
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)

    out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
                    len(density)), dtype=float)

    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        # Independent true/predicted labelings with the same shape/density.
        _, y_true = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=42)
        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=84)
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)

                # out[k, j] has shape (samples, classes, density); .flat's
                # row-major (C) order matches itertools.product's iteration
                # order over (s, c, d), so flat index i lands correctly.
                out[k, j].flat[i] = t
    return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
          format_markers=('x', '|', 'o', '+'),
          metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
    """
    Plot the results by metric, format and some other variable given by
    x_label
    """
    figure = plt.figure('scikit-learn multilabel metrics benchmarks')
    plt.title(title)
    axes = figure.add_subplot(111)
    n_colors = len(metric_colors)
    for row, metric_name in enumerate(metrics):
        for col, format_name in enumerate(formats):
            curve_label = '{}, {}'.format(metric_name, format_name)
            axes.plot(x_ticks, results[row, col].flat,
                      label=curve_label,
                      marker=format_markers[col],
                      color=metric_colors[row % n_colors])
    axes.set_xlabel(x_label)
    axes.set_ylabel('Time (s)')
    axes.legend()
    plt.show()
if __name__ == "__main__":
    # Command-line driver: benchmark the selected metrics/formats and
    # optionally sweep one parameter (classes/density/samples) for plotting.
    ap = argparse.ArgumentParser()
    ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
                    help='Specifies metrics to benchmark, defaults to all. '
                         'Choices are: {}'.format(sorted(METRICS)))
    ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
                    help='Specifies multilabel formats to benchmark '
                         '(defaults to all).')
    ap.add_argument('--samples', type=int, default=1000,
                    help='The number of samples to generate')
    ap.add_argument('--classes', type=int, default=10,
                    help='The number of classes')
    ap.add_argument('--density', type=float, default=.2,
                    help='The average density of labels per sample')
    ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
                    default=None,
                    help='Plot time with respect to this parameter varying '
                         'up to the specified value')
    ap.add_argument('--n-steps', default=10, type=int,
                    help='Plot this many points for each metric')
    ap.add_argument('--n-times',
                    default=5, type=int,
                    help="Time performance over n_times trials")
    args = ap.parse_args()

    if args.plot is not None:
        # Replace the swept parameter's scalar with an array of step values;
        # classes/samples must be integers >= 2, density may start at 0.
        max_val = getattr(args, args.plot)
        if args.plot in ('classes', 'samples'):
            min_val = 2
        else:
            min_val = 0
        steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
        if args.plot in ('classes', 'samples'):
            steps = np.unique(np.round(steps).astype(int))
        setattr(args, args.plot, steps)

    # NOTE(review): args.metrics can never be None (its argparse default is a
    # list), so this first fallback is dead code; --formats has no default,
    # so its check is live.
    if args.metrics is None:
        args.metrics = sorted(METRICS)
    if args.formats is None:
        args.formats = sorted(FORMATS)
    results = benchmark([METRICS[k] for k in args.metrics],
                        [FORMATS[k] for k in args.formats],
                        args.samples, args.classes, args.density,
                        args.n_times)

    _tabulate(results, args.metrics, args.formats)

    if args.plot is not None:
        print('Displaying plot', file=sys.stderr)
        title = ('Multilabel metrics with %s' %
                 ', '.join('{0}={1}'.format(field, getattr(args, field))
                           for field in ['samples', 'classes', 'density']
                           if args.plot != field))
        _plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
loli/sklearn-ensembletrees | sklearn/datasets/tests/test_20news.py | 42 | 2416 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
    """Sanity-check the 20 newsgroups loader and its category subsetting.

    Skipped unless the dataset has already been downloaded locally
    (download_if_missing=False).
    """
    try:
        data = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")

    # Extract a reduced dataset: the last two categories, in reverse order.
    data2cats = datasets.fetch_20newsgroups(
        subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
    # Check that the ordering of the target_names is the same
    # as the ordering in the full dataset
    assert_equal(data2cats.target_names,
                 data.target_names[-2:])
    # Assert that we have only 0 and 1 as labels
    assert_equal(np.unique(data2cats.target).tolist(), [0, 1])

    # Check that the number of filenames is consistent with data/target
    assert_equal(len(data2cats.filenames), len(data2cats.target))
    assert_equal(len(data2cats.filenames), len(data2cats.data))

    # Check that the first entry of the reduced dataset corresponds to
    # the first entry of the corresponding category in the full dataset
    entry1 = data2cats.data[0]
    category = data2cats.target_names[data2cats.target[0]]
    label = data.target_names.index(category)
    entry2 = data.data[np.where(data.target == label)[0][0]]
    assert_equal(entry1, entry2)
def test_20news_vectorized():
    """Check shapes/dtypes of the vectorized 20 newsgroups splits (disabled: slow)."""
    # This test is slow.
    raise SkipTest("Test too slow.")

    # Unreachable while the skip above is in place; kept for manual runs.
    expected = {"train": 11314, "test": 7532, "all": 11314 + 7532}
    for subset, n_samples in expected.items():
        bunch = datasets.fetch_20newsgroups_vectorized(subset=subset)
        assert_true(sp.isspmatrix_csr(bunch.data))
        assert_equal(bunch.data.shape, (n_samples, 107428))
        assert_equal(bunch.target.shape[0], n_samples)
        assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
YcheLanguageStudio/PythonStudy | bioinformatics/hypothesis_test/binomial_distribution.py | 1 | 1571 | # Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import binom
from matplotlib import pyplot as plt

# ----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX.  This may
# result in an error if LaTeX is not installed on your system.  In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)

# ------------------------------------------------------------
# Define the distribution parameters to be plotted.
# NOTE: zip() truncates to the shortest list, so only one (n, b, ls)
# triple (n=255, b=1/6, '-') is actually drawn here.
n_values = [255, 255]
b_values = [1.0 / 6]
linestyles = ['-']
x = np.arange(-1, 200)

# ------------------------------------------------------------
# plot the distributions
fig, ax = plt.subplots(figsize=(5, 3.75))

for (n, b, ls) in zip(n_values, b_values, linestyles):
    # create a binomial distribution
    dist = binom(n, b)

    # 'steps-mid' is a *draw* style, not a line style.  Passing it through
    # the ``linestyle`` keyword alongside ``ls`` made matplotlib raise an
    # alias-conflict error, so use the dedicated ``drawstyle`` keyword.
    plt.plot(x, dist.pmf(x), ls=ls, c='black', drawstyle='steps-mid',
             label=r'$b=%.1f,\ n=%i$' % (b, n))

plt.xlim(-0.5, 256)
plt.ylim(0, 0.2)

plt.xlabel('$x$')
plt.ylabel(r'$p(x|b, n)$')
plt.title('Binomial Distribution')

plt.legend()
plt.show()
| mit |
jaidevd/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)

import matplotlib.pyplot as plt

from sklearn.datasets import (make_blobs, make_classification,
                              make_gaussian_quantiles)

# One figure: six generated 2D datasets in a 3x2 grid of scatter plots.
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)

plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
features, labels = make_classification(n_features=2, n_redundant=0,
                                       n_informative=1,
                                       n_clusters_per_class=1)
plt.scatter(features[:, 0], features[:, 1], marker='o', c=labels)

plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
features, labels = make_classification(n_features=2, n_redundant=0,
                                       n_informative=2,
                                       n_clusters_per_class=1)
plt.scatter(features[:, 0], features[:, 1], marker='o', c=labels)

plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
features, labels = make_classification(n_features=2, n_redundant=0,
                                       n_informative=2)
plt.scatter(features[:, 0], features[:, 1], marker='o', c=labels)

plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
          fontsize='small')
features, labels = make_classification(n_features=2, n_redundant=0,
                                       n_informative=2,
                                       n_clusters_per_class=1, n_classes=3)
plt.scatter(features[:, 0], features[:, 1], marker='o', c=labels)

plt.subplot(325)
plt.title("Three blobs", fontsize='small')
features, labels = make_blobs(n_features=2, centers=3)
plt.scatter(features[:, 0], features[:, 1], marker='o', c=labels)

plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
features, labels = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(features[:, 0], features[:, 1], marker='o', c=labels)

plt.show()
| bsd-3-clause |
RayMick/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
# Deprecated compatibility shim: this module only re-exports the public metric
# functions from their new submodules so that old ``sklearn.metrics.metrics``
# imports keep working until the module is removed in 0.18.
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
              "0.18. Please import from sklearn.metrics",
              DeprecationWarning)


# Ranking / threshold-based metrics.
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve

# Classification metrics.
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss

# Regression metrics.
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
MTgeophysics/mtpy | mtpy/modeling/modem/data_model_analysis.py | 1 | 17022 | """
Description:
Extract info from a pair of files (namely .dat and .rho) of modem inversion results
re-write the data into other formats such as csv.
Get a slice of the model data for analysis and plotting visualization.
The output CSV file include StationName, Lat, Long, X, Y, Z, Log(Resistivity)
where (X,Y,Z) are relative distances in meters from the mesh's origin.
Projection/Coordinate system must be known in order to associate (Lat, Long) to (X, Y)
CreationDate: 8/09/2017
Developer: fei.zhang@ga.gov.au
LastUpdate: 15/09/2017 FZ
"""
import csv
import glob
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mtpy.modeling.modem import Data, Model
from mtpy.utils.mtpylog import MtPyLog
logger = MtPyLog.get_mtpy_logger(__name__)
# logger.setLevel(logging.DEBUG)
class DataModelAnalysis(object):
    """Pair a ModEM data file (.dat) with a model file (.rho) and provide
    slicing, plotting and CSV-export utilities over the resistivity model.
    """

    def __init__(self, filedat, filerho, plot_orient='ew', **kwargs):
        """Constructor
        :param filedat: path2file.dat
        :param filerho: path2file.rho
        :param plot_orient: plot orientation ['ew','ns', 'z']
        """
        self.datfile = filedat
        self.rhofile = filerho

        # plot orientation 'ns' (north-south),'ew' (east-west) or
        # 'z' (horizontal slice))
        self.plot_orientation = plot_orient

        # slice location, in local grid coordinates (if it is a z slice, this
        # is slice depth)
        # self.slice_location = kwargs.pop('slice_location', 1000)

        # maximum distance in metres from vertical slice location and station
        self.station_dist = kwargs.pop('station_dist', 50000)
        # z limits (positive down so order is reversed)
        self.zlim = kwargs.pop('zlim', (200000, -2000))
        # colour limits (log10 resistivity)
        self.clim = kwargs.pop('clim', [0.3, 3.7])
        self.fig_size = kwargs.pop('fig_size', [12, 10])
        self.font_size = kwargs.pop('font_size', 16)
        self.border_linewidth = 2

        self.map_scale = kwargs.pop('map_scale', 'm')
        # make map scale: dscale converts metres into the display unit
        if self.map_scale == 'km':
            self.dscale = 1000.
        elif self.map_scale == 'm':
            self.dscale = 1.
        else:
            print(("Unknown map scale:", self.map_scale))

        self.xminorticks = kwargs.pop('xminorticks', 10000)
        self.yminorticks = kwargs.pop('yminorticks', 10000)

        # read in the model data-file and rho-file
        self._read_model_data()

        return

    def _read_model_data(self):
        """Load the ModEM data (.dat) and model (.rho) files and cache the
        east-west / north-south extents excluding the padding cells."""
        self.datObj = Data()
        self.datObj.read_data_file(data_fn=self.datfile)

        self.modObj = Model(model_fn=self.rhofile)
        self.modObj.read_model_file()

        self.ew_lim = (
            self.modObj.grid_east[self.modObj.pad_east], self.modObj.grid_east[-self.modObj.pad_east - 1])
        self.ns_lim = (
            self.modObj.grid_north[self.modObj.pad_north], self.modObj.grid_north[-self.modObj.pad_north - 1])

        # logger.debug("ns-limit %s", self.ns_lim)
        # logger.debug("ew-limit %s", self.ew_lim)
        # logger.info("station name list %s", self.datObj.station_locations['station'])
        # logger.info("station Lat list %s", self.datObj.station_locations['lat'])

        return

    def find_stations_in_meshgrid(self):
        """
        Find, for each station (station_name, sX, sY), the index (ix, iy) of
        the nearest cell centre in the regular mesh grid.

        :return: station_dict mapping (ix, iy) -> [name, sX, sY, lat, lon]
        """
        station_dict = {}

        sX, sY = self.datObj.station_locations.rel_east, self.datObj.station_locations.rel_north
        station_names = self.datObj.station_locations.station
        station_lats = self.datObj.station_locations.lat
        station_lons = self.datObj.station_locations.lon

        # get grid centres (finite element cells centres)
        gceast, gcnorth = [np.mean([arr[:-1], arr[1:]], axis=0) for arr in
                           [self.modObj.grid_east, self.modObj.grid_north]]

        n_stations = len(sX)
        for n in range(n_stations):
            # nearest cell centre in the east direction
            xdist = np.abs(gceast - sX[n])
            snos = np.where(xdist == np.amin(xdist))
            ix = snos[0][0]
            # nearest cell centre in the north direction
            ydist = np.abs(gcnorth - sY[n])
            snos = np.where(ydist == np.amin(ydist))
            iy = snos[0][0]

            logger.debug("Station Index: (%s, %s)", ix, iy)
            station_dict[(ix, iy)] = [station_names[n], sX[n], sY[n], station_lats[n],
                                      station_lons[n]]

        logger.debug(station_dict)

        return station_dict

    def set_plot_orientation(self, orient):
        """set a new plot orientation for plotting
        :param orient: z, ew, ns
        :raises Exception: for any other value
        """
        if orient in ['z', 'ew', 'ns']:
            self.plot_orientation = orient
        else:
            raise Exception("Error: unknown orientation value= %s" % orient)

    def get_slice_data(self, slice_location):
        """
        Get the resistivity slice nearest to the specified location.

        :param slice_location: local grid coordinate (depth when orientation is 'z')
        :return: (X, Y, res, sX, sY, xlim, ylim, title, actual_location)
        """
        # get grid centres (finite element cells centres)
        gcz = np.mean([self.modObj.grid_z[:-1],
                       self.modObj.grid_z[1:]], axis=0)
        gceast, gcnorth = [np.mean([arr[:-1], arr[1:]], axis=0) for arr in
                           [self.modObj.grid_east, self.modObj.grid_north]]

        # distance from slice to grid centre locations; pick the closest cell
        if self.plot_orientation == 'ew':
            sdist = np.abs(gcnorth - slice_location)
            snos = np.where(sdist == np.amin(sdist))
            sno = snos[0][0]
            actual_location = gcnorth[sno]
        elif self.plot_orientation == 'ns':
            sdist = np.abs(gceast - slice_location)
            snos = np.where(sdist == np.amin(sdist))
            sno = snos[0][0]
            actual_location = gceast[sno]
        elif self.plot_orientation == 'z':
            sdist = np.abs(gcz - slice_location)
            # find the closest slice index to specified location
            snos = np.where(sdist == np.amin(sdist))
            # unpack the index tuple and take the integer index
            sno = snos[0][0]
            actual_location = gcz[sno]
            print((type(snos), len(snos)))  # ((index1), (index2), (index3))

        logger.debug("the slice index number= %s and the actual location is %s", sno, actual_location)

        # get data for plotting
        # NOTE(review): the 'ew'/'ns' branches index station_locations like a
        # dict while the 'z' branch uses attribute access — confirm both styles
        # are supported by the Stations object.
        if self.plot_orientation == 'ew':
            X, Y, res = self.modObj.grid_east, self.modObj.grid_z, np.log10(
                self.modObj.res_model[sno, :, :].T)
            ss = np.where(np.abs(self.datObj.station_locations['rel_north'] - np.median(gcnorth)) < self.station_dist)[
                0]

            sX, sY = self.datObj.station_locations['rel_east'][
                ss], self.datObj.station_locations['elev'][ss]

            xlim = (self.modObj.grid_east[
                self.modObj.pad_east[1]], self.modObj.grid_east[-self.modObj.pad_east[1] - 1])
            ylim = self.zlim
            title = 'East-west slice at {} meters north'.format(gcnorth[sno])
        elif self.plot_orientation == 'ns':
            X, Y, res = self.modObj.grid_north, self.modObj.grid_z, np.log10(
                self.modObj.res_model[:, sno, :].T)
            # indices for selecting stations close to profile
            ss = np.where(
                np.abs(
                    self.datObj.station_locations['rel_east'] -
                    np.median(gceast)) < self.station_dist)[0]

            sX, sY = self.datObj.station_locations['rel_north'][
                ss], self.datObj.station_locations['elev'][ss]

            xlim = (self.modObj.grid_north[
                self.modObj.pad_north[1]], self.modObj.grid_north[-self.modObj.pad_north[1] - 1])
            ylim = self.zlim
            title = 'North-south slice at {} meters east'.format(gceast[sno])
        elif self.plot_orientation == 'z':  # for plotting X == EW  Y == NS
            Y, X, res = self.modObj.grid_north, self.modObj.grid_east, np.log10(self.modObj.res_model[:, :, sno])
            sY, sX = self.datObj.station_locations.rel_north, self.datObj.station_locations.rel_east
            ylim = (
                self.modObj.grid_north[self.modObj.pad_north], self.modObj.grid_north[-self.modObj.pad_north - 1])
            xlim = (self.modObj.grid_east[self.modObj.pad_east], self.modObj.grid_east[-self.modObj.pad_east - 1])
            title = 'Horizontal Slice at Depth {} meters'.format(gcz[sno])

        return (X, Y, res, sX, sY, xlim, ylim, title, actual_location)

    def create_csv(self, csvfile='tests/temp/Resistivity.csv'):
        """
        Write resistivity into the csvfile with the output columns:
        X, Y, Z, Log(Resistivity), StationName, StationX, StationY, Lat, Long, iGrid, jGrid
        where (X,Y,Z) are relative distances in meters from the mesh's origin.
        Projection/Coordinate system must be known in order to associate (Lat, Long) to (X, Y).
        Only cells that contain a station are exported.

        :return: path of the CSV file written
        """
        self.set_plot_orientation('z')
        z_cell_centres = np.mean([self.modObj.grid_z[:-1], self.modObj.grid_z[1:]], axis=0)

        # NOTE: the 'Log_Resisitivity' typo is kept for backward compatibility
        # with existing consumers of this CSV.  The two trailing grid-index
        # columns were previously written without header names (9-column
        # header vs 11-column rows); they are now named explicitly.
        csv_header = ['X', 'Y', 'Z', 'Log_Resisitivity', 'StationName',
                      'StationX', 'StationY', 'Lat', 'Long', 'iGrid', 'jGrid']

        stationd = self.find_stations_in_meshgrid()

        csvrows = []
        for zslice in z_cell_centres:
            (X, Y, res, sX, sY, xlim, ylim, title, Z_location) = self.get_slice_data(zslice)

            print((len(X), len(Y), Z_location, res.shape, len(sX), len(sY)))

            for i in range(len(X) - 1):
                for j in range(len(Y) - 1):
                    st = stationd.get((i, j), None)  # filter and subset for station location meshgrids
                    if st is not None:
                        arow = [X[i], Y[j], Z_location, res[j, i], st[0], st[1], st[2], st[3], st[4], i, j]
                        csvrows.append(arow)

        # BUGFIX: the file was opened with "wb", which makes csv.writer fail
        # under Python 3; text mode with newline='' is the documented usage.
        with open(csvfile, "w", newline="") as csvf:
            writer = csv.writer(csvf)
            writer.writerow(csv_header)
            writer.writerows(csvrows)

        logger.debug("Wrote data into CSV file %s", csvfile)

        return csvfile

    def plot_a_slice(self, slice_location=1000):
        """Create a resistivity pseudo-colour plot of the slice nearest to
        slice_location, with station markers and a labelled colourbar.
        """
        (X, Y, res, sX, sY, xlim, ylim, title, actual_location) = self.get_slice_data(slice_location)

        # make the plot
        fdict = {'size': self.font_size, 'weight': 'bold'}
        plt.figure(figsize=self.fig_size)
        plt.rcParams['font.size'] = self.font_size

        # plot station locations
        plt.plot(sX, sY, 'kv')  # station marker:'kv'

        mesh_plot = plt.pcolormesh(X, Y, res, cmap='bwr_r')

        plt.xlim(*xlim)
        plt.ylim(*ylim)

        # set title
        plt.title(title, fontdict=fdict)

        # if self.plot_orientation == 'z':
        #     plt.gca().set_aspect('equal')  # an axis may be too small to view
        plt.gca().set_aspect('auto')

        plt.clim(*self.clim)

        # FZ: fix miss-placed colorbar
        ax = plt.gca()
        ax.xaxis.set_minor_locator(
            MultipleLocator(
                self.xminorticks))  # /self.dscale
        ax.yaxis.set_minor_locator(
            MultipleLocator(
                self.yminorticks))  # /self.dscale
        ax.tick_params(axis='both', which='minor', width=2, length=5)
        ax.tick_params(
            axis='both',
            which='major',
            width=3,
            length=15,
            labelsize=20)
        for axis in ['top', 'bottom', 'left', 'right']:
            ax.spines[axis].set_linewidth(self.border_linewidth)

        # rescale the tick labels into the chosen map unit
        # http://stackoverflow.com/questions/10171618/changing-plot-scale-by-a-factor-in-matplotlib
        xticks = ax.get_xticks() / self.dscale
        ax.set_xticklabels(xticks)
        yticks = ax.get_yticks() / self.dscale
        ax.set_yticklabels(yticks)

        # create an axes on the right side of ax. The width of cax will be 5%
        # of ax and the padding between cax and ax will be fixed at 0.05 inch.
        divider = make_axes_locatable(ax)
        # pad = separation from figure to colorbar
        cax = divider.append_axes("right", size="5%", pad=0.2)
        mycb = plt.colorbar(mesh_plot, cax=cax, use_gridspec=True)
        mycb.outline.set_linewidth(self.border_linewidth)
        # raw string avoids invalid-escape warnings; rendered text unchanged
        mycb.set_label(r'Resistivity ($\Omega \cdot$m)', fontdict=fdict)

        if self.plot_orientation == 'z':
            ax.set_ylabel('Northing (' + self.map_scale + ')', fontdict=fdict)
            ax.set_xlabel('Easting (' + self.map_scale + ')', fontdict=fdict)
            ax.set_aspect(1)
        if self.plot_orientation == 'ew':
            ax.set_ylabel('Depth (' + self.map_scale + ')', fontdict=fdict)
            ax.set_xlabel('Easting (' + self.map_scale + ')', fontdict=fdict)
        if self.plot_orientation == 'ns':
            ax.set_ylabel('Depth (' + self.map_scale + ')', fontdict=fdict)
            ax.set_xlabel('Northing (' + self.map_scale + ')', fontdict=fdict)

        plt.show()

        return

    def plot_multi_slices(self, slice_list=None):
        """
        Visualize multiple slices specified by slice_list.
        If it is None then will plot every slice at the cell-centres.
        :param slice_list: iterable of slice locations, or None
        """
        if slice_list is None:
            # slice_number = 100  # number of evenly spaced slices
            if self.plot_orientation == 'ns':
                # slice_locs = np.linspace(self.ns_lim[0], self.ns_lim[1], num=slice_number
                # It's better to use cell centres
                slice_locs = np.mean([self.modObj.grid_north[:-1], self.modObj.grid_north[1:]], axis=0)
            if self.plot_orientation == 'ew':
                slice_locs = np.mean([self.modObj.grid_east[:-1], self.modObj.grid_east[1:]], axis=0)
            if self.plot_orientation == 'z':
                slice_locs = np.mean([self.modObj.grid_z[:-1], self.modObj.grid_z[1:]], axis=0)
        else:
            slice_locs = slice_list

        logger.debug("Slice locations= %s", slice_locs)
        logger.debug("Number of slices to be visualised %s", len(slice_locs))

        for dist in slice_locs:
            sdist = int(dist)

            print(("**** The user-input slice location is: ****", sdist))
            print("**** The actual location will be at the nearest cell centre ****")

            # plot resistivity image at slices in three orientations at a given slice_location=sdist
            self.plot_a_slice(slice_location=sdist)  # actual location will be nearest cell centre

            plt.show()
#########################################################################
# How to call the create csv function
# Usage:
# python mtpy/modeling/modem/data_model_analysis.py /e/Data/Modeling/Isa/100hs_flat_BB/Isa_run3_NLCG_048.dat /e/Data/Modeling/Isa/100hs_flat_BB/Isa_run3_NLCG_048.rho
# python mtpy/modeling/modem/data_model_analysis.py /e/MTPY2_Outputs/GA_UA_edited_10s-10000s_modem_inputs/ModEM_Data.dat /e/MTPY2_Outputs/GA_UA_edited_10s-10000s_modem_inputs/ModEM_Model.ws
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    # Take commandline input
    if len(sys.argv) < 2:
        # BUGFIX: with no arguments, datf/rhof were never assigned and the
        # constructor call below died with a NameError; fail with usage instead.
        print("USAGE: python data_model_analysis.py <model_dir> | <file.dat> <file.rho>")
        sys.exit(1)

    if len(sys.argv) == 2:  # A model dir provided
        modeldir = sys.argv[1]
        datf = os.path.join(modeldir, 'ModEM_Data.dat')
        rhofiles = glob.glob(os.path.join(modeldir, '*.rho'))

        print(rhofiles)

        if len(rhofiles) < 1:
            print(("No rho files found in the dir %s", modeldir))
            sys.exit(1)
        else:
            # the file with highest numbers in the last 3 numbers before *.rho
            rhof = sorted(rhofiles)[-1]

        print(("Effective Files Used in Plot: ", datf, rhof))

    # dat and rho file both provided
    if len(sys.argv) >= 3:
        datf = sys.argv[1]
        rhof = sys.argv[2]

    # construct plot object
    # self = DataModelAnalysis(datf, rhof)  # default map_scale='m')
    myObj = DataModelAnalysis(datf, rhof, map_scale='km')

    myObj.create_csv()

    # To visualize slices, see also the script: mtpy/imaging/modem_plot_slices.py
| gpl-3.0 |
PrefPy/prefpy | prefpy/mechanism.py | 1 | 85490 | """
Authors: Kevin J. Hwang
Jun Wang
Tyler Shepherd
"""
import io
import math
import time
from numpy import *
import itertools
from preference import Preference
from profile import Profile
import copy
import sys
import networkx as nx
from collections import defaultdict
import matplotlib.pyplot as plt
from queue import PriorityQueue
class Mechanism():
    """
    Abstract base for all voting mechanisms.  Concrete mechanisms implement
    getCandScoresMap(profile) and set the following attribute.

    :ivar bool maximizeCandScore: True if the mechanism requires winners to maximize their score
        and False otherwise.
    """

    def getWinners(self, profile):
        """
        Returns a list of all winning candidates given an election profile.
        Assumes getCandScoresMap(profile) is implemented by the concrete class.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        scores = self.getCandScoresMap(profile)

        # The winning score is the maximum or the minimum depending on the
        # mechanism's convention.
        if self.maximizeCandScore == True:
            target = max(scores.values())
        else:
            target = min(scores.values())

        # Every candidate achieving the winning score is a winner.
        return [cand for cand in scores.keys() if scores[cand] == target]

    def getRanking(self, profile):
        """
        Returns a list containing one ranking: a list of tiers (lists of
        candidates) ordered from best to worst.  Assumes
        getCandScoresMap(profile) is implemented by the concrete class.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Group candidates by score, preserving first-seen order within a tier.
        tiers = dict()
        for cand, score in self.getCandScoresMap(profile).items():
            tiers.setdefault(score, []).append(cand)

        # Best scores first when maximizing, worst first when minimizing.
        ordered_scores = sorted(tiers.keys(), reverse=self.maximizeCandScore == True)

        ranking = [list(tiers[score]) for score in ordered_scores]

        # Wrapped in an outer list for future extensions.
        return [ranking]
class MechanismPosScoring(Mechanism):
    """
    The positional scoring mechanism. This class is the parent class for several mechanisms. This
    can also be constructed directly. All child classes are expected to implement the
    getScoringVector() method.

    :ivar list<int> scoringVector: A list of integers (or floats) that give the scores assigned to
        each position in a ranking from first to last.
    """

    def __init__(self, scoringVector):
        self.maximizeCandScore = True
        self.scoringVector = scoringVector

    def isProfileValid(self, profile):
        # Only total orders, with or without ties, are supported.
        return profile.getElecType() in ("soc", "toc")

    def getScoringVector(self, profile):
        """
        Returns the scoring vector. This function is called by getCandScoresMap().

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # The vector must assign a score to every possible rank position.
        if len(self.scoringVector) != profile.numCands:
            print("ERROR: scoring vector is not the correct length")
            exit()
        return self.scoringVector

    def getCandScoresMap(self, profile):
        """
        Returns a dictionary mapping the integer representation of each
        candidate to the total score it earned under the scoring vector.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Complete orderings over candidates (ties allowed) are required.
        if profile.getElecType() not in ("soc", "toc"):
            print("ERROR: unsupported election type")
            exit()

        # Start every candidate at zero.
        candScoresMap = {cand: 0.0 for cand in profile.candMap.keys()}

        scoringVector = self.getScoringVector(profile)

        # Each distinct ranking contributes (score at rank) * (multiplicity).
        for rankMap, count in zip(profile.getRankMaps(), profile.getPreferenceCounts()):
            for cand, pos in rankMap.items():
                candScoresMap[cand] += scoringVector[pos - 1] * count

        return candScoresMap

    def getMov(self, profile):
        """
        Returns an integer that is equal to the margin of victory of the election profile.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        import mov
        return mov.MoVScoring(profile, self.getScoringVector(profile))
class MechanismPlurality(MechanismPosScoring):
    """
    The plurality mechanism. This inherits from the positional scoring mechanism.

    Note: a getCandScoresMap() override that duplicated the parent's
    implementation verbatim was removed; the inherited method is identical.
    """

    def __init__(self):
        self.maximizeCandScore = True

    def getScoringVector(self, profile):
        """
        Returns the scoring vector [1,0,0,...,0]. This function is called by getCandScoresMap()
        which is implemented in the parent class.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # One point for the top choice, nothing for every other position.
        return [1] + [0] * (profile.numCands - 1)
class MechanismVeto(MechanismPosScoring):
    """
    The veto mechanism. This inherits from the positional scoring mechanism.
    """

    def __init__(self):
        self.maximizeCandScore = True

    def getCandScoresMap(self, profile):
        """
        Returns a dictionary mapping the integer representation of each
        candidate to its veto score.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Complete orderings over candidates (ties allowed) are required.
        if profile.getElecType() not in ("soc", "toc"):
            print("ERROR: unsupported election type")
            exit()

        # Start every candidate at zero.
        vetoScores = {cand: 0.0 for cand in profile.candMap.keys()}

        # A candidate earns one point per ballot on which it is NOT ranked in
        # the last (worst) position; with ties, all candidates sharing the
        # worst position are vetoed.
        for rankMap, count in zip(profile.getRankMaps(), profile.getPreferenceCounts()):
            worst = max(rankMap.values())
            for cand, pos in rankMap.items():
                if pos < worst:
                    vetoScores[cand] += count

        return vetoScores
class MechanismBorda(MechanismPosScoring):
    """
    The Borda mechanism. This inherits from the positional scoring mechanism.
    """

    def __init__(self):
        self.maximizeCandScore = True

    def getScoringVector(self, profile):
        """
        Returns the scoring vector [m-1,m-2,m-3,...,0] where m is the number of candidates in the
        election profile. This function is called by getCandScoresMap() which is implemented in the
        parent class.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Descending integers from m-1 down to 0.
        return list(range(profile.numCands - 1, -1, -1))
class MechanismKApproval(MechanismPosScoring):
    """
    The top-k mechanism. This inherits from the positional scoring mechanism.

    :ivar int k: The number of positions that recieve a score of 1.
    """

    def __init__(self, k):
        self.maximizeCandScore = True
        self.k = k

    def getScoringVector(self, profile):
        """
        Returns a scoring vector such that the first k candidates recieve 1 point and all others
        recieve 0. This function is called by getCandScoresMap() which is implemented in the parent
        class.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # BUGFIX: clamp locally instead of permanently overwriting self.k
        # (the previous code mutated the mechanism whenever it scored a
        # profile with fewer than k candidates).
        k = min(self.k, profile.numCands)
        return [1] * k + [0] * (profile.numCands - k)
class MechanismSimplifiedBucklin(Mechanism):
    """
    The simplified Bucklin mechanism.
    """

    def __init__(self):
        # Lower Bucklin scores (reaching a majority earlier) are better.
        self.maximizeCandScore = False

    def getCandScoresMap(self, profile):
        """
        Returns a dictionary that associates integer representations of each candidate with their
        Bucklin score: the smallest t such that the candidate is ranked in the
        first t positions on at least half the ballots.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Currently, we expect the profile to contain complete ordering over candidates.
        elecType = profile.getElecType()
        if elecType != "soc" and elecType != "toc":
            print("ERROR: unsupported profile type")
            exit()

        bucklinScores = dict()
        rankMaps = profile.getRankMaps()
        preferenceCounts = profile.getPreferenceCounts()
        for cand in profile.candMap.keys():
            # We keep track of the number of times a candidate is ranked in the first t positions.
            numTimesRanked = 0

            # We increase t in increments of 1 until we find t such that the candidate is ranked in the
            # first t positions in at least half the votes.
            # numTimesRanked deliberately accumulates across iterations of t:
            # after the t-th pass it counts all ballots ranking cand at
            # positions 1..t.
            for t in range(1, profile.numCands + 1):
                for i in range(0, len(rankMaps)):
                    if (rankMaps[i][cand] == t):
                        numTimesRanked += preferenceCounts[i]
                if numTimesRanked >= math.ceil(float(profile.numVoters) / 2):
                    bucklinScores[cand] = t
                    break

        return bucklinScores

    def getMov(self, profile):
        """
        Returns an integer that is equal to the margin of victory of the election profile.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # from . import mov
        import mov
        return mov.MoVSimplifiedBucklin(profile)
class MechanismCopeland(Mechanism):
    """
    The Copeland mechanism.

    :ivar float alpha: The score awarded to both candidates of a tied
        pairwise contest (commonly 0.5).
    """

    def __init__(self, alpha):
        self.maximizeCandScore = True
        # BUGFIX: the constructor previously discarded its ``alpha`` argument
        # and hard-coded 0.5; honour the caller's value.
        self.alpha = alpha

    def getCandScoresMap(self, profile):
        """
        Returns a dictionary that associates integer representations of each candidate with their
        Copeland score.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Currently, we expect the profile to contain complete ordering over candidates. Ties are
        # allowed however.
        elecType = profile.getElecType()
        if elecType != "soc" and elecType != "toc":
            print("ERROR: unsupported election type")
            exit()

        # Initialize each Copeland score as 0.0.
        copelandScores = dict()
        for cand in profile.candMap.keys():
            copelandScores[cand] = 0.0

        # For each pair of candidates, award a full point to the pairwise
        # winner, or alpha to each candidate of a tied pair.
        wmgMap = profile.getWmg()
        for cand1, cand2 in itertools.combinations(wmgMap.keys(), 2):
            if cand2 in wmgMap[cand1].keys():
                if wmgMap[cand1][cand2] > 0:
                    copelandScores[cand1] += 1.0
                elif wmgMap[cand1][cand2] < 0:
                    copelandScores[cand2] += 1.0

                # If a pair of candidates is tied, we add alpha to both scores.
                else:
                    copelandScores[cand1] += self.alpha
                    copelandScores[cand2] += self.alpha

        return copelandScores
class MechanismMaximin(Mechanism):
    """
    The maximin (Simpson-Kramer) mechanism: a candidate's score is her worst
    pairwise result, and higher scores are better.
    """

    def __init__(self):
        self.maximizeCandScore = True

    def getCandScoresMap(self, profile):
        """
        Returns a dictionary that associates integer representations of each candidate with their
        maximin score.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Only complete orders over candidates (ties allowed) are supported.
        elecType = profile.getElecType()
        if elecType != "soc" and elecType != "toc":
            print("ERROR: unsupported election type")
            exit()

        wmg = profile.getWmg()

        # Start every candidate at +infinity and lower it to the weakest
        # pairwise margin encountered against any opponent.
        maximinScores = {cand: float("inf") for cand in wmg.keys()}
        for candA, candB in itertools.combinations(wmg.keys(), 2):
            if candB in wmg[candA].keys():
                maximinScores[candA] = min(maximinScores[candA], wmg[candA][candB])
                maximinScores[candB] = min(maximinScores[candB], wmg[candB][candA])
        return maximinScores
class MechanismSchulze(Mechanism):
    """
    The Schulze mechanism.

    Candidates are compared via "strongest paths": the strength of a path is its
    weakest pairwise link, and a candidate scores a point for every opponent
    against whom her strongest path is at least as strong as the reverse path.
    """
    def __init__(self):
        # Higher Schulze scores are better.
        self.maximizeCandScore = True
    def computeStrongestPaths(self, profile, pairwisePreferences):
        """
        Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and
        cand2, with the strongest path from cand1 to cand2.

        :ivar Profile profile: A Profile object that represents an election profile.
        :ivar dict<int,dict<int,int>> pairwisePreferences: A two-dimensional dictionary that
            associates every pair of candidates, cand1 and cand2, with number of voters who prefer
            cand1 to cand2.
        """
        cands = profile.candMap.keys()
        numCands = len(cands)

        # Initialize the two-dimensional dictionary that will hold our strongest paths.
        strongestPaths = dict()
        for cand in cands:
            strongestPaths[cand] = dict()

        # NOTE(review): the index loops below assume candidates are numbered
        # 1..numCands — confirm this matches profile.candMap's keys.
        # Seed direct paths: a pairwise win contributes its full support count,
        # anything else contributes 0.
        for i in range(1, numCands + 1):
            for j in range(1, numCands + 1):
                if (i == j):
                    continue
                if pairwisePreferences[i][j] > pairwisePreferences[j][i]:
                    strongestPaths[i][j] = pairwisePreferences[i][j]
                else:
                    strongestPaths[i][j] = 0

        # Floyd-Warshall-style relaxation with i as the intermediate node: a
        # path's strength is its weakest link (min), and we keep the strongest
        # such path (max).
        for i in range(1, numCands + 1):
            for j in range(1, numCands + 1):
                if (i == j):
                    continue
                for k in range(1, numCands + 1):
                    if (i == k or j == k):
                        continue
                    strongestPaths[j][k] = max(strongestPaths[j][k], min(strongestPaths[j][i], strongestPaths[i][k]))

        return strongestPaths
    def computePairwisePreferences(self, profile):
        """
        Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and
        cand2, with number of voters who prefer cand1 to cand2.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        cands = profile.candMap.keys()

        # Initialize the two-dimensional dictionary that will hold our pairwise preferences.
        pairwisePreferences = dict()
        for cand in cands:
            pairwisePreferences[cand] = dict()
        for cand1 in cands:
            for cand2 in cands:
                if cand1 != cand2:
                    pairwisePreferences[cand1][cand2] = 0

        # Accumulate each vote's pairwise comparisons, weighted by how many
        # times that vote occurred (preference.count).
        for preference in profile.preferences:
            wmgMap = preference.wmgMap
            for cand1, cand2 in itertools.combinations(cands, 2):

                # If either candidate was unranked, we assume that they are lower ranked than all
                # ranked candidates.
                if cand1 not in wmgMap.keys():
                    if cand2 in wmgMap.keys():
                        pairwisePreferences[cand2][cand1] += 1 * preference.count
                elif cand2 not in wmgMap.keys():
                    if cand1 in wmgMap.keys():
                        pairwisePreferences[cand1][cand2] += 1 * preference.count
                elif wmgMap[cand1][cand2] == 1:
                    pairwisePreferences[cand1][cand2] += 1 * preference.count
                elif wmgMap[cand1][cand2] == -1:
                    pairwisePreferences[cand2][cand1] += 1 * preference.count

        return pairwisePreferences
    def getCandScoresMap(self, profile):
        """
        Returns a dictionary that associates integer representations of each candidate with the
        number of other candidates for which her strongest path to the other candidate is greater
        than or equal to the other candidate's strongest path to her.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        cands = profile.candMap.keys()
        pairwisePreferences = self.computePairwisePreferences(profile)
        strongestPaths = self.computeStrongestPaths(profile, pairwisePreferences)

        # For each candidate, count how many times p[E,X] >= p[X,E] over all
        # opponents X; that count is the candidate's score.
        betterCount = dict()
        for cand in cands:
            betterCount[cand] = 0
        for cand1 in cands:
            for cand2 in cands:
                if cand1 == cand2:
                    continue
                if strongestPaths[cand1][cand2] >= strongestPaths[cand2][cand1]:
                    betterCount[cand1] += 1
        return betterCount
def getKendallTauScore(myResponse, otherResponse):
    """
    Returns the normalized Kendall tau distance between two rankings.

    :ivar dict myResponse: Maps candidates to their positions in the first ranking.
    :ivar dict otherResponse: Maps candidates to their positions in the second ranking.
    :return: A value in [0, 1]: 0 when the rankings agree on every pair of
        positions, 1 when they disagree on every pair.
    """
    kt = 0

    # BUG FIX: dict.values() returns a non-indexable view object in Python 3,
    # so materialize both sequences before index-based comparison (the original
    # code raised TypeError on list1[itr1]).
    list1 = list(myResponse.values())
    list2 = list(otherResponse.values())

    # With fewer than two entries there are no pairs to compare.
    if len(list1) <= 1:
        return kt

    # Count discordant pairs: pairs ordered one way in list1 and the opposite
    # way in list2.
    for itr1 in range(0, len(list1) - 1):
        for itr2 in range(itr1 + 1, len(list2)):
            if ((list1[itr1] > list1[itr2]
                 and list2[itr1] < list2[itr2])
                    or (list1[itr1] < list1[itr2]
                        and list2[itr1] > list2[itr2])):
                kt += 1

    # Normalize by the total number of pairs, n*(n-1)/2, so the result is in [0, 1].
    kt = (kt * 2) / (len(list1) * (len(list1) - 1))

    return kt
class MechanismSTV():
    """
    The STV mechanism.

    Single Transferable Vote repeatedly eliminates a candidate with the lowest
    plurality score. The searches below branch on every tied elimination choice
    to enumerate all PUT (parallel-universes tie-breaking) winners.
    """
    def STVwinners(self, profile):
        """
        Dispatch to the winner computation matching the profile's election type.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        elecType = profile.getElecType()
        if elecType == "soc" or elecType == "csv":
            return self.STVsocwinners(profile)
        elif elecType == "toc":
            return self.STVtocwinners(profile)
        else:
            print("ERROR: unsupported profile type")
            exit()
    def STVsocwinners(self, profile):
        """
        Returns an integer list that represents all possible winners of a profile under STV rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        ordering = profile.getOrderVectors()
        prefcounts = profile.getPreferenceCounts()
        m = profile.numCands
        # Candidates are numbered either 0..m-1 or 1..m depending on the input.
        if min(ordering[0]) == 0:
            startstate = set(range(m))
        else:
            startstate = set(range(1, m + 1))
        # Shrink the problem up-front by eliminating forced (untied) losers.
        ordering, startstate = self.preprocessing(ordering, prefcounts, m, startstate)
        m_star = len(startstate)
        known_winners = set()
        # ----------Some statistics--------------
        # Visited-state cache so each remaining-candidate set is expanded once.
        hashtable2 = set()
        # push the node of start state into the priority queue
        root = Node(value=startstate)
        stackNode = []
        stackNode.append(root)
        while stackNode:
            # ------------pop the current node-----------------
            node = stackNode.pop()
            # -------------------------------------------------
            state = node.value.copy()
            # use heuristic to delete all the candidates which satisfy the following condition
            # goal state 1: if the state set contains only 1 candidate, then stop
            if len(state) == 1 and list(state)[0] not in known_winners:
                known_winners.add(list(state)[0])
                continue
            # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop
            if state <= known_winners:
                continue
            # ----------Compute plurality score for the current remaining candidates--------------
            plural_score = self.get_plurality_scores3(prefcounts, ordering, state, m_star)
            minscore = min(plural_score.values())
            # Branch: eliminate each candidate tied for the minimum score.
            for to_be_deleted in state:
                if plural_score[to_be_deleted] == minscore:
                    child_state = state.copy()
                    child_state.remove(to_be_deleted)
                    tpc = tuple(sorted(child_state))
                    if tpc in hashtable2:
                        continue
                    else:
                        hashtable2.add(tpc)
                        child_node = Node(value=child_state)
                        stackNode.append(child_node)
        return sorted(known_winners)
    def STVtocwinners(self, profile):
        """
        Returns an integer list that represents all possible winners of a profile under STV rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        ordering = profile.getOrderVectors()
        prefcounts = profile.getPreferenceCounts()
        len_prefcounts = len(prefcounts)
        m = profile.numCands
        # Rank maps are needed because orders with ties (toc) cannot be scored
        # from the order vectors alone.
        rankmaps = profile.getRankMaps()
        if min(ordering[0]) == 0:
            startstate = set(range(m))
        else:
            startstate = set(range(1, m + 1))
        # ordering, startstate = self.preprocessing(ordering, prefcounts, m, startstate)
        # m_star = len(startstate)
        known_winners = set()
        # ----------Some statistics--------------
        # Visited-state cache so each remaining-candidate set is expanded once.
        hashtable2 = set()
        root = Node(value=startstate)
        stackNode = []
        stackNode.append(root)
        while stackNode:
            # ------------pop the current node-----------------
            node = stackNode.pop()
            # -------------------------------------------------
            state = node.value.copy()
            # use heuristic to delete all the candidates which satisfy the following condition
            # goal state 1: if the state set contains only 1 candidate, then stop
            if len(state) == 1 and list(state)[0] not in known_winners:
                known_winners.add(list(state)[0])
                continue
            # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop
            if state <= known_winners:
                continue
            # ----------Compute plurality score for the current remaining candidates--------------
            plural_score = self.get_plurality_scores4(prefcounts, rankmaps, state)
            minscore = min(plural_score.values())
            # Branch: eliminate each candidate tied for the minimum score.
            for to_be_deleted in state:
                if plural_score[to_be_deleted] == minscore:
                    child_state = state.copy()
                    child_state.remove(to_be_deleted)
                    tpc = tuple(sorted(child_state))
                    if tpc in hashtable2:
                        continue
                    else:
                        hashtable2.add(tpc)
                        child_node = Node(value=child_state)
                        stackNode.append(child_node)
        return sorted(known_winners)
    def preprocessing(self, ordering, prefcounts, m, startstate):
        """
        Eliminate zero-score candidates and any forced (untied) minimum-score
        losers before the search starts, returning the reduced ordering and the
        reduced candidate set.
        """
        plural_score = self.get_plurality_scores3(prefcounts, ordering, startstate, m)
        # Candidates with no first-place support can never survive; drop them.
        state = set([key for key, value in plural_score.items() if value != 0])
        ordering = self.construct_ordering(ordering, prefcounts, state)
        plural_score = dict([(key, value) for key, value in plural_score.items() if value != 0])
        minscore = min(plural_score.values())
        to_be_deleted = [key for key, value in plural_score.items() if value == minscore]
        if len(to_be_deleted) > 1:
            # A tie at the minimum means the branching search must take over.
            return ordering, state
        else:
            # While the loser is unique, elimination is deterministic.
            while len(to_be_deleted) == 1 and len(state) > 1:
                state.remove(to_be_deleted[0])
                plural_score = self.get_plurality_scores3(prefcounts, ordering, state, m)
                minscore = min(plural_score.values())
                to_be_deleted = [key for key, value in plural_score.items() if value == minscore]
            ordering = self.construct_ordering(ordering, prefcounts, state)
            return ordering, state
    def construct_ordering(self, ordering, prefcounts, state):
        """Restrict every order vector to the candidates still in state."""
        new_ordering = []
        for i in range(len(prefcounts)):
            new_ordering.append([x for x in ordering[i] if x in state])
        return new_ordering
    def get_plurality_scores3(self, prefcounts, ordering, state, m):
        """
        Plurality scores over the candidates in state: each vote counts for its
        highest-ranked surviving candidate, weighted by prefcounts.
        """
        plural_score = {}
        plural_score = plural_score.fromkeys(state, 0)
        for i in range(len(prefcounts)):
            for j in range(m):
                if ordering[i][j] in state:
                    plural_score[ordering[i][j]] += prefcounts[i]
                    break
        return plural_score
    def get_plurality_scores4(self, prefcounts, rankmaps, state):
        """
        Plurality scores for orders with ties: every candidate sharing the best
        (lowest) surviving rank in a vote receives that vote's full weight.
        """
        plural_score = {}
        plural_score = plural_score.fromkeys(state, 0)
        for i in range(len(prefcounts)):
            temp = list(filter(lambda x: x[0] in state, list(rankmaps[i].items())))
            min_value = min([value for key, value in temp])
            for j in state:
                if rankmaps[i][j] == min_value:
                    plural_score[j] += prefcounts[i]
        return plural_score
class MechanismBaldwin():
    """
    The Baldwin mechanism.

    Baldwin's rule repeatedly eliminates the candidate with the lowest Borda
    score. The searches below branch on every tied elimination choice to
    enumerate all PUT (parallel-universes tie-breaking) winners.
    """
    def baldwin_winners(self, profile):
        """
        Dispatch to the winner computation matching the profile's election type.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        elecType = profile.getElecType()
        if elecType == "soc" or elecType == "csv":
            return self.baldwinsoc_winners(profile)
        elif elecType == "toc":
            return self.baldwintoc_winners(profile)
        else:
            print("ERROR: unsupported profile type")
            exit()
    def baldwinsoc_winners(self, profile):
        """
        Returns an integer list that represents all possible winners of a profile under baldwin rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        ordering = profile.getOrderVectors()
        m = profile.numCands
        prefcounts = profile.getPreferenceCounts()
        # Candidates are numbered either 0..m-1 or 1..m depending on the input.
        if min(ordering[0]) == 0:
            startstate = set(range(m))
        else:
            startstate = set(range(1, m + 1))
        wmg = self.getWmg2(prefcounts, ordering, startstate, normalize=False)
        known_winners = set()
        # Visited-state cache so each remaining-candidate set is expanded once.
        hashtable2 = set()
        # Depth-first search over elimination orders, starting from the full set.
        root = Node(value=startstate)
        stackNode = []
        stackNode.append(root)
        while stackNode:
            node = stackNode.pop()
            state = node.value.copy()
            # goal state 1: if the state set contains only 1 candidate, it is a winner
            if len(state) == 1 and list(state)[0] not in known_winners:
                known_winners.add(list(state)[0])
                continue
            # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop
            if state <= known_winners:
                continue
            # Borda-style score of each remaining candidate: sum of pairwise
            # support against the other remaining candidates.
            plural_score = dict()
            for cand in state:
                plural_score[cand] = 0
            for cand1, cand2 in itertools.permutations(state, 2):
                plural_score[cand1] += wmg[cand1][cand2]
            # Branch: eliminate each candidate tied for the minimum score.
            minscore = min(plural_score.values())
            for to_be_deleted in state:
                if plural_score[to_be_deleted] == minscore:
                    child_state = state.copy()
                    child_state.remove(to_be_deleted)
                    tpc = tuple(sorted(child_state))
                    if tpc in hashtable2:
                        continue
                    else:
                        hashtable2.add(tpc)
                        child_node = Node(value=child_state)
                        stackNode.append(child_node)
        return sorted(known_winners)
    def baldwintoc_winners(self, profile):
        """
        Returns an integer list that represents all possible winners of a profile under baldwin rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        ordering = profile.getOrderVectors()
        m = profile.numCands
        prefcounts = profile.getPreferenceCounts()
        # Rank maps are needed because orders with ties (toc) cannot be scored
        # from the order vectors alone.
        rankmaps = profile.getRankMaps()
        if min(ordering[0]) == 0:
            startstate = set(range(m))
        else:
            startstate = set(range(1, m + 1))
        wmg = self.getWmg3(prefcounts, rankmaps, startstate, normalize=False)
        known_winners = set()
        # Visited-state cache so each remaining-candidate set is expanded once.
        hashtable2 = set()
        # Depth-first search over elimination orders, starting from the full set.
        root = Node(value=startstate)
        stackNode = []
        stackNode.append(root)
        while stackNode:
            node = stackNode.pop()
            state = node.value.copy()
            # goal state 1: if the state set contains only 1 candidate, it is a winner
            if len(state) == 1 and list(state)[0] not in known_winners:
                known_winners.add(list(state)[0])
                continue
            # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop
            if state <= known_winners:
                continue
            # Borda-style score of each remaining candidate: sum of pairwise
            # support against the other remaining candidates.
            plural_score = dict()
            for cand in state:
                plural_score[cand] = 0
            for cand1, cand2 in itertools.permutations(state, 2):
                plural_score[cand1] += wmg[cand1][cand2]
            # Branch: eliminate each candidate tied for the minimum score.
            # (The unused `childbranch` counter from the original was removed.)
            minscore = min(plural_score.values())
            for to_be_deleted in state:
                if plural_score[to_be_deleted] == minscore:
                    child_state = state.copy()
                    child_state.remove(to_be_deleted)
                    tpc = tuple(sorted(child_state))
                    if tpc in hashtable2:
                        continue
                    else:
                        hashtable2.add(tpc)
                        child_node = Node(value=child_state)
                        stackNode.append(child_node)
        return sorted(known_winners)
    def getWmg2(self, prefcounts, ordering, state, normalize=False):
        """
        Generate a weighted majority graph that represents the whole profile. The function will
        return a two-dimensional dictionary that associates integer representations of each pair of
        candidates, cand1 and cand2, with the number of times cand1 is ranked above cand2.

        :ivar bool normalize: If normalize is True, the function will return a normalized graph
            where each edge has been divided by the value of the largest edge.
        """
        # Initialize a new dictionary for our final weighted majority graph.
        wmgMap = dict()
        for cand in state:
            wmgMap[cand] = dict()
        for cand1, cand2 in itertools.combinations(state, 2):
            wmgMap[cand1][cand2] = 0
            wmgMap[cand2][cand1] = 0

        # Each order vector lists candidates best-first, so cand1 appearing
        # before cand2 means cand1 is preferred; weight by vote multiplicity.
        for i in range(0, len(prefcounts)):
            for cand1, cand2 in itertools.combinations(ordering[i], 2):
                wmgMap[cand1][cand2] += prefcounts[i]

        # By default, we assume that the weighted majority graph should not be normalized. If
        # desired, we normalize by dividing each edge by the value of the largest edge.
        if normalize == True:
            maxEdge = float('-inf')
            for cand in wmgMap.keys():
                maxEdge = max(maxEdge, max(wmgMap[cand].values()))
            for cand1 in wmgMap.keys():
                for cand2 in wmgMap[cand1].keys():
                    wmgMap[cand1][cand2] = float(wmgMap[cand1][cand2]) / maxEdge

        return wmgMap
    def getWmg3(self, prefcounts, rankmaps, state, normalize=False):
        """
        Generate a weighted majority graph that represents the whole profile. The function will
        return a two-dimensional dictionary that associates integer representations of each pair of
        candidates, cand1 and cand2, with the number of times cand1 is ranked above cand2.

        :ivar bool normalize: If normalize is True, the function will return a normalized graph
            where each edge has been divided by the value of the largest edge.
        """
        # Initialize a new dictionary for our final weighted majority graph.
        wmgMap = dict()
        for cand in state:
            wmgMap[cand] = dict()
        for cand1, cand2 in itertools.combinations(state, 2):
            wmgMap[cand1][cand2] = 0
            wmgMap[cand2][cand1] = 0

        # A lower rank value means a better position; ties contribute nothing.
        # Weight every comparison by the vote multiplicity.
        for i in range(0, len(prefcounts)):
            for cand1, cand2 in itertools.combinations(rankmaps[i].keys(), 2):
                if rankmaps[i][cand1] < rankmaps[i][cand2]:
                    wmgMap[cand1][cand2] += prefcounts[i]
                elif rankmaps[i][cand1] > rankmaps[i][cand2]:
                    wmgMap[cand2][cand1] += prefcounts[i]

        # By default, we assume that the weighted majority graph should not be normalized. If
        # desired, we normalize by dividing each edge by the value of the largest edge.
        if normalize == True:
            maxEdge = float('-inf')
            for cand in wmgMap.keys():
                maxEdge = max(maxEdge, max(wmgMap[cand].values()))
            for cand1 in wmgMap.keys():
                for cand2 in wmgMap[cand1].keys():
                    wmgMap[cand1][cand2] = float(wmgMap[cand1][cand2]) / maxEdge

        # BUG FIX: removed leftover debug `print("wmg=", wmgMap)` that wrote the
        # entire graph to stdout on every call.
        return wmgMap
class MechanismCoombs():
    """
    The Coombs mechanism.

    Coombs' rule repeatedly eliminates the candidate with the most last-place
    votes. The searches below branch on every tied elimination choice to
    enumerate all PUT (parallel-universes tie-breaking) winners.
    """
    def coombs_winners(self, profile):
        """
        Returns an integer list that represents all possible winners of a profile under Coombs rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        elecType = profile.getElecType()
        if elecType == "soc" or elecType == "csv":
            return self.coombssoc_winners(profile)
        elif elecType == "toc":
            return self.coombstoc_winners(profile)
        else:
            print("ERROR: unsupported profile type")
            exit()
    def coombssoc_winners(self, profile):
        """
        Returns an integer list that represents all possible winners of a profile under Coombs rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        ordering = profile.getOrderVectors()
        m = profile.numCands
        prefcounts = profile.getPreferenceCounts()
        # Candidates are numbered either 0..m-1 or 1..m depending on the input.
        if min(ordering[0]) == 0:
            startstate = set(range(m))
        else:
            startstate = set(range(1, m + 1))
        known_winners = set()
        # half = math.floor(n / 2.0)
        # ----------Some statistics--------------
        # Visited-state cache so each remaining-candidate set is expanded once.
        hashtable2 = set()
        # push the node of start state into the priority queue
        root = Node(value=startstate)
        stackNode = []
        stackNode.append(root)
        while stackNode:
            # ------------pop the current node----------------
            node = stackNode.pop()
            # -------------------------------------------------
            state = node.value.copy()
            # use heuristic to delete all the candidates which satisfy the following condition
            # goal state 1: if the state set contains only 1 candidate, then stop
            if len(state) == 1 and list(state)[0] not in known_winners:
                known_winners.add(list(state)[0])
                continue
            # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop
            if state <= known_winners:
                continue
            # ----------Compute last-place (veto) counts for the current remaining candidates-------------
            reverse_veto_score = self.get_reverse_veto_scores(prefcounts, ordering, state, m)
            # Branch: eliminate each candidate tied for the most last places.
            maxscore = max(reverse_veto_score.values())
            for to_be_deleted in state:
                if reverse_veto_score[to_be_deleted] == maxscore:
                    child_state = state.copy()
                    child_state.remove(to_be_deleted)
                    tpc = tuple(sorted(child_state))
                    if tpc in hashtable2:
                        continue
                    else:
                        hashtable2.add(tpc)
                        child_node = Node(value=child_state)
                        stackNode.append(child_node)
        return sorted(known_winners)
    def get_reverse_veto_scores(self, prefcounts, ordering, state, m):
        """
        Count, for each candidate in state, the (weighted) number of votes that
        rank her last among the surviving candidates. Each order vector is
        scanned from the tail to find its worst surviving candidate.
        """
        plural_score = {}
        plural_score = plural_score.fromkeys(state, 0)
        for i in range(len(prefcounts)):
            for j in range(m - 1, -1, -1):
                if ordering[i][j] in state:
                    plural_score[ordering[i][j]] += prefcounts[i]
                    break
        return plural_score
    def coombstoc_winners(self, profile):
        """
        Returns an integer list that represents all possible winners of a profile under Coombs rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        ordering = profile.getOrderVectors()
        m = profile.numCands
        prefcounts = profile.getPreferenceCounts()
        # Rank maps are needed because orders with ties (toc) cannot be scored
        # from the order vectors alone.
        rankmaps = profile.getRankMaps()
        if min(ordering[0]) == 0:
            startstate = set(range(m))
        else:
            startstate = set(range(1, m + 1))
        known_winners = set()
        # half = math.floor(n / 2.0)
        # ----------Some statistics--------------
        # Visited-state cache so each remaining-candidate set is expanded once.
        hashtable2 = set()
        # push the node of start state into the priority queue
        root = Node(value=startstate)
        stackNode = []
        stackNode.append(root)
        while stackNode:
            # ------------pop the current node----------------
            node = stackNode.pop()
            # -------------------------------------------------
            state = node.value.copy()
            # use heuristic to delete all the candidates which satisfy the following condition
            # goal state 1: if the state set contains only 1 candidate, then stop
            if len(state) == 1 and list(state)[0] not in known_winners:
                known_winners.add(list(state)[0])
                continue
            # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop
            if state <= known_winners:
                continue
            # ----------Compute last-place (veto) counts for the current remaining candidates-------------
            reverse_veto_score = self.get_reverse_veto_scores2(prefcounts, rankmaps, state)
            # print("reverse_veto_score = ",reverse_veto_score)
            # Branch: eliminate each candidate tied for the most last places.
            maxscore = max(reverse_veto_score.values())
            for to_be_deleted in state:
                if reverse_veto_score[to_be_deleted] == maxscore:
                    child_state = state.copy()
                    child_state.remove(to_be_deleted)
                    tpc = tuple(sorted(child_state))
                    if tpc in hashtable2:
                        continue
                    else:
                        hashtable2.add(tpc)
                        child_node = Node(value=child_state)
                        stackNode.append(child_node)
        return sorted(known_winners)
    def get_reverse_veto_scores2(self, prefcounts, rankmaps, state):
        """
        Last-place counts for orders with ties: every candidate sharing the
        worst (highest) surviving rank in a vote receives that vote's full weight.
        """
        plural_score = {}
        plural_score = plural_score.fromkeys(state, 0)
        for i in range(len(prefcounts)):
            temp = list(filter(lambda x: x[0] in state, list(rankmaps[i].items())))
            max_value = max([value for key, value in temp])
            for j in state:
                if rankmaps[i][j] == max_value:
                    plural_score[j] += prefcounts[i]
        return plural_score
class MechanismRankedPairs():
"""
The Ranked Pairs mechanism.
This is the latest version created by Tyler Shepherd on
Nov. 3 2018, originally in two_loop_LP_sampling_LP_11_3.py.
"""
# debug_mode
# = 0: no output
# = 1: outputs only initial state
# = 2: outputs on stop conditions
# = 3: outputs all data
def __init__(self):
global debug_mode, BEGIN
self.debug_mode = 0
self.BEGIN = time.perf_counter()
# Timeout in seconds
self.TIMEOUT = 60 * 60 * 60
self.tau_for_testing = 0.05
    class Stats:
        # Stores statistics being measured and updated throughout procedure
        """
        Stopping Conditions:
        1: G U E is acyclic
        2: possible_winners <= known_winners (pruning)
        3: exactly 1 cand with in degree 0
        4: G U Tier is acyclic (in max children method)
        """
        def __init__(self):
            # Search-node count at which each winner was first discovered.
            self.discovery_states = dict()
            # Elapsed seconds at which each winner was first discovered.
            self.discovery_times = dict()
            # Total search nodes expanded (inner + outer).
            self.num_nodes = 0
            # Nodes popped from the outer (G, E) stack.
            self.num_outer_nodes = 0
            # Hit counters keyed by stop-condition id (see class docstring).
            self.stop_condition_hits = {1: 0, 2: 0, 3: 0, 4: 0}
            # States skipped because they were already in the hash table.
            self.num_hashes = 0
            self.num_initial_bridges = 0
            # Tier edges added immediately because a path already implied them.
            self.num_redundant_edges = 0
            # Results of the initial sampling pass (count and sorted winners).
            self.num_sampled = 0
            self.sampled = []
def output_graph(self, G):
# Draws the given graph G using networkx
pos = nx.circular_layout(G) # positions for all nodes
pos = dict(zip(sorted(pos.keys()), pos.values()))
# nodes
nx.draw_networkx_nodes(G, pos, node_size=350)
# edges
nx.draw_networkx_edges(G, pos, width=3, alpha=0.5, edge_color='b')
# labels
nx.draw_networkx_labels(G, pos, font_size=14, font_family='sans-serif')
plt.axis('off')
plt.savefig("weighted_graph.png") # save as png
plt.show() # display
def add_winners(self, G, I, known_winners, stats, possible_winners = None):
"""
Adds the winners of completed RP graph G
:param G: networkx graph, should be final resulting graph after running RP
:param I: list of all nodes
:param known_winners: list of winners found so far, will be updated
:param stats: Stats class storing run statistics
:param possible_winners: Can optionally pass in possible winners if already computed to avoid re-computing here
"""
if possible_winners is None:
G_in_degree = G.in_degree(I)
to_be_added = set([x[0] for x in G_in_degree if x[1] == 0])
else:
to_be_added = possible_winners
for c in to_be_added:
if c not in known_winners:
known_winners.add(c)
stats.discovery_states[c] = stats.num_nodes
stats.discovery_times[c] = time.perf_counter() - self.BEGIN
if self.debug_mode >= 2:
print("Found new winner:", c)
    def stop_conditions(self, G, E, I, known_winners, stats):
        """
        Determines if G, E state can be ended early.

        :param G: networkx DiGraph of the current representation of "locked in" edges in RP
        :param E: networkx DiGraph of the remaining edges not yet considered
        :param I: list of all nodes
        :param known_winners: set of currently known PUT-winners
        :param stats: Stats object containing runtime statistics
        :return: -1 if no stop condition met, otherwise returns the int of the stop condition
        """
        # Nodes with in-degree 0 in G are the winners this state could still produce.
        in_deg = G.in_degree(I)
        possible_winners = [x[0] for x in in_deg if x[1] == 0]

        # Stop Condition 2: Pruning. Possible winners are subset of known winners.
        # Checked first so a fully-pruned state never reaches the acyclicity test.
        if set(possible_winners) <= known_winners:
            stats.stop_condition_hits[2] += 1
            if self.debug_mode >= 2:
                print("Stop Condition 2: pruned")
            return 2

        # Stop Condition 3: Exactly one node has indegree 0 — the winner is forced,
        # so record it without processing the remaining edges.
        if len(possible_winners) == 1:
            stats.stop_condition_hits[3] += 1
            if self.debug_mode >= 2:
                print("Stop Condition 3: one cand in degree 0")
            self.add_winners(G, I, known_winners, stats, possible_winners)
            return 3

        # Stop Condition 1: G U E is acyclic — every remaining edge can be locked
        # in regardless of order, so the current in-degree-0 nodes are winners.
        temp_G = nx.compose(G, E)
        if nx.is_directed_acyclic_graph(temp_G) is True:
            stats.stop_condition_hits[1] += 1
            if self.debug_mode >= 2:
                print("Stop Condition 1: acyclic")
            self.add_winners(G, I, known_winners, stats)
            return 1

        # No early termination applies; caller must keep expanding this state.
        return -1
    def getWinners(self, profile):
        """
        Returns 1. a list of all PUT-winners of profile under ranked pairs rule
        and 2. A Stats object of runtime statistics.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Initialize
        stats = self.Stats()

        wmg = profile.getWmg()
        known_winners = set()
        I = list(wmg.keys())

        # G holds the edges "locked in" so far; E holds edges not yet considered.
        G = nx.DiGraph()
        G.add_nodes_from(I)

        E = nx.DiGraph()
        E.add_nodes_from(I)
        for cand1, cand2 in itertools.permutations(wmg.keys(), 2):
            if wmg[cand1][cand2] > 0:
                E.add_edge(cand1, cand2, weight=wmg[cand1][cand2])

        # print(wmg)
        # self.output_graph(E)

        # Sampling pass: run randomized RP a fixed number of times to seed
        # known_winners before the exhaustive search.
        num_samples = 200
        for i in range(num_samples):
            self.sample(E, I, known_winners, stats)
        stats.num_sampled = len(known_winners)
        stats.sampled = sorted(known_winners.copy())

        # Start search
        # Each node contains (G, E)
        root = Node(value=(G, E))
        stackNode = []
        stackNode.append(root)

        hashtable = set()

        while stackNode:
            # Pop new node to explore
            node = stackNode.pop()
            (G, E) = node.value

            # Check hash: skip states whose (G, E) edge sets were already expanded.
            hash_state = hash(str(G.edges()) + str(E.edges()))
            if hash_state in hashtable:
                stats.num_hashes += 1
                if self.debug_mode == 3:
                    print("hashed in outer hashtable")
                continue
            hashtable.add(hash_state)

            stats.num_outer_nodes += 1
            stats.num_nodes += 1

            if self.debug_mode == 3:
                print("Popped new node: ")
                print("G:", G.edges())
                print("E:", E.edges())

            # Flag for whether expanding the current tier required finding max children
            f_found_max_children = 0

            # Continue performing RP on this state as long as tie-breaking order doesn't matter
            while len(E.edges()) != 0:
                if self.stop_conditions(G, E, I, known_winners, stats) != -1:
                    # Stop condition hit
                    break

                # Find the heaviest remaining edge and how many edges tie with it.
                (max_weight, max_edge) = max([(d['weight'], (u, v)) for (u, v, d) in E.edges(data=True)])
                ties = [d['weight'] for (u, v, d) in E.edges(data=True)].count(max_weight)

                if ties == 1:
                    # Tier only has one edge: lock it in unless it would close a cycle.
                    if self.debug_mode == 3:
                        print("Only 1 edge in tier")

                    E.remove_edges_from([max_edge])
                    if nx.has_path(G, max_edge[1], max_edge[0]) is False:
                        G.add_edges_from([max_edge])

                else:
                    # This tier has multiple edges with same max weight.
                    tier = [(u, v) for (u, v, d) in E.edges(data=True) if d['weight'] == max_weight]
                    if self.debug_mode == 3:
                        print("Tier =", tier)

                    E.remove_edges_from(tier)

                    # Compute "bridge edges" which are not in any cycle; these can
                    # always be locked in regardless of tie-breaking order.
                    Gc = G.copy()
                    Gc.add_edges_from(tier)
                    # NOTE(review): nx.strongly_connected_component_subgraphs was
                    # removed in networkx 2.4 — confirm the pinned networkx version.
                    scc = [list(g.edges()) for g in nx.strongly_connected_component_subgraphs(Gc, copy=True) if
                           len(g.edges()) != 0]
                    bridges = set(Gc.edges()) - set(itertools.chain(*scc))
                    G.add_edges_from(bridges)
                    tier = list(set(tier) - bridges)

                    G_tc = nx.transitive_closure(G)

                    # Remove "inconsistent edges" that cannot be added to G without causing cycle
                    reverse_G = nx.DiGraph.reverse(G_tc)
                    tier = list(set(tier) - set(reverse_G.edges()))

                    # Remove "redundant edges": if there is already path from e[0] to e[1], can immediately add e
                    redundant_edges = set()
                    for e in tier:
                        if G_tc.has_edge(e[0], e[1]):
                            redundant_edges.add(e)
                            G.add_edges_from([e])
                    stats.num_redundant_edges += len(redundant_edges)
                    tier = list(set(tier) - redundant_edges)

                    if len(tier) == 0:
                        # No need to find max children, as tier is now empty
                        continue

                    max_children = self.find_max_children_scc_decomposition(G, tier, scc, bridges, I, known_winners, stats)

                    # Determine priority ordering of maximal children
                    children = dict()
                    index = 0
                    for child in max_children:
                        # child_node = Node(value=(self.edges2string(child.edges(), I), self.edges2string(E.edges(), I)))
                        child_node = Node(value=(child, E.copy()))
                        c_in_deg = child.in_degree(I)
                        available = set([x[0] for x in c_in_deg if x[1] == 0])
                        # NOTE(review): `priority` is computed but unused since the
                        # priority-based ordering below was replaced by index order.
                        priority = len(available - known_winners)
                        # children[child_node] = (priority, index)
                        children[child_node] = index
                        child.add_nodes_from(I)
                        index += 1
                        continue

                    # Push children in index order; the stack pops them LIFO.
                    children_items = sorted(children.items(), key=lambda x: x[1])
                    sorted_children = [key for key, value in children_items]
                    stackNode += sorted_children
                    f_found_max_children = 1
                    break

            # f_found_max_children is needed since, if we just added more nodes to stack, then current (G, E) is not actual valid state
            if len(E.edges()) == 0 and f_found_max_children == 0:
                # E is empty
                if self.debug_mode >= 2:
                    print("E is empty")
                self.add_winners(G, I, known_winners, stats)

        return sorted(known_winners), stats
def edges2string(self, edges, I):
m = len(I)
gstring = list(str(0).zfill(m**2))
for e in edges:
gstring[(e[0] - min(I))*m + e[1] - min(I)] = '1'
return ''.join(gstring)
def string2edges(self, gstring, I):
m = len(I)
edges = []
for i in range(len(gstring)):
if gstring[i] == '1':
e1 = i % m + min(I)
e0 = int((i - e1) / m) + min(I)
edges.append((e0, e1))
return edges
    def find_max_children_scc_decomposition(self, G, tier, scc, bridges, I, known_winners, stats):
        '''
        Finds the maximal children of G when tier is added using SCC decomposition
        :param G: Networkx DiGraph of edges "locked in" so far
        :param tier: List of edges in the current tier to be added with equal weight
        :param scc: List of the strongly connected components of G U tier, each being a list of edges
        :param bridges: List of edges that are bridges between sccs of G U tier
        :param I: List of all nodes
        :param known_winners: Known PUT-winners computed by RP so far
        :param stats: Stats object containing runtime statistics
        :return: Array of Networkx DiGraphs that are the maximal children of G U T
        '''
        # Single SCC: decomposition buys nothing, search the whole graph directly.
        if len(scc) == 1:
            children = self.explore_max_children_lp(G, tier, I, known_winners, stats)
            return children
        # Solve each SCC independently: restrict both locked-in edges and the
        # tier to that component's edge set (f_scc=1 disables whole-graph
        # winner-based pruning inside the sub-search).
        mc_list = []
        for x in scc:
            G_temp = nx.DiGraph(list(set(G.edges()).intersection(set(x))))
            T_temp = list(set(tier).intersection(set(x)))
            temp = self.explore_max_children_lp(G_temp, T_temp, I, known_winners, stats, f_scc = 1)
            mc_list.append(temp)
        # Every cross-product of per-SCC maximal children, re-joined with the
        # bridge edges, yields one maximal child of the full graph.
        Cartesian = itertools.product(*mc_list)
        return [nx.DiGraph(list(set(itertools.chain(*[list(y.edges()) for y in x])).union(bridges))) for x in Cartesian]
def explore_max_children_lp(self, G, tier, I, known_winners, stats, f_scc = 0):
"""
Computes the maximal children of G when tier is added
:param G: DiGraph, A directed graph
:param tier: list of tuples which correspond to multiple edges with same max weight.
e.g. edges = [x for x in wmg2.keys() if wmg2[x] == max_weight]
:param I: all nodes in G
:param known_winners: PUT-winners found so far by RP
:param stats: Stats object
:param f_scc: set to 1 if the G and tier being considered are an SCC of the full graph due to SCC decomposition
:return: set of graphs which correspond to maximum children of given parent: G
"""
# self.output_graph(G)
# self.output_graph(nx.DiGraph(tier))
max_children = []
cstack = []
# print("start mc:", time.perf_counter() - self.BEGIN)
hashtable = set()
if self.debug_mode >= 1:
print("Exploring max children")
print("G:", G.edges())
print("Tier:", tier)
print("Known winners:", known_winners)
print("---------------------------")
in_deg = G.in_degree()
nodes_with_no_incoming = set()
for x in in_deg:
if x[1] == 0:
nodes_with_no_incoming.add(x[0])
for x in I:
if x not in G.nodes():
nodes_with_no_incoming.add(x)
root = Node(value=(self.edges2string(G.edges(), I), self.edges2string(tier, I), nodes_with_no_incoming))
cstack.append(root)
END = self.BEGIN + self.TIMEOUT
while cstack:
node = cstack.pop()
(G_str, T_str, no_incoming) = node.value
if time.perf_counter() > END:
print("TIMEOUT")
return max_children
# Check hash. Doesn't ever happen if the below hash is included
hash_G = hash(G_str)
if hash_G in hashtable:
stats.num_hashes += 1
print('hash')
if self.debug_mode >= 2:
print("hashed in hashtable")
continue
hashtable.add(hash_G)
stats.num_nodes += 1
G = nx.DiGraph(self.string2edges(G_str, I))
T = self.string2edges(T_str, I)
G.add_nodes_from(I)
if self.debug_mode == 3:
print("popped")
print("G: ", G.edges())
print("T: ", T)
# goal state 2: if current G's possible winners is subset of known winners,
# then directly ignore it.
if no_incoming <= known_winners and not f_scc:
stats.stop_condition_hits[2] += 1
if self.debug_mode >= 3:
print("MC goal state 2: pruned")
continue
# goal state 1: if there are no edges to be added, then add the G_
if len(T) == 0:
max_children.append(G.copy())
if self.debug_mode >= 2:
print("MC goal state 1: no more edges in tier")
print("max child: ", G.edges())
continue
# goal state 3: if current G has exactly one cand with in degree 0, it is a PUT-winner
if len(no_incoming) == 1 and not f_scc:
stats.stop_condition_hits[3] += 1
if self.debug_mode >= 2:
print("MC goal state 3: only one cand in degree 0")
print("max child:", G.edges())
self.add_winners(G, I, known_winners, stats, no_incoming)
continue
# goal state 4: if union of current G and edges is acyclic,
# then directly add it to the max_children_set
Gc = G.copy()
Gc.add_edges_from(T)
if nx.is_directed_acyclic_graph(Gc):
stats.stop_condition_hits[4] += 1
hash_temp_G = hash(self.edges2string(Gc.edges(), I))
if hash_temp_G not in hashtable:
hashtable.add(hash_temp_G)
max_children.append(Gc)
if self.debug_mode >= 2:
print("MC goal state 4: G U T is acyclic")
print("max child:", Gc.edges())
else:
stats.num_hashes += 1
continue
# Perform reductions every step:
# Compute "bridge edges" which are not in any cycle
Gc = G.copy()
Gc.add_edges_from(T)
scc = [list(g.edges()) for g in nx.strongly_connected_component_subgraphs(Gc, copy=True) if
len(g.edges()) != 0]
bridges = set(Gc.edges()) - set(itertools.chain(*scc))
G.add_edges_from(bridges)
T = list(set(T) - bridges)
G_tc = nx.transitive_closure(G)
# Remove "inconsistent edges" that cannot be added to G without causing cycle
reverse_G = nx.DiGraph.reverse(G_tc)
T = list(set(T) - set(reverse_G.edges()))
# Remove "redundant edges": if there is already path from e[0] to e[1], can immediately add e
redundant_edges = set()
for e in T:
if G_tc.has_edge(e[0], e[1]):
redundant_edges.add(e)
G.add_edges_from([e])
stats.num_redundant_edges += len(redundant_edges)
T = list(set(T) - redundant_edges)
# Flag for whether adding any edge from T causes G to remain acyclic
f_isAcyclic = 0
children = dict()
# Used to break ties
index = 0
T = sorted(T)
for e in T:
G.add_edges_from([e])
Gc_str = self.edges2string(G.edges(), I)
if hash(Gc_str) in hashtable:
f_isAcyclic = 1
stats.num_hashes += 1
G.remove_edges_from([e])
continue
if not nx.has_path(G, source=e[1], target=e[0]):
f_isAcyclic = 1
Tc = copy.deepcopy(T)
Tc.remove(e)
# Remove the head of the edge if it had no incoming edges previously
no_incoming_c = no_incoming.copy()
no_incoming_c.discard(e[1])
child = Node(value=(Gc_str, self.edges2string(Tc, I), no_incoming_c))
priority = len(no_incoming_c - known_winners)
children[child] = (priority, index)
index = index + 1
if self.debug_mode == 3:
print("add new child with edge ", e, " and priority ", priority)
G.remove_edges_from([e])
children_items = sorted(children.items(), key=lambda x: (x[1][0], x[1][1]))
sorted_children = [key for key, value in children_items]
cstack += sorted_children
# goal state 5: adding all edges in T individually cause G to be cyclic
if f_isAcyclic == 0:
max_children.append(G.copy())
if self.debug_mode >= 2:
print("MC goal state 5 - found max child")
print("max child: ", G.edges())
continue
if self.debug_mode >= 1:
print("finished exploring max children")
print("num max children:", len(max_children))
print("PUT-winners:", known_winners)
return max_children
def sample(self, E, I, known_winners, stats):
'''
Using random tie-breaking, run through one procedure of RP and add resulting winner to known_winners
:param E: DiGraph, All postive edges in the wmg
:param I: List of all nodes in E
:param known_winners: Set of already-discovered PUT-winners to be added to
:param stats: Stats object storing runtime statistics
:return: Nothing
'''
G = nx.DiGraph()
G.add_nodes_from(I)
Ec = E.copy()
while len(Ec.edges()) != 0:
max_weight = max([(d['weight']) for (u, v, d) in Ec.edges(data=True)])
tier = [(u, v) for (u, v, d) in Ec.edges(data=True) if d['weight'] == max_weight]
# e = tier[random.randint(0, len(tier) -1 )]
priorities = []
potential_winners = set([x[0] for x in G.in_degree(I) if x[1] == 0])
base_priority = len(potential_winners - known_winners)
for e in tier:
if G.in_degree(e[1]) == 0 and e[1] not in known_winners:
priority = base_priority - 1
else:
priority = base_priority
priorities.append(exp(priority / self.tau_for_testing))
q_sum = sum(priorities)
probs = []
for v in priorities:
probs.append(v / q_sum)
legal_actions_index = [i for i in range(len(tier))]
e = tier[np.random.choice(legal_actions_index, p=probs)]
if not nx.has_path(G, e[1], e[0]):
G.add_edges_from([e])
Ec.remove_edges_from([e])
else:
Ec.remove_edges_from([e])
self.add_winners(G, I, known_winners, stats)
class MechanismBlack():
    """
    The Black mechanism: elect the Condorcet winner when one exists,
    otherwise fall back to the Borda winner(s).
    """

    def black_winner(self, profile):
        """
        Returns a number or a list that associates the winner(s) of a profile under black rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Complete orderings over candidates are required (ties allowed).
        elec_type = profile.getElecType()
        if elec_type != "soc" and elec_type != "toc" and elec_type != "csv":
            print("ERROR: unsupported election type")
            exit()

        wmg = profile.getWmg()
        num_cands = profile.numCands
        for cand in wmg.keys():
            # Count pairwise victories; a Condorcet winner beats all m-1 rivals.
            victories = sum(1 for rival in wmg[cand].keys() if wmg[cand][rival] > 0)
            if victories == num_cands - 1:
                return [cand]

        # No Condorcet winner exists: defer to the Borda rule.
        return MechanismBorda().getWinners(profile)
class MechanismPluralityRunOff():
    """
    The Plurality with Runoff mechanism.
    """
    def PluRunOff_single_winner(self, profile):
        """
        Returns a number that associates the winner of a profile under Plurality with Runoff rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Currently, we expect the profile to contain complete ordering over candidates. Ties are
        # allowed however.
        elecType = profile.getElecType()
        if elecType != "soc" and elecType != "toc" and elecType != "csv":
            print("ERROR: unsupported election type")
            exit()
        # Initialization
        prefcounts = profile.getPreferenceCounts()
        len_prefcounts = len(prefcounts)
        rankmaps = profile.getRankMaps()
        # ranking[0] is the list of plurality-score tiers (best first); each
        # tier is a list of candidates sorted numerically.
        ranking = MechanismPlurality().getRanking(profile)
        # 1st round: find the top 2 candidates in plurality scores
        # Compute the 1st-place candidate in plurality scores
        # print(ranking)
        max_cand = ranking[0][0][0]
        # Compute the 2nd-place candidate in plurality scores
        # Automatically using tie-breaking rule--numerically increasing order
        if len(ranking[0][0]) > 1:
            second_max_cand = ranking[0][0][1]
        else:
            second_max_cand = ranking[0][1][0]
        top_2 = [max_cand, second_max_cand]
        # 2nd round: find the candidate with maximum plurality score
        dict_top2 = {max_cand: 0, second_max_cand: 0}
        for i in range(len_prefcounts):
            # Restrict each ballot type to the two finalists; the one ranked
            # higher (smaller rank number) receives this type's vote count.
            # If a ballot ties the finalists, both receive the count.
            vote_top2 = {key: value for key, value in rankmaps[i].items() if key in top_2}
            top_position = min(vote_top2.values())
            keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position]
            for key in keys:
                dict_top2[key] += prefcounts[i]
        # print(dict_top2)
        winner = max(dict_top2.items(), key=lambda x: x[1])[0]
        return winner
    def PluRunOff_cowinners(self, profile):
        """
        Returns a list that associates all the winners of a profile under Plurality with Runoff rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Currently, we expect the profile to contain complete ordering over candidates. Ties are
        # allowed however.
        elecType = profile.getElecType()
        if elecType != "soc" and elecType != "toc" and elecType != "csv":
            print("ERROR: unsupported election type")
            exit()
        # Initialization
        prefcounts = profile.getPreferenceCounts()
        len_prefcounts = len(prefcounts)
        rankmaps = profile.getRankMaps()
        ranking = MechanismPlurality().getRanking(profile)
        known_winners = set()
        # 1st round: find the top 2 candidates in plurality scores
        # Unlike the single-winner variant, enumerate every valid finalist pair
        # that some tie-breaking of the first round could produce.
        top_2_combinations = []
        if len(ranking[0][0]) > 1:
            for cand1, cand2 in itertools.combinations(ranking[0][0], 2):
                top_2_combinations.append([cand1, cand2])
        else:
            max_cand = ranking[0][0][0]
            if len(ranking[0][1]) > 1:
                for second_max_cand in ranking[0][1]:
                    top_2_combinations.append([max_cand, second_max_cand])
            else:
                second_max_cand = ranking[0][1][0]
                top_2_combinations.append([max_cand, second_max_cand])
        # 2nd round: find the candidate with maximum plurality score
        # Run the runoff for each finalist pair; any candidate that wins some
        # runoff (including ties) is a co-winner.
        for top_2 in top_2_combinations:
            dict_top2 = {top_2[0]: 0, top_2[1]: 0}
            for i in range(len_prefcounts):
                vote_top2 = {key: value for key, value in rankmaps[i].items() if key in top_2}
                top_position = min(vote_top2.values())
                keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position]
                for key in keys:
                    dict_top2[key] += prefcounts[i]
            max_value = max(dict_top2.values())
            winners = [y for y in dict_top2.keys() if dict_top2[y] == max_value]
            known_winners = known_winners | set(winners)
        return sorted(known_winners)
    def getMov(self, profile):
        """
        Returns an integer that is equal to the margin of victory of the election profile.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # from . import mov
        import mov
        return mov.MoVPluRunOff(profile)
"""
Multi-winner voting rules
"""
class MechanismSNTV():
    """
    The Single non-transferable vote mechanism.
    """

    def SNTV_winners(self, profile, K):
        """
        Returns a list that associates all the winners of a profile under Single non-transferable vote rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # Complete orderings over candidates are required (ties allowed).
        elec_type = profile.getElecType()
        if elec_type != "soc" and elec_type != "toc" and elec_type != "csv":
            print("ERROR: unsupported election type")
            exit()

        num_cands = profile.numCands
        plurality_scores = MechanismPlurality().getCandScoresMap(profile)
        # With at least as many seats as candidates, every candidate wins.
        if K >= num_cands:
            return list(plurality_scores.keys())
        # Otherwise take the K candidates with the highest plurality scores
        # (stable sort preserves the map's original order among ties).
        ranked = sorted(plurality_scores.items(), key=lambda item: item[1], reverse=True)
        return [cand for cand, score in ranked[:K]]

    def getMov(self, profile, K):
        """
        Returns an integer that is equal to the margin of victory of the election profile.

        :ivar Profile profile: A Profile object that represents an election profile.
        """
        # from . import mov
        import mov
        return mov.MoV_SNTV(profile, K)
class MechanismChamberlin_Courant():
    """
    The Chamberlin–Courant mechanism (dynamic program for single-peaked profiles).
    """
    def single_peaked_winners(self, profile, d=1, K=3, funcType='Borda', scoringVector=None):
        """
        Returns a list that associates all the winners of a profile under The Chamberlin–Courant rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        :ivar int d: number of voters allowed to go unrepresented (theta = n - d).
        :ivar int K: committee size.
        :ivar str funcType: 'Borda' recomputes the scoring vector from the profile.
        :ivar list scoringVector: explicit scoring vector (defaults to empty;
            the former mutable-default argument `[]` was replaced compatibly).

        Fix vs. previous revision: when backtracking through the DP table,
        the u (voter-budget) index was read from `where(...)[0][0]`, i.e. the
        ROW index array, same as p. numpy.where returns (rows, cols), so the
        column index of the first match is `where(...)[1][0]`.
        """
        if scoringVector is None:
            scoringVector = []
        # Currently, we expect the profile to contain complete ordering over candidates. Ties are
        # allowed however.
        elecType = profile.getElecType()
        if elecType != "soc" and elecType != "toc" and elecType != "csv":
            print("ERROR: unsupported election type")
            exit()
        # ------------------1. INITIALIZATION-----------------------------
        m = profile.numCands
        n = profile.numVoters
        cand = list(profile.candMap.keys())
        # Sentinel candidate one past the last real one, used as DP boundary.
        cand.append(cand[m - 1] + 1)
        # theta voters must be represented (d may be left out).
        theta = n - d
        if funcType == 'Borda':
            scoringVector = MechanismBorda().getScoringVector(profile)
        # z[k][j][t] = best utility using k-1 committee members left of j,
        # representing t voters.
        z = dict()
        for k in range(1, K + 2):  # k = 1,...,K + 1
            z[k] = dict()
            for j in range(1, m + 2):
                z[k][j] = dict()
        for j in range(1, m + 2):
            for t in range(0, theta + 1):
                z[1][j][t] = self.s(profile, 1, j, t, {cand[j - 1]}, scoringVector)
                for k in range(1, K + 1):
                    z[k + 1][j][t] = float("-inf")
        # ------------------2. MAIN LOOP-----------------------------
        for k in range(1, K + 1):
            # Predecessors loop:
            for p in range(1, m + 1):
                for u in range(0, theta + 1):
                    if z[k][p][u] != float("-inf"):
                        # Successors sub-loop:
                        for j in range(p + 1, m + 2):
                            for t in range(u, theta + 1):
                                z[k + 1][j][t] = max(z[k + 1][j][t], z[k][p][u]
                                                     + self.s(profile, p + 1, j, t - u, {cand[p - 1], cand[j - 1]},
                                                              scoringVector))
        max_utility = z[K + 1][m + 1][theta]
        print("max_utility=", max_utility)
        # --------------------3. OUTPUT WINNERS---------------------------
        # Walk the DP table backwards, recovering one optimal committee.
        winners = []
        temp_max = max_utility
        j = m + 1
        t = theta
        for k in range(K + 1, 1, -1):
            z_k_j_t = array(
                [[z[k - 1][p][u] + self.s(profile, p + 1, j, t - u, {cand[p - 1], cand[j - 1]}, scoringVector)
                  for u in range(0, theta + 1)] for p in range(1, m + 1)])
            # Rows index p, columns index u; take row/column of the first match.
            match = where(temp_max == z_k_j_t)
            p_ind = match[0][0]
            u_ind = match[1][0]
            p0 = list(range(1, m + 1))[p_ind]
            u0 = list(range(0, theta + 1))[u_ind]
            winners.append(p0)
            temp_max = z[k][p0][u0]
            j = p0
            t = u0
        return sorted(winners)
    def s(self, profile, l, j, t, S, scoringVector):
        """Best total score of representing t voters (whose top choice lies in
        [cand l, cand j]) by their preferred member of committee slice S."""
        new_prefcounts, new_rankmaps = self.V(profile, l, j)
        # print(new_prefcounts, new_rankmaps)
        if t == 0 or len(new_prefcounts) == 0:
            return float("-inf")
        s_S = []
        for i in range(len(new_prefcounts)):
            # Each ballot type scores its best-ranked candidate of S.
            s_S.append(max(scoringVector[new_rankmaps[i][x] - 1]
                           if x in new_rankmaps[i].keys() else float("-inf") for x in S))
        # Greedily take the t highest-scoring ballot types (weighted by counts).
        ind = (-array(s_S)).argsort()
        return dot(array(s_S)[ind][0:t], array(new_prefcounts)[ind][0:t])
    def V(self, profile, l, j):
        """Return (prefcounts, rankmaps) restricted to ballot types whose
        top-ranked candidate lies in the interval [cand l, cand j]."""
        prefcounts = profile.getPreferenceCounts()
        rankmaps = profile.getRankMaps()
        cand = list(profile.candMap.keys())
        m = len(cand)
        cand.append(cand[m - 1] + 1)
        new_prefcounts = []
        new_rankmaps = []
        for i in range(len(prefcounts)):
            top_i = list(rankmaps[i].keys())[list(rankmaps[i].values()).index(1)]
            if top_i in range(cand[l - 1], cand[j - 1] + 1):
                new_prefcounts.append(prefcounts[i])
                new_rankmaps.append(rankmaps[i])
        return new_prefcounts, new_rankmaps
class MechanismBordaMean():
    """
    The Borda-mean mechanism.
    """
    def Borda_mean_winners(self, profile):
        """
        Returns a list that associates all the winners of a profile under The Borda-mean rule.

        :ivar Profile profile: A Profile object that represents an election profile.
        :return: binary indicator list; entry j is 1 iff candidate j's Borda
            score is at least the mean Borda score.
        """
        n_candidates = profile.numCands
        prefcounts = profile.getPreferenceCounts()
        len_prefcounts = len(prefcounts)
        rankmaps = profile.getRankMaps()
        # Repack the rank maps into a (ballot types x candidates) rank matrix.
        values = zeros([len_prefcounts, n_candidates], dtype=int)
        # delta normalizes 0-based vs 1-based candidate keys.
        # NOTE(review): assumes candidate keys are consecutive integers
        # starting at 0 or 1 -- confirm against Profile.candMap.
        if min(list(rankmaps[0].keys())) == 0:
            delta = 0
        else:
            delta = 1
        for i in range(len_prefcounts):
            for j in range(delta, n_candidates + delta):
                values[i][j - delta] = rankmaps[i][j]
        # print("values=", values)
        mat0 = self._build_mat(values, n_candidates, prefcounts)
        # Borda score of i = sum over j of the net pairwise margin of i vs j.
        borda = [0 for i in range(n_candidates)]
        for i in range(n_candidates):
            borda[i] = sum([mat0[i, j] for j in range(n_candidates)])
        borda_mean = mean(borda)
        bin_winners_list = [int(borda[i] >= borda_mean) for i in range(n_candidates)]
        return bin_winners_list
    def _build_mat(self, ranks, n_candidates, prefcounts):
        """
        Builds mxm matrix. Entry at i,j has #i>j - #i<j
        :param ranks: (ballot types x candidates) matrix of rank positions
            (smaller = preferred)
        :param prefcounts: multiplicity of each ballot type, used as weights
        :return: mxm matrix
        """
        mat = zeros((n_candidates, n_candidates))
        for i, j in itertools.combinations(range(n_candidates), 2):
            # preference < 0 means the ballot ranks i better (smaller) than j.
            preference = ranks[:, i] - ranks[:, j]
            h_ij = dot((preference < 0), prefcounts)  # prefers i to j
            h_ji = dot((preference > 0), prefcounts)  # prefers j to i
            mat[i, j] = h_ij - h_ji
            mat[j, i] = h_ji - h_ij
        return mat
    """
    Simulate approval voting for any k-chotomous preferences where voters can have different values of k.
    Borda-mean rule is applied to each vote, to change any k-chotomous preferences to dichotomous approval votes.
    Each vote is simulated as an approval vote.
    The approval rule is then used to aggregate these simulated approval votes.
    Input:
    ranks: a nxm matrix. rows are for votes, columns for candidates. i,j-th entry gives rank of candidate j by voter i.
    only the relative numbers/positions matter.
    Output:
    winners: an m-dimensional array. j-th entry is 1 if candidate j is a winner by the approval voting rule.
    approval_score: an m-dimenstional array: j-th entry gives approval score of candidate j.
    """
    def simulated_approval(self, profile):
        # Convert each ballot into a simulated approval ballot (via the
        # per-vote Borda-mean rule), then aggregate with the approval rule.
        n_candidates = profile.numCands
        n_voters = profile.numVoters
        prefcounts = profile.getPreferenceCounts()
        len_prefcounts = len(prefcounts)
        rankmaps = profile.getRankMaps()
        values = zeros([len_prefcounts, n_candidates], dtype=int)
        if min(list(rankmaps[0].keys())) == 0:
            delta = 0
        else:
            delta = 1
        for i in range(len_prefcounts):
            for j in range(delta, n_candidates + delta):
                values[i][j - delta] = rankmaps[i][j]
        approval = list()
        # NOTE(review): iterates n_voters rows, but `values` has only
        # len_prefcounts rows (one per ballot TYPE) and the counts in
        # prefcounts are not applied here -- correct only when every ballot
        # type is unique (prefcounts all 1). Confirm against callers.
        for i in range(n_voters):
            vote = array([list(values[i, :])])
            approvals = self.borda_mean(vote)
            approval.append(approvals)
        return self.approval_rule(array(approval))
    """
    Compute approval rule
    Input:
    approval: an nxm matrix of approval votes. i,j-th entry is 1 if voter i approves candidate j; 0 otherwise.
    Output:
    winners: an m-dimensional array. j-th entry is 1 if candidate j is a winner by the approval voting rule.
    approval_score: an m-dimenstional array: j-th entry gives approval score of candidate j.
    """
    def approval_rule(self, approval):
        n_voters, n_candidates = approval.shape
        # Column sums = per-candidate approval counts.
        approval_score = [0 for j in range(n_candidates)]
        for i in range(n_voters):
            approvals = approval[i, :]
            approval_score = [approval_score[j] + approvals[j] for j in range(n_candidates)]
        max_score = max(approval_score)
        winners = list((array(approval_score) >= max_score).astype(int))
        return winners, approval_score
    """
    Build weighted tournament graph from any k-chotomous preferences. Different votes can have different values of k.
    Input:
    ranks: a nxm matrix. rows are for votes, columns for candidates. i,j-th entry gives rank of candidate j by voter i.
    only the relative numbers/positions matter.
    Output:
    mat: a mxm matrix. i,j-th entry gives |i>j| - |j>i|, ties are ignored, can have -ve entries.
    """
    def _build_mat_app(self, ranks):
        # Unweighted variant of _build_mat: every row counts once.
        n_voters, n_candidates = ranks.shape
        mat = zeros((n_candidates, n_candidates))
        for i, j in itertools.combinations(range(n_candidates), 2):
            preference = ranks[:, i] - ranks[:, j]
            h_ij = sum(preference < 0)  # prefers i to j
            h_ji = sum(preference > 0)  # prefers j to i
            mat[i, j] = h_ij - h_ji
            mat[j, i] = h_ji - h_ij
        return mat
    """
    Compute the Borda mean rule.
    Input:
    ranks: a nxm matrix. rows are for votes, columns for candidates. i,j-th entry gives rank of candidate j by voter i.
    only the relative numbers/positions matter.
    Output:
    winners: an m-dimensional array. j-th entry is 1 if candidate j is a winner by the Borda mean rule, 0 otherwise.
    borda: m-deimensional array with Borda mean scores, can be used to generate a ranking.
    Sum of edges from candidate j to every other candidate.
    """
    def borda_mean(self, ranks):
        mat = self._build_mat_app(ranks)
        n_voters, n_candidates = ranks.shape
        borda = [0 for i in range(n_candidates)]
        for i in range(n_candidates):
            borda[i] = sum([mat[i, j] for j in range(n_candidates)])
        borda_mean = mean(borda)
        # Approve exactly the candidates scoring at least the mean.
        winners = [int(borda[i] >= borda_mean) for i in range(n_candidates)]
        return winners
class Node:
    """Lightweight wrapper holding a search-state value for use on stacks/queues."""

    def __init__(self, value=None):
        self.value = value

    def __lt__(self, other):
        # All nodes compare "equal" so ordered containers never raise
        # TypeError when breaking ties between Node instances.
        return 0

    def getvalue(self):
        """Return the wrapped value."""
        return self.value
| gpl-3.0 |
deepesch/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
    """Transform accentuated unicode symbols into their simple counterpart

    Warning: the python-level loop and join operations make this
    implementation 20 times slower than the strip_accents_ascii basic
    normalization.

    See also
    --------
    strip_accents_ascii
        Remove accentuated char for any unicode symbol that has a direct
        ASCII equivalent.
    """
    # Decompose each character (NFKD), then drop the combining marks.
    decomposed = unicodedata.normalize('NFKD', s)
    kept = [char for char in decomposed if not unicodedata.combining(char)]
    return ''.join(kept)
def strip_accents_ascii(s):
    """Transform accentuated unicode symbols into ascii or nothing

    Warning: this solution is only suited for languages that have a direct
    transliteration to ASCII symbols.

    See also
    --------
    strip_accents_unicode
        Remove accentuated char for any unicode symbol.
    """
    # NFKD decomposition followed by a lossy ASCII round-trip drops any
    # character (combining mark or otherwise) without an ASCII equivalent.
    decomposed = unicodedata.normalize('NFKD', s)
    return decomposed.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
    """Basic regexp based HTML / XML tag stripper function

    For serious HTML/XML preprocessing you should rather use an external
    library such as lxml or BeautifulSoup.
    """
    # Each <...> run is replaced by a single space so adjacent words stay
    # separated. (The re module caches compiled patterns internally.)
    return re.sub(r"<([^>]+)>", " ", s, flags=re.UNICODE)
def _check_stop_list(stop):
    """Resolve the ``stop_words`` argument into an actual stop-word collection."""
    if stop == "english":
        return ENGLISH_STOP_WORDS
    if isinstance(stop, six.string_types):
        # Any other string is an unknown built-in list name.
        raise ValueError("not a built-in stop list: %s" % stop)
    if stop is None:
        return None
    # assume it's a collection
    return frozenset(stop)
class VectorizerMixin(object):
    """Provides common code for text vectorizers (tokenization logic)."""

    # Runs of 2+ whitespace chars, collapsed to one space before char n-grams.
    _white_spaces = re.compile(r"\s\s+")

    def decode(self, doc):
        """Decode the input into a string of unicode symbols

        The decoding strategy depends on the vectorizer parameters.
        """
        # 'filename' / 'file' inputs are dereferenced to raw bytes first.
        if self.input == 'filename':
            with open(doc, 'rb') as fh:
                doc = fh.read()

        elif self.input == 'file':
            doc = doc.read()

        if isinstance(doc, bytes):
            doc = doc.decode(self.encoding, self.decode_error)

        # np.nan is what pandas yields for missing values; fail loudly rather
        # than tokenizing the float repr.
        if doc is np.nan:
            raise ValueError("np.nan is an invalid document, expected byte or "
                             "unicode string.")

        return doc

    def _word_ngrams(self, tokens, stop_words=None):
        """Turn tokens into a sequence of n-grams after stop words filtering"""
        # handle stop words
        if stop_words is not None:
            tokens = [w for w in tokens if w not in stop_words]

        # handle token n-grams
        min_n, max_n = self.ngram_range
        if max_n != 1:
            # Unigrams-only range (1, 1) skips this entirely; otherwise build
            # every space-joined n-gram for n in [min_n, max_n].
            original_tokens = tokens
            tokens = []
            n_original_tokens = len(original_tokens)
            for n in xrange(min_n,
                            min(max_n + 1, n_original_tokens + 1)):
                for i in xrange(n_original_tokens - n + 1):
                    tokens.append(" ".join(original_tokens[i: i + n]))

        return tokens

    def _char_ngrams(self, text_document):
        """Tokenize text_document into a sequence of character n-grams"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)

        text_len = len(text_document)
        ngrams = []
        min_n, max_n = self.ngram_range
        for n in xrange(min_n, min(max_n + 1, text_len + 1)):
            for i in xrange(text_len - n + 1):
                ngrams.append(text_document[i: i + n])
        return ngrams

    def _char_wb_ngrams(self, text_document):
        """Whitespace sensitive char-n-gram tokenization.

        Tokenize text_document into a sequence of character n-grams
        excluding any whitespace (operating only inside word boundaries)"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)

        min_n, max_n = self.ngram_range
        ngrams = []
        for w in text_document.split():
            # Pad each word with spaces so n-grams can mark word boundaries.
            w = ' ' + w + ' '
            w_len = len(w)
            for n in xrange(min_n, max_n + 1):
                offset = 0
                ngrams.append(w[offset:offset + n])
                while offset + n < w_len:
                    offset += 1
                    ngrams.append(w[offset:offset + n])
                if offset == 0:   # count a short word (w_len < n) only once
                    break
        return ngrams

    def build_preprocessor(self):
        """Return a function to preprocess the text before tokenization"""
        if self.preprocessor is not None:
            return self.preprocessor

        # unfortunately python functools package does not have an efficient
        # `compose` function that would have allowed us to chain a dynamic
        # number of functions. However the cost of a lambda call is a few
        # hundreds of nanoseconds which is negligible when compared to the
        # cost of tokenizing a string of 1000 chars for instance.
        noop = lambda x: x

        # accent stripping
        if not self.strip_accents:
            strip_accents = noop
        elif callable(self.strip_accents):
            strip_accents = self.strip_accents
        elif self.strip_accents == 'ascii':
            strip_accents = strip_accents_ascii
        elif self.strip_accents == 'unicode':
            strip_accents = strip_accents_unicode
        else:
            raise ValueError('Invalid value for "strip_accents": %s' %
                             self.strip_accents)

        if self.lowercase:
            return lambda x: strip_accents(x.lower())
        else:
            return strip_accents

    def build_tokenizer(self):
        """Return a function that splits a string into a sequence of tokens"""
        if self.tokenizer is not None:
            return self.tokenizer
        # Compile once here so the returned callable reuses the pattern.
        token_pattern = re.compile(self.token_pattern)
        return lambda doc: token_pattern.findall(doc)

    def get_stop_words(self):
        """Build or fetch the effective stop words list"""
        return _check_stop_list(self.stop_words)

    def build_analyzer(self):
        """Return a callable that handles preprocessing and tokenization"""
        if callable(self.analyzer):
            return self.analyzer

        preprocess = self.build_preprocessor()

        if self.analyzer == 'char':
            return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))

        elif self.analyzer == 'char_wb':
            return lambda doc: self._char_wb_ngrams(
                preprocess(self.decode(doc)))

        elif self.analyzer == 'word':
            # Stop words only apply to the word analyzer.
            stop_words = self.get_stop_words()
            tokenize = self.build_tokenizer()

            return lambda doc: self._word_ngrams(
                tokenize(preprocess(self.decode(doc))), stop_words)

        else:
            raise ValueError('%s is not a valid tokenization scheme/analyzer' %
                             self.analyzer)

    def _validate_vocabulary(self):
        # Normalize a user-supplied vocabulary (iterable or mapping) into the
        # fitted term -> column-index dict, validating it along the way.
        vocabulary = self.vocabulary
        if vocabulary is not None:
            if not isinstance(vocabulary, Mapping):
                # Iterable of terms: assign indices in iteration order, and
                # reject duplicates (setdefault returns the first index).
                vocab = {}
                for i, t in enumerate(vocabulary):
                    if vocab.setdefault(t, i) != i:
                        msg = "Duplicate term in vocabulary: %r" % t
                        raise ValueError(msg)
                vocabulary = vocab
            else:
                # Mapping: indices must be exactly 0..len-1 with no repeats.
                indices = set(six.itervalues(vocabulary))
                if len(indices) != len(vocabulary):
                    raise ValueError("Vocabulary contains repeated indices.")
                for i in xrange(len(vocabulary)):
                    if i not in indices:
                        msg = ("Vocabulary of size %d doesn't contain index "
                               "%d." % (len(vocabulary), i))
                        raise ValueError(msg)
            if not vocabulary:
                raise ValueError("empty vocabulary passed to fit")
            self.fixed_vocabulary_ = True
            self.vocabulary_ = dict(vocabulary)
        else:
            self.fixed_vocabulary_ = False

    def _check_vocabulary(self):
        """Check if vocabulary is empty or missing (not fit-ed)"""
        msg = "%(name)s - Vocabulary wasn't fitted."
        # (trailing comma is harmless: the call runs, the 1-tuple is discarded)
        check_is_fitted(self, 'vocabulary_', msg=msg),

        if len(self.vocabulary_) == 0:
            raise ValueError("Vocabulary is empty")

    @property
    @deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
                "removed in 0.18. Please use `fixed_vocabulary_` instead.")
    def fixed_vocabulary(self):
        # Backwards-compatibility alias for fixed_vocabulary_.
        return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
    """Convert a collection of text documents to a matrix of token occurrences

    It turns a collection of text documents into a scipy.sparse matrix holding
    token occurrence counts (or binary occurrence information), possibly
    normalized as token frequencies if norm='l1' or projected on the euclidean
    unit sphere if norm='l2'.

    This text vectorizer implementation uses the hashing trick to find the
    token string name to feature integer index mapping.

    This strategy has several advantages:

    - it is very low memory scalable to large datasets as there is no need to
      store a vocabulary dictionary in memory

    - it is fast to pickle and un-pickle as it holds no state besides the
      constructor parameters

    - it can be used in a streaming (partial fit) or parallel pipeline as there
      is no state computed during fit.

    There are also a couple of cons (vs using a CountVectorizer with an
    in-memory vocabulary):

    - there is no way to compute the inverse transform (from feature indices to
      string feature names) which can be a problem when trying to introspect
      which features are most important to a model.

    - there can be collisions: distinct tokens can be mapped to the same
      feature index. However in practice this is rarely an issue if n_features
      is large enough (e.g. 2 ** 18 for text classification problems).

    - no IDF weighting as this would render the transformer stateful.

    The hash function employed is the signed 32-bit version of Murmurhash3.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.

        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.

        Otherwise the input is expected to be the sequence strings or
        bytes items are expected to be analyzed directly.

    encoding : string, default='utf-8'
        If bytes or files are given to analyze, this encoding is used to
        decode.

    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.

    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        an direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.

    analyzer : string, {'word', 'char', 'char_wb'} or callable
        Whether the feature should be made of word or character n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries.

        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.

    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.

    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.

    ngram_range : tuple (min_n, max_n), default=(1, 1)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.

    stop_words : string {'english'}, list, or None (default)
        If 'english', a built-in stop word list for English is used.

        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.

    lowercase : boolean, default=True
        Convert all characters to lowercase before tokenizing.

    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).

    n_features : integer, default=(2 ** 20)
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.

    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.

    binary: boolean, default=False.
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.

    dtype: type, optional
        Type of the matrix returned by fit_transform() or transform().

    non_negative : boolean, default=False
        Whether output matrices should contain non-negative values only;
        effectively calls abs on the matrix prior to returning it.
        When True, output values can be interpreted as frequencies.
        When False, output values will have expected value zero.

    See also
    --------
    CountVectorizer, TfidfVectorizer
    """

    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None,
                 lowercase=True, preprocessor=None, tokenizer=None,
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
                 binary=False, norm='l2', non_negative=False,
                 dtype=np.float64):
        # scikit-learn convention: parameters are stored verbatim with no
        # validation or transformation before fit/transform time.
        self.input = input
        self.encoding = encoding
        self.decode_error = decode_error
        self.strip_accents = strip_accents
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.token_pattern = token_pattern
        self.stop_words = stop_words
        self.n_features = n_features
        self.ngram_range = ngram_range
        self.binary = binary
        self.norm = norm
        self.non_negative = non_negative
        self.dtype = dtype

    def partial_fit(self, X, y=None):
        """Does nothing: this transformer is stateless.

        This method is just there to mark the fact that this transformer
        can work in a streaming setup.
        """
        return self

    def fit(self, X, y=None):
        """Does nothing: this transformer is stateless."""
        # triggers a parameter validation
        self._get_hasher().fit(X, y=y)
        return self

    def transform(self, X, y=None):
        """Transform a sequence of documents to a document-term matrix.

        Parameters
        ----------
        X : iterable over raw text documents, length = n_samples
            Samples. Each sample must be a text document (either bytes or
            unicode strings, file name or file object depending on the
            constructor argument) which will be tokenized and hashed.

        y : (ignored)

        Returns
        -------
        X : scipy.sparse matrix, shape = (n_samples, self.n_features)
            Document-term matrix.
        """
        analyzer = self.build_analyzer()
        # The generator keeps per-document token lists from being
        # materialized all at once.
        X = self._get_hasher().transform(analyzer(doc) for doc in X)
        if self.binary:
            # Collapse counts to 0/1 occurrence indicators in place.
            X.data.fill(1)
        if self.norm is not None:
            # copy=False is safe: X was freshly created by the hasher above.
            X = normalize(X, norm=self.norm, copy=False)
        return X

    # Alias transform to fit_transform for convenience
    fit_transform = transform

    def _get_hasher(self):
        # A fresh FeatureHasher is built on demand; it holds no fitted
        # state, which keeps this vectorizer itself entirely stateless.
        return FeatureHasher(n_features=self.n_features,
                             input_type='string', dtype=self.dtype,
                             non_negative=self.non_negative)
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
    """Convert a collection of text documents to a matrix of token counts

    This implementation produces a sparse representation of the counts using
    scipy.sparse.coo_matrix.

    If you do not provide an a-priori dictionary and you do not use an analyzer
    that does some kind of feature selection then the number of features will
    be equal to the vocabulary size found by analyzing the data.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.

        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.

        Otherwise the input is expected to be the sequence strings or
        bytes items are expected to be analyzed directly.

    encoding : string, 'utf-8' by default.
        If bytes or files are given to analyze, this encoding is used to
        decode.

    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.

    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        an direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.

    analyzer : string, {'word', 'char', 'char_wb'} or callable
        Whether the feature should be made of word or character n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries.

        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.
        Only applies if ``analyzer == 'word'``.

    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.

    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.

    ngram_range : tuple (min_n, max_n)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.

    stop_words : string {'english'}, list, or None (default)
        If 'english', a built-in stop word list for English is used.

        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.

        If None, no stop words will be used. max_df can be set to a value
        in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra corpus document frequency of terms.

    lowercase : boolean, True by default
        Convert all characters to lowercase before tokenizing.

    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp select tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).

    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.

    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold. This value is also
        called cut-off in the literature.
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.

    max_features : int or None, default=None
        If not None, build a vocabulary that only consider the top
        max_features ordered by term frequency across the corpus.

        This parameter is ignored if vocabulary is not None.

    vocabulary : Mapping or iterable, optional
        Either a Mapping (e.g., a dict) where keys are terms and values are
        indices in the feature matrix, or an iterable over terms. If not
        given, a vocabulary is determined from the input documents. Indices
        in the mapping should not be repeated and should not have any gap
        between 0 and the largest index.

    binary : boolean, default=False
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.

    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().

    Attributes
    ----------
    vocabulary_ : dict
        A mapping of terms to feature indices.

    stop_words_ : set
        Terms that were ignored because they either:

          - occurred in too many documents (`max_df`)
          - occurred in too few documents (`min_df`)
          - were cut off by feature selection (`max_features`).

        This is only available if no vocabulary was given.

    See also
    --------
    HashingVectorizer, TfidfVectorizer

    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model size
    when pickling. This attribute is provided only for introspection and can
    be safely removed using delattr or set to None before pickling.
    """

    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None,
                 lowercase=True, preprocessor=None, tokenizer=None,
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), analyzer='word',
                 max_df=1.0, min_df=1, max_features=None,
                 vocabulary=None, binary=False, dtype=np.int64):
        self.input = input
        self.encoding = encoding
        self.decode_error = decode_error
        self.strip_accents = strip_accents
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.token_pattern = token_pattern
        self.stop_words = stop_words
        self.max_df = max_df
        self.min_df = min_df
        # Unlike most scikit-learn estimators, some cheap sanity checks run
        # at construction time rather than in fit.
        # NOTE(review): the message reads "of" where "or" was probably
        # intended; it is a runtime string, left unchanged here.
        if max_df < 0 or min_df < 0:
            raise ValueError("negative value for max_df of min_df")
        self.max_features = max_features
        if max_features is not None:
            if (not isinstance(max_features, numbers.Integral) or
                    max_features <= 0):
                raise ValueError(
                    "max_features=%r, neither a positive integer nor None"
                    % max_features)
        self.ngram_range = ngram_range
        self.vocabulary = vocabulary
        self.binary = binary
        self.dtype = dtype

    def _sort_features(self, X, vocabulary):
        """Sort features by name

        Returns a reordered matrix and modifies the vocabulary in place
        """
        sorted_features = sorted(six.iteritems(vocabulary))
        map_index = np.empty(len(sorted_features), dtype=np.int32)
        for new_val, (term, old_val) in enumerate(sorted_features):
            # Record the old column position and rewrite the vocabulary so
            # column order follows alphabetical term order.
            map_index[new_val] = old_val
            vocabulary[term] = new_val
        return X[:, map_index]

    def _limit_features(self, X, vocabulary, high=None, low=None,
                        limit=None):
        """Remove too rare or too common features.

        Prune features that are non zero in more samples than high or less
        documents than low, modifying the vocabulary, and restricting it to
        at most the limit most frequent.

        This does not prune samples with zero features.
        """
        if high is None and low is None and limit is None:
            return X, set()

        # Calculate a mask based on document frequencies
        dfs = _document_frequency(X)
        tfs = np.asarray(X.sum(axis=0)).ravel()
        mask = np.ones(len(dfs), dtype=bool)
        if high is not None:
            mask &= dfs <= high
        if low is not None:
            mask &= dfs >= low
        if limit is not None and mask.sum() > limit:
            # Among terms that already passed the df thresholds, keep only
            # the `limit` highest total-frequency ones.
            mask_inds = (-tfs[mask]).argsort()[:limit]
            new_mask = np.zeros(len(dfs), dtype=bool)
            new_mask[np.where(mask)[0][mask_inds]] = True
            mask = new_mask

        new_indices = np.cumsum(mask) - 1  # maps old indices to new
        removed_terms = set()
        for term, old_index in list(six.iteritems(vocabulary)):
            if mask[old_index]:
                vocabulary[term] = new_indices[old_index]
            else:
                del vocabulary[term]
                removed_terms.add(term)
        kept_indices = np.where(mask)[0]
        if len(kept_indices) == 0:
            raise ValueError("After pruning, no terms remain. Try a lower"
                             " min_df or a higher max_df.")
        return X[:, kept_indices], removed_terms

    def _count_vocab(self, raw_documents, fixed_vocab):
        """Create sparse feature matrix, and vocabulary where fixed_vocab=False
        """
        if fixed_vocab:
            vocabulary = self.vocabulary_
        else:
            # Add a new value when a new vocabulary item is seen
            vocabulary = defaultdict()
            vocabulary.default_factory = vocabulary.__len__

        analyze = self.build_analyzer()
        j_indices = _make_int_array()
        indptr = _make_int_array()
        indptr.append(0)
        for doc in raw_documents:
            for feature in analyze(doc):
                try:
                    j_indices.append(vocabulary[feature])
                except KeyError:
                    # Ignore out-of-vocabulary items for fixed_vocab=True
                    continue
            indptr.append(len(j_indices))

        if not fixed_vocab:
            # disable defaultdict behaviour
            vocabulary = dict(vocabulary)
            if not vocabulary:
                raise ValueError("empty vocabulary; perhaps the documents only"
                                 " contain stop words")

        j_indices = frombuffer_empty(j_indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        values = np.ones(len(j_indices))

        X = sp.csr_matrix((values, j_indices, indptr),
                          shape=(len(indptr) - 1, len(vocabulary)),
                          dtype=self.dtype)
        # Repeated (row, column) pairs produced by duplicate tokens in a
        # document are merged by summation, which yields the counts.
        X.sum_duplicates()
        return vocabulary, X

    def fit(self, raw_documents, y=None):
        """Learn a vocabulary dictionary of all tokens in the raw documents.

        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.

        Returns
        -------
        self
        """
        self.fit_transform(raw_documents)
        return self

    def fit_transform(self, raw_documents, y=None):
        """Learn the vocabulary dictionary and return term-document matrix.

        This is equivalent to fit followed by transform, but more efficiently
        implemented.

        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.

        Returns
        -------
        X : array, [n_samples, n_features]
            Document-term matrix.
        """
        # We intentionally don't call the transform method to make
        # fit_transform overridable without unwanted side effects in
        # TfidfVectorizer.
        self._validate_vocabulary()
        max_df = self.max_df
        min_df = self.min_df
        max_features = self.max_features

        vocabulary, X = self._count_vocab(raw_documents,
                                          self.fixed_vocabulary_)

        if self.binary:
            X.data.fill(1)

        if not self.fixed_vocabulary_:
            X = self._sort_features(X, vocabulary)

            n_doc = X.shape[0]
            # Fractional thresholds are interpreted as proportions of the
            # number of documents, integers as absolute document counts.
            max_doc_count = (max_df
                             if isinstance(max_df, numbers.Integral)
                             else max_df * n_doc)
            min_doc_count = (min_df
                             if isinstance(min_df, numbers.Integral)
                             else min_df * n_doc)
            if max_doc_count < min_doc_count:
                raise ValueError(
                    "max_df corresponds to < documents than min_df")
            X, self.stop_words_ = self._limit_features(X, vocabulary,
                                                       max_doc_count,
                                                       min_doc_count,
                                                       max_features)

            self.vocabulary_ = vocabulary

        return X

    def transform(self, raw_documents):
        """Transform documents to document-term matrix.

        Extract token counts out of raw text documents using the vocabulary
        fitted with fit or the one provided to the constructor.

        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.

        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Document-term matrix.
        """
        if not hasattr(self, 'vocabulary_'):
            # A constructor-provided vocabulary may not have been validated
            # yet when transform is called without a prior fit.
            self._validate_vocabulary()

        self._check_vocabulary()

        # use the same matrix-building strategy as fit_transform
        _, X = self._count_vocab(raw_documents, fixed_vocab=True)
        if self.binary:
            X.data.fill(1)
        return X

    def inverse_transform(self, X):
        """Return terms per document with nonzero entries in X.

        Parameters
        ----------
        X : {array, sparse matrix}, shape = [n_samples, n_features]

        Returns
        -------
        X_inv : list of arrays, len = n_samples
            List of arrays of terms.
        """
        self._check_vocabulary()

        if sp.issparse(X):
            # We need CSR format for fast row manipulations.
            X = X.tocsr()
        else:
            # We need to convert X to a matrix, so that the indexing
            # returns 2D objects
            X = np.asmatrix(X)
        n_samples = X.shape[0]

        # Invert the term -> index mapping once, then index it with each
        # row's nonzero column positions.
        terms = np.array(list(self.vocabulary_.keys()))
        indices = np.array(list(self.vocabulary_.values()))
        inverse_vocabulary = terms[np.argsort(indices)]

        return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
                for i in range(n_samples)]

    def get_feature_names(self):
        """Array mapping from feature integer indices to feature name"""
        self._check_vocabulary()

        # Terms sorted by their column index, i.e. in feature order.
        return [t for t, i in sorted(six.iteritems(self.vocabulary_),
                                     key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
    """Transform a count matrix to a normalized tf or tf-idf representation

    Tf means term-frequency while tf-idf means term-frequency times inverse
    document-frequency. This is a common term weighting scheme in information
    retrieval, that has also found good use in document classification.

    The goal of using tf-idf instead of the raw frequencies of occurrence of a
    token in a given document is to scale down the impact of tokens that occur
    very frequently in a given corpus and that are hence empirically less
    informative than features that occur in a small fraction of the training
    corpus.

    The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
    instead of tf * idf. The effect of this is that terms with zero idf, i.e.
    that occur in all documents of a training set, will not be entirely
    ignored. The formulas used to compute tf and idf depend on parameter
    settings that correspond to the SMART notation used in IR, as follows:

    Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
    Idf is "t" when use_idf is given, "n" (none) otherwise.
    Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.

    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.

    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.

    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).

    References
    ----------
    .. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
       Information Retrieval. Addison Wesley, pp. 68-74.`

    .. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
       Introduction to Information Retrieval. Cambridge University
       Press, pp. 118-120.`
    """

    def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False):
        self.norm = norm
        self.use_idf = use_idf
        self.smooth_idf = smooth_idf
        self.sublinear_tf = sublinear_tf

    def fit(self, X, y=None):
        """Learn the idf vector (global term weights)

        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts
        """
        if not sp.issparse(X):
            X = sp.csc_matrix(X)
        if self.use_idf:
            n_samples, n_features = X.shape
            df = _document_frequency(X)

            # perform idf smoothing if required
            df += int(self.smooth_idf)
            n_samples += int(self.smooth_idf)

            # log+1 instead of log makes sure terms with zero idf don't get
            # suppressed entirely.
            idf = np.log(float(n_samples) / df) + 1.0
            # The idf vector is stored as a sparse diagonal matrix so that
            # transform can apply it with a single sparse matrix product.
            self._idf_diag = sp.spdiags(idf,
                                        diags=0, m=n_features, n=n_features)

        return self

    def transform(self, X, copy=True):
        """Transform a count matrix to a tf or tf-idf representation

        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts

        copy : boolean, default True
            Whether to copy X and operate on the copy or perform in-place
            operations.

        Returns
        -------
        vectors : sparse matrix, [n_samples, n_features]
        """
        # NOTE(review): np.float is the deprecated alias of the builtin
        # float (i.e. float64); confirm whether float32 inputs should be
        # preserved by this check as well.
        if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
            # preserve float family dtype
            X = sp.csr_matrix(X, copy=copy)
        else:
            # convert counts or binary occurrences to floats
            X = sp.csr_matrix(X, dtype=np.float64, copy=copy)

        n_samples, n_features = X.shape

        if self.sublinear_tf:
            # Only stored (non-zero) entries are rescaled: tf -> 1 + log(tf).
            np.log(X.data, X.data)
            X.data += 1

        if self.use_idf:
            check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')

            expected_n_features = self._idf_diag.shape[0]
            if n_features != expected_n_features:
                raise ValueError("Input has n_features=%d while the model"
                                 " has been trained with n_features=%d" % (
                                     n_features, expected_n_features))
            # *= doesn't work
            X = X * self._idf_diag

        if self.norm:
            X = normalize(X, norm=self.norm, copy=False)

        return X

    @property
    def idf_(self):
        # Recover the dense idf vector from the stored diagonal matrix;
        # None before fit (or when fit ran with use_idf disabled).
        if hasattr(self, "_idf_diag"):
            return np.ravel(self._idf_diag.sum(axis=0))
        else:
            return None
class TfidfVectorizer(CountVectorizer):
    """Convert a collection of raw documents to a matrix of TF-IDF features.

    Equivalent to CountVectorizer followed by TfidfTransformer.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.

        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.

        Otherwise the input is expected to be the sequence strings or
        bytes items are expected to be analyzed directly.

    encoding : string, 'utf-8' by default.
        If bytes or files are given to analyze, this encoding is used to
        decode.

    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.

    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        an direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.

    analyzer : string, {'word', 'char'} or callable
        Whether the feature should be made of word or character n-grams.

        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.

    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.

    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.

    ngram_range : tuple (min_n, max_n)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.

    stop_words : string {'english'}, list, or None (default)
        If a string, it is passed to _check_stop_list and the appropriate stop
        list is returned. 'english' is currently the only supported string
        value.

        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.

        If None, no stop words will be used. max_df can be set to a value
        in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra corpus document frequency of terms.

    lowercase : boolean, default True
        Convert all characters to lowercase before tokenizing.

    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).

    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.

    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold. This value is also
        called cut-off in the literature.
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.

    max_features : int or None, default=None
        If not None, build a vocabulary that only consider the top
        max_features ordered by term frequency across the corpus.

        This parameter is ignored if vocabulary is not None.

    vocabulary : Mapping or iterable, optional
        Either a Mapping (e.g., a dict) where keys are terms and values are
        indices in the feature matrix, or an iterable over terms. If not
        given, a vocabulary is determined from the input documents.

    binary : boolean, default=False
        If True, all non-zero term counts are set to 1. This does not mean
        outputs will have only 0/1 values, only that the tf term in tf-idf
        is binary. (Set idf and normalization to False to get 0/1 outputs.)

    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().

    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.

    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.

    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.

    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).

    Attributes
    ----------
    idf_ : array, shape = [n_features], or None
        The learned idf vector (global term weights)
        when ``use_idf`` is set to True, None otherwise.

    stop_words_ : set
        Terms that were ignored because they either:

          - occurred in too many documents (`max_df`)
          - occurred in too few documents (`min_df`)
          - were cut off by feature selection (`max_features`).

        This is only available if no vocabulary was given.

    See also
    --------
    CountVectorizer
        Tokenize the documents and count the occurrences of token and return
        them as a sparse matrix

    TfidfTransformer
        Apply Term Frequency Inverse Document Frequency normalization to a
        sparse matrix of occurrence counts.

    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model size
    when pickling. This attribute is provided only for introspection and can
    be safely removed using delattr or set to None before pickling.
    """

    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None, lowercase=True,
                 preprocessor=None, tokenizer=None, analyzer='word',
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), max_df=1.0, min_df=1,
                 max_features=None, vocabulary=None, binary=False,
                 dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False):
        # All counting-related parameters are handled by the parent
        # CountVectorizer; only the tf-idf weighting options are kept here.
        super(TfidfVectorizer, self).__init__(
            input=input, encoding=encoding, decode_error=decode_error,
            strip_accents=strip_accents, lowercase=lowercase,
            preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
            stop_words=stop_words, token_pattern=token_pattern,
            ngram_range=ngram_range, max_df=max_df, min_df=min_df,
            max_features=max_features, vocabulary=vocabulary, binary=binary,
            dtype=dtype)

        self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
                                       smooth_idf=smooth_idf,
                                       sublinear_tf=sublinear_tf)

    # Broadcast the TF-IDF parameters to the underlying transformer instance
    # for easy grid search and repr

    @property
    def norm(self):
        return self._tfidf.norm

    @norm.setter
    def norm(self, value):
        self._tfidf.norm = value

    @property
    def use_idf(self):
        return self._tfidf.use_idf

    @use_idf.setter
    def use_idf(self, value):
        self._tfidf.use_idf = value

    @property
    def smooth_idf(self):
        return self._tfidf.smooth_idf

    @smooth_idf.setter
    def smooth_idf(self, value):
        self._tfidf.smooth_idf = value

    @property
    def sublinear_tf(self):
        return self._tfidf.sublinear_tf

    @sublinear_tf.setter
    def sublinear_tf(self, value):
        self._tfidf.sublinear_tf = value

    @property
    def idf_(self):
        return self._tfidf.idf_

    def fit(self, raw_documents, y=None):
        """Learn vocabulary and idf from training set.

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects

        Returns
        -------
        self : TfidfVectorizer
        """
        X = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(X)
        return self

    def fit_transform(self, raw_documents, y=None):
        """Learn vocabulary and idf, return term-document matrix.

        This is equivalent to fit followed by transform, but more efficiently
        implemented.

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects

        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Tf-idf-weighted document-term matrix.
        """
        X = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(X)
        # X is already a transformed view of raw_documents so
        # we set copy to False
        return self._tfidf.transform(X, copy=False)

    def transform(self, raw_documents, copy=True):
        """Transform documents to document-term matrix.

        Uses the vocabulary and document frequencies (df) learned by fit (or
        fit_transform).

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects

        copy : boolean, default True
            Whether to copy X and operate on the copy or perform in-place
            operations.

        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Tf-idf-weighted document-term matrix.
        """
        check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')

        # NOTE(review): the `copy` argument is accepted but never forwarded;
        # copy=False is hard-coded below, which is safe because X is a fresh
        # matrix produced by CountVectorizer.transform just above.
        X = super(TfidfVectorizer, self).transform(raw_documents)
        return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
terkkila/scikit-learn | examples/svm/plot_rbf_parameters.py | 57 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores results from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the value number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
    """Normalize variant mapping [vmin, midpoint, vmax] linearly onto [0, 0.5, 1].

    Moving the colormap midpoint close to the best scores keeps small
    differences near the top of the range visually distinguishable.
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        anchors_x = [self.vmin, self.midpoint, self.vmax]
        anchors_y = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors_x, anchors_y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
# shift labels {1, 2} down to {0, 1} for binary classification
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
      % (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
    for gamma in gamma_2d_range:
        clf = SVC(C=C, gamma=gamma)
        clf.fit(X_2d, y_2d)
        classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
    # evaluate decision function in a grid
    Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # visualize decision function for these parameters
    plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
    plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
              size='medium')
    # visualize parameter's effect on decision function
    plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
    plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
# NOTE(review): grid_scores_ is the pre-0.18 scikit-learn API; x[1] is the
# mean validation score of one parameter setting.
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
           norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
BennerLab/atg | etc/merge_fastq.py | 1 | 1389 | #!/usr/bin/env python3
import os
import subprocess
import pandas
def merge_fastq(filename_list, execute=False):
    """
    concatenate fastq.gz files that have been split across multiple sequencing lanes.
    """
    # One row per input file; the regexp pulls (sample prefix, read number)
    # out of Illumina-style names like sample_S1_L001_R1_001.fastq.gz.
    frame = pandas.DataFrame(filename_list, columns=['filename'])
    lane_info = frame.filename.apply(os.path.basename).str.extract(r'(.*)_S\d+_L\d\d\d_(R\d)_\d\d\d', expand=True).join(frame)
    # Columns 0 and 1 are the extracted sample prefix and read number.
    lane_info['output'] = lane_info[0] + '_' + lane_info[1] + '.fastq.gz'
    for merged_name, lane_group in lane_info.groupby('output'):
        if execute:
            # gzip members concatenate cleanly, so plain `cat` is valid here.
            cat_cmd = ['cat'] + lane_group['filename'].sort_values().tolist()
            with open(merged_name, 'w') as merged_file:
                subprocess.run(cat_cmd, stdout=merged_file, check=True)
        else:
            # Dry run: show which inputs would be merged into each output.
            pandas.set_option("display.max_colwidth", 120)
            print(merged_name)
            print("\t" + lane_group.to_string(columns=['filename'], header=False, index=False, formatters={'filename': lambda x: "\t" + x}) + "\n")
if __name__ == '__main__':
    # CLI entry point: group lane-split fastq.gz files and (optionally) merge them.
    import argparse
    parser = argparse.ArgumentParser()
    # One or more fastq.gz paths; files are grouped by sample prefix and read number.
    parser.add_argument('filename_list', nargs='+')
    parser.add_argument('-x', '--execute', action='store_true', help="execute merge (just prints commands by default)")
    args = parser.parse_args()
    merge_fastq(args.filename_list, args.execute)
| gpl-3.0 |
wangyum/beam | sdks/python/apache_beam/examples/complete/juliaset/juliaset/juliaset.py | 6 | 4519 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Julia set computing workflow: https://en.wikipedia.org/wiki/Julia_set.
We use the quadratic polynomial f(z) = z*z + c, with c = -.62772 +.42193i
"""
from __future__ import absolute_import
import argparse
import apache_beam as beam
from apache_beam.io import WriteToText
def from_pixel(x, y, n):
  """Converts a NxN pixel position to a (-1..1, -1..1) complex number."""
  real_part = 2.0 * x / n - 1.0
  imag_part = 2.0 * y / n - 1.0
  return complex(real_part, imag_part)
def get_julia_set_point_color(element, c, n, max_iterations):
  """Given a pixel, convert it into a point in our julia set.

  Args:
    element: (x, y) pixel position in an NxN grid.
    c: complex constant of the quadratic polynomial f(z) = z*z + c.
    n: grid size.
    max_iterations: iteration budget for the escape test.

  Returns:
    (x, y, i) where i is the iteration at which |z| escaped the radius
    (or the last iteration if it never escaped).
  """
  x, y = element
  z = from_pixel(x, y, n)
  i = 0  # defined even when max_iterations == 0 (the loop body never runs)
  # `range` instead of `xrange`: xrange does not exist on Python 3.
  for i in range(max_iterations):
    if z.real * z.real + z.imag * z.imag > 2.0:
      break
    z = z * z + c
  return x, y, i
def generate_julia_set_colors(pipeline, c, n, max_iterations):
  """Compute julia set coordinates for each point in our set."""
  # Lazily enumerate every (x, y) pixel of the NxN grid.
  grid_points = ((x, y) for x in range(n) for y in range(n))
  return (pipeline
          | 'add points' >> beam.Create(grid_points)
          | beam.Map(get_julia_set_point_color, c, n, max_iterations))
def generate_julia_set_visualization(data, n, max_iterations):
  """Generate the pixel matrix for rendering the julia set as an image.

  Args:
    data: iterable of (x, y, iteration) triplets.
    n: image size (NxN pixels).
    max_iterations: the iteration budget used to produce `data`; scales the
      iteration count onto the color palette.

  Returns:
    (n, n, 3) uint8 RGB array.
  """
  import numpy as np  # pylint: disable=wrong-import-order, wrong-import-position
  # 16x16x16 = 4096-entry RGB palette.
  colors = []
  for r in range(0, 256, 16):
    for g in range(0, 256, 16):
      for b in range(0, 256, 16):
        colors.append((r, g, b))
  xy = np.zeros((n, n, 3), dtype=np.uint8)
  for x, y, iteration in data:
    # Floor division: the original '/' produces a float index under
    # Python 3, which raises TypeError when used to index `colors`.
    xy[x, y] = colors[iteration * len(colors) // max_iterations]
  return xy
def save_julia_set_visualization(out_file, image_array):
  """Save the fractal image of our julia set as a png."""
  # Imported locally so the pipeline itself has no hard matplotlib dependency.
  from matplotlib import pyplot as plt  # pylint: disable=wrong-import-order, wrong-import-position
  plt.imsave(out_file, image_array, format='png')
def run(argv=None):  # pylint: disable=missing-docstring
  """Parse CLI flags and run the julia-set Beam pipeline."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--grid_size',
                      dest='grid_size',
                      default=1000,
                      help='Size of the NxN matrix')
  parser.add_argument(
      '--coordinate_output',
      dest='coordinate_output',
      required=True,
      help='Output file to write the color coordinates of the image to.')
  parser.add_argument('--image_output',
                      dest='image_output',
                      default=None,
                      help='Output file to write the resulting image to.')
  known_args, pipeline_args = parser.parse_known_args(argv)
  with beam.Pipeline(argv=pipeline_args) as p:
    n = int(known_args.grid_size)
    # c = -.62772 + .42193i per the module docstring.
    coordinates = generate_julia_set_colors(p, complex(-.62772, .42193), n, 100)
    def x_coord_key(x_y_i):
      (x, y, i) = x_y_i
      return (x, (x, y, i))
    # Group each coordinate triplet by its x value, then write the coordinates
    # to the output file with an x-coordinate grouping per line.
    # pylint: disable=expression-not-assigned
    (coordinates
     | 'x coord key' >> beam.Map(x_coord_key)
     | 'x coord' >> beam.GroupByKey()
     | 'format' >> beam.Map(
         lambda k_coords: ' '.join('(%s, %s, %s)' % c for c in k_coords[1]))
     | WriteToText(known_args.coordinate_output))
    # Optionally render the image and save it to a file.
    # TODO(silviuc): Add this functionality.
    # if p.options.image_output is not None:
    #   julia_set_image = generate_julia_set_visualization(
    #       file_with_coordinates, n, 100)
    #   save_julia_set_visualization(p.options.image_output, julia_set_image)
| apache-2.0 |
CartoDB/cartoframes | setup.py | 1 | 2389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def walk_subpkg(name):
    """Collect data-file paths under cartoframes/<name>, relative to the package dir."""
    pkg_root = 'cartoframes'
    collected = []
    for parent, _dirs, filenames in os.walk(os.path.join(pkg_root, name)):
        # Drop the leading package directory component from the path.
        rel_dir = os.sep.join(parent.split(os.sep)[1:])
        collected.extend(os.path.join(rel_dir, fname) for fname in filenames)
    return collected
def get_version():
    """Read __version__ from cartoframes/_version.py without importing the package."""
    namespace = {}
    with open('cartoframes/_version.py') as fp:
        exec(fp.read(), namespace)
    return namespace['__version__']
# Runtime dependencies (install_requires).
REQUIRES = [
    'appdirs>=1.4.3,<2.0',
    'carto>=1.11.2,<2.0',
    'jinja2>=2.10.1,<3.0',
    'pandas>=0.25.0',
    'geopandas>=0.6.0,<1.0',
    'unidecode>=1.1.0,<2.0',
    'semantic_version>=2.8.0,<3'
]
# Extra dependencies for the test suite: pip install cartoframes[tests]
EXTRAS_REQUIRES_TESTS = [
    'pytest',
    'pytest-mock',
    'pylint',
    'flake8'
]
# Non-Python files shipped with the package; assets are discovered
# recursively by walk_subpkg.
PACKAGE_DATA = {
    '': [
        'LICENSE',
        'CONTRIBUTORS',
    ],
    'cartoframes': [
        'assets/*',
        'assets/*.j2'
    ] + walk_subpkg('assets'),
}
DISTNAME = 'cartoframes'
DESCRIPTION = 'CARTO Python package for data scientists'
LICENSE = 'BSD'
URL = 'https://github.com/CartoDB/cartoframes'
AUTHOR = 'CARTO'
EMAIL = 'contact@carto.com'
setup(
    name=DISTNAME,
    version=get_version(),
    description=DESCRIPTION,
    long_description=open('README.rst').read(),
    long_description_content_type='text/x-rst',
    license=LICENSE,
    url=URL,
    author=AUTHOR,
    author_email=EMAIL,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8'
    ],
    keywords=['carto', 'data', 'science', 'maps', 'spatial', 'pandas'],
    packages=find_packages(),
    package_data=PACKAGE_DATA,
    package_dir={'cartoframes': 'cartoframes'},
    include_package_data=True,
    install_requires=REQUIRES,
    extras_require={
        'tests': EXTRAS_REQUIRES_TESTS
    },
    python_requires='>=3.5'
)
| bsd-3-clause |
shamidreza/unitselection | generate_speech.py | 1 | 36237 | """
Author: Seyed Hamidreza Mohammadi
This file is part of the shamidreza/uniselection software.
Please refer to the LICENSE provided alongside the software (which is GPL v2,
http://www.gnu.org/licenses/gpl-2.0.html).
This file includes the code for synthesizing speech as output by getting the
unit sequence as input.
input:
1- unit sequence
2- target duration (not yet)
3- target pitch (not yet)
4- target formants (not yet)
output:
1- speech
"""
from utils import *
from extract_unit_info import *
import copy
def hanning2(t0l, t0r):
    """Asymmetric Hanning-style window: rising half of length t0l, falling half of length t0r."""
    window = np.ones(t0l + t0r, dtype=np.float64)
    if t0l > 1:
        window[:t0l] = 0.5 - 0.5 * np.cos(np.pi * np.arange(t0l) / (t0l - 1))
    else:
        window[:t0l] = 0.5
    if t0r > 1:
        window[t0l:] = 0.5 + 0.5 * np.cos(np.pi * np.arange(t0r) / (t0r - 1))
    else:
        window[t0l:] = 0.5
    return window
def hanning_mod2(t0l, t0r, alpha):
    """Flat-topped window with Hanning tapers of width round(2*alpha*t0) at each end."""
    window = np.ones(t0l + t0r, dtype=np.float64)
    n_left = int(np.round(t0l * alpha * 2))
    window[:n_left] = 0.5 - 0.5 * np.cos(np.pi * np.arange(n_left) / (n_left - 1))
    n_right = int(np.round(t0r * alpha * 2))
    window[-n_right:] = 0.5 + 0.5 * np.cos(np.pi * np.arange(n_right) / (n_right - 1))
    return window
def encode_har(s, F0, t0l, t0r, Fs):
    """Fit complex harmonic coefficients to the segment s (t0l samples before and
    t0r samples after a glottal closure instant) by windowed least squares.

    Returns the harmonic coefficients WITHOUT the DC component; decode_har is
    the matching resynthesis routine. NOTE(review): exact floating-point
    operation order matters here, do not reorder the linear algebra.
    """
    def toeplitz(c):
        # Minimal scipy.linalg.toeplitz replacement for a Hermitian matrix
        # built from its first column c.
        c = np.asarray(c).ravel()
        r = c.conjugate()
        vals = np.concatenate((r[-1:0:-1], c))
        a, b = np.ogrid[:len(c), len(r)-1:-1:-1]
        return vals[a + b]
    TwoPiJ = 2j * np.pi
    numH = Fs // (2 * F0) # sinusoids UP to this Fs (but not necessarily the actual Fs as below)
    DC = 1 # after Xiaochuan Niu
    if DC:
        l = np.arange(numH + 1)
    else:
        l = np.arange(numH) + 1
    # Sample indices relative to the GCI, and the complex exponential basis.
    n = np.arange(t0l + t0r) - t0l
    B = np.exp((TwoPiJ * F0 / Fs) * np.dot(n[:,np.newaxis], l[:,np.newaxis].T))
    # Weighted least squares with an asymmetric Hanning window; the normal
    # equations are Toeplitz, so only the first column of R'R is needed.
    w = hanning2(t0l, t0r)
    R = np.dot(np.diag(w), B)
    t = np.dot(np.conj(R.T), R[:,0])
    b = np.dot(np.conj(R.T), w * s)
    H = np.conj(np.linalg.solve(0.5 * toeplitz(t), b).T)
    if 0:
        shat = np.dot(B, np.conj(H.T)).real
        print(np.mean((s-shat)**2)**0.5) # RMS
        from matplotlib import pyplot as pp
        pp.plot(s)
        pp.plot(shat)
        pp.show()
    if DC:
        return H[1:] # don't return the DC component, just use it during fitting
    else:
        return H
def decode_har(H, F0, t0l, t0r, Fs):
    """Resynthesize a waveform segment from harmonic coefficients H (no DC term)."""
    harmonic_idx = np.arange(1, len(H) + 1)   # harmonic numbers 1..K (no DC)
    sample_idx = np.arange(-t0l, t0r)         # sample positions relative to the GCI
    basis = np.exp((2j * np.pi * F0 / Fs) * np.outer(sample_idx, harmonic_idx))
    return np.dot(basis, np.conj(H.T)).real
def warp_har(H, inp_frm, out_frm, F0, t0l, t0r, Fs):
    """Frequency-warp harmonics by mapping output formant frequencies back to input ones.

    Magnitudes and (unwrapped) phases are linearly resampled at the warped
    harmonic frequencies.
    """
    mags = np.abs(H)
    phases = np.unwrap(np.angle(H))
    harm_freqs = np.arange(1, len(H) + 1) * F0
    # For each output harmonic frequency, find the source frequency to read from.
    src_freqs = np.interp(harm_freqs, out_frm, inp_frm)
    warped_mags = np.interp(src_freqs, harm_freqs, mags)
    warped_phases = np.interp(src_freqs, harm_freqs, phases)  # linear interp of unwrapped phase
    return warped_mags * np.exp(1j * warped_phases)
def concatenate_units_nooverlap(units, fnames):
    """Concatenate the waveform spans of the selected units with hard joins (no cross-fade).

    units: sequence of unit records with starting/ending sample positions and
    source-file indices; fnames maps a unit's filename index to a wav basename.
    Returns the concatenated int16 waveform.
    """
    # Fixed 30 s scratch buffer — assumes the result fits; TODO confirm.
    wavs = np.zeros((16000*30),dtype=np.int16)
    cur = 0
    i = 0
    while True:
        st = units[i].starting_sample
        st_ov = units[i].overlap_starting_sample
        en = 0
        j = i
        # Extend j over a run of units that are consecutive in the corpus
        # (unit_id increases by 1), so they can be copied as one span.
        for j in range(i, units.shape[0]-1):
            if units[j].unit_id != units[j+1].unit_id-1:
                break
        en= units[j].ending_sample
        en_ov= units[j].overlap_ending_sample
        wav_name=corpus_path+'/wav/'+fnames[units[i].filename]+'.wav'
        fs, wav = read_wav(wav_name)
        # Copy the whole consecutive span [st, en) from the source wav.
        wavs[cur:cur+(en-st)] = wav[st:en]
        cur += (en-st)
        i = j + 1
        if i >= units.shape[0]:
            break
    return wavs[:cur]
def concatenate_units_overlap(units, fnames, overlap=0.2):
    """Concatenate unit waveform spans with linear cross-fades at the joins.

    The fade width on each side is `overlap` times the distance between a
    unit's boundary sample and its overlap_* sample. Returns int16 audio.
    """
    # Fixed 10 s scratch buffer (30 s in the sibling functions) — assumes the
    # result fits; TODO confirm.
    wavs = np.zeros((16000*10),dtype=np.int16)
    wavs_debug = np.zeros((16000*10,units.shape[0]),dtype=np.int16)
    cur = 0
    i = 0
    while True:
        st = units[i].starting_sample
        st_ov = units[i].overlap_starting_sample
        en = 0
        j = i
        # Extend j over corpus-consecutive units so they are copied as one span.
        for j in range(i, units.shape[0]-1):
            if units[j].unit_id != units[j+1].unit_id-1:
                break
        en= units[j].ending_sample
        en_ov= units[j].overlap_ending_sample
        wav_name=corpus_path+'/wav/'+fnames[units[i].filename]+'.wav'
        fs, wav = read_wav(wav_name)
        # Take the span extended on both sides by the fade regions and apply
        # linear fade-in/fade-out ramps.
        cur_wav = copy.deepcopy(wav[st-int(overlap*abs(st_ov-st)):en+int(overlap*abs(en_ov-en))])
        cur_wav[:int(overlap*abs(st_ov-st))] *= np.linspace(0.0,1.0,int(overlap*abs(st_ov-st)))
        cur_wav[-int(overlap*abs(en_ov-en)):] *= np.linspace(1.0,0.0,int(overlap*abs(en_ov-en)))
        # Overlap-add into the output; clamp at the buffer start for the
        # first unit (its fade-in would begin before sample 0).
        if cur-int(overlap*abs(st_ov-st)) < 0:
            wavs[:cur-int(overlap*abs(st_ov-st))+cur_wav.shape[0]] += \
                cur_wav[-(cur-int(overlap*abs(st_ov-st))+cur_wav.shape[0]):]
        else:
            wavs[cur-int(overlap*abs(st_ov-st)):cur-int(overlap*abs(st_ov-st))+cur_wav.shape[0]] += cur_wav
        cur += (en-st)
        i = j + 1
        if i >= units.shape[0]:
            break
    return wavs[:cur]
def concatenate_units_duration_overlap(units, fnames, times, overlap=0.2):
    """Concatenate unit spans with cross-fades; intended to honor target durations.

    NOTE(review): the `times` (target duration) argument is currently unused —
    the body is identical to concatenate_units_overlap apart from buffer size.
    """
    wavs = np.zeros((16000*30),dtype=np.int16)
    wavs_debug = np.zeros((16000*30,units.shape[0]),dtype=np.int16)
    cur = 0
    i = 0
    while True:
        st = units[i].starting_sample
        st_ov = units[i].overlap_starting_sample
        en = 0
        j = i
        # Extend j over corpus-consecutive units so they are copied as one span.
        for j in range(i, units.shape[0]-1):
            if units[j].unit_id != units[j+1].unit_id-1:
                break
        en= units[j].ending_sample
        en_ov= units[j].overlap_ending_sample
        wav_name=corpus_path+'/wav/'+fnames[units[i].filename]+'.wav'
        fs, wav = read_wav(wav_name)
        # Span extended by the fade regions, with linear in/out ramps applied.
        cur_wav = copy.deepcopy(wav[st-int(overlap*abs(st_ov-st)):en+int(overlap*abs(en_ov-en))])
        cur_wav[:int(overlap*abs(st_ov-st))] *= np.linspace(0.0,1.0,int(overlap*abs(st_ov-st)))
        cur_wav[-int(overlap*abs(en_ov-en)):] *= np.linspace(1.0,0.0,int(overlap*abs(en_ov-en)))
        # Overlap-add, clamping at the buffer start for the first unit.
        if cur-int(overlap*abs(st_ov-st)) < 0:
            wavs[:cur-int(overlap*abs(st_ov-st))+cur_wav.shape[0]] += \
                cur_wav[-(cur-int(overlap*abs(st_ov-st))+cur_wav.shape[0]):]
        else:
            wavs[cur-int(overlap*abs(st_ov-st)):cur-int(overlap*abs(st_ov-st))+cur_wav.shape[0]] += cur_wav
        cur += (en-st)
        i = j + 1
        if i >= units.shape[0]:
            break
    return wavs[:cur]
def pit2gci(times, pits, vox_times, vox_vals):
    """Convert a pitch contour into glottal closure instant (GCI) times.

    Parameters
    ----------
    times : time stamps (seconds) aligned with `pits`.
    pits : pitch values (Hz).
    vox_times : time stamps for the voicing decisions.
    vox_vals : voicing flags (0 = unvoiced, nonzero = voiced).

    Returns
    -------
    1-D array of GCI times, one pitch period apart. In unvoiced stretches the
    period is jittered with np.random around the mean pitch, so that part of
    the output is non-deterministic.
    """
    gcis = np.zeros((10000))
    cur = 0
    cur_pi = 0   # index into times/pits
    cur_vi = 0   # index into vox_times/vox_vals
    cnt = 0
    mean_p = pits.mean()
    std_p = pits.std()
    while True:
        if vox_vals[cur_vi] == 0:  # unvoiced: random period around the mean pitch
            period = 1.0/(mean_p+np.random.normal()*std_p)
        else:  # voiced: period from the local pitch value
            period = (1.0/pits[cur_pi])
        cur += period
        if cur > times[-1]:
            break
        gcis[cnt] = cur
        # advance cur_vi to the voicing frame bracketing the current time
        # (range, not the Python-2-only xrange)
        for i in range(5):
            if cur_vi+i+1 < len(vox_times):
                if vox_times[cur_vi+i] < cur and vox_times[cur_vi+i+1] > cur:
                    cur_vi = cur_vi+i
                    break
        if vox_vals[cur_vi]:  # if voiced, move cur_pi to the pitch frame closest in time
            closest_p = 10000000
            closest_pi = 10000000
            for i in range(5):
                if abs(1.0*times[min(cur_pi+i, pits.shape[0]-1)]-cur) < closest_p:
                    closest_p = abs(1.0*times[min(cur_pi+i, pits.shape[0]-1)]-cur)
                    closest_pi = cur_pi+i
            assert closest_p != 10000000
            cur_pi = closest_pi
        cnt += 1
    gcis = gcis[:cnt]
    return gcis
def units2gci(units, fnames):
    """Build the GCI (pitch mark) sequence of the concatenated unit waveform.

    Reads each source file's pitch-mark (.pm) file, keeps the marks that fall
    inside the unit span, and re-bases them onto the output timeline (in
    samples at 16 kHz).
    """
    cur = 0
    i = 0
    cnt = 0
    gcis = []
    while True:
        st = units[i].starting_sample
        j = i
        for j in range(i, units.shape[0]-1): # find consecutive
            if units[j].unit_id != units[j+1].unit_id-1:
                break
        en= units[j].ending_sample
        pm_name=corpus_path+'/pm/'+fnames[units[i].filename]+'.pm'
        cur_gcis = read_pm(pm_name)
        cur_gcis = np.array(cur_gcis)
        # .pm files are in seconds; convert to samples at 16 kHz.
        cur_gcis *= 16000.0
        #cur_wav = copy.deepcopy(wav[st:en])
        cur_first_gci, cur_last_gci = _select_gci_range(cur_gcis, st, en)
        if not gcis:
            start_from = 0
        else:
            start_from = gcis[-1]
        # Shift this span's marks so its first mark lands on the last mark so
        # far; drop the duplicated first mark.
        gcis = gcis + (cur_gcis[cur_first_gci:cur_last_gci+1]-cur_gcis[cur_first_gci]+start_from).tolist()[1:]
        cur += (cur_gcis[cur_last_gci]-cur_gcis[cur_first_gci])
        i = j + 1
        cnt += 1
        if i >= units.shape[0]:
            break
    return gcis
def units2dur(units, fnames):
    """Cumulative boundary times (in samples) of the unit sequence, starting at 0.0.

    fnames is unused; kept for signature parity with the other units2* helpers.
    """
    boundaries = [0.0]
    idx = 0
    while True:
        unit = units[idx]
        boundaries.append(boundaries[-1] + unit.ending_sample - unit.starting_sample)
        idx += 1
        if idx >= units.shape[0]:
            break
    return boundaries
def units2for(units, fnames, times, for_time, for_val):
    """Time-align a formant track (for_time, for_val) to the output unit timeline.

    For each unit, the formant frames between its target boundaries are
    resampled (per formant column) onto the number of 80-sample frames the
    unit actually occupies in the output.
    NOTE(review): `(en-st)/80` relies on Python 2 integer division — under
    Python 3 it yields a float slice bound and fails; confirm before porting.
    """
    new_for_time = np.zeros(100000)
    new_for_val = np.zeros((100000,for_val.shape[1]))
    cur_new = 0
    cur = 0
    i = 0
    while True:
        st=units[i].starting_sample
        en= units[i].ending_sample
        ust = times[i]
        uen = times[i+1]
        # Nearest formant frames to the unit's target start/end times.
        ust_nearest = np.abs(ust-for_time).argmin()
        uen_nearest = np.abs(uen-for_time).argmin()
        st_nearest = cur_new
        en_nearest = st_nearest + (en-st)/80#framesize
        # Linearly resample each formant column onto the output frame count.
        for k in range(for_val.shape[1]):
            new_for_val[st_nearest:en_nearest,k] = \
                np.interp(np.linspace(0.0,1.0,en_nearest-st_nearest),
                          np.linspace(0.0,1.0,uen_nearest-ust_nearest),
                          for_val[ust_nearest:uen_nearest,k])
        # Continue the frame clock at 80-sample (5 ms @ 16 kHz) steps.
        new_for_time[st_nearest:en_nearest] = new_for_time[cur_new-1] + 80 +\
            np.arange(en_nearest-st_nearest)*80
        cur_new += en_nearest-st_nearest
        i += 1
        if i >= units.shape[0]:
            break
    new_for_val = new_for_val[:cur_new,:]
    new_for_time = new_for_time[:cur_new]
    return new_for_time, new_for_val
def _select_gci_range(gcis, st, en):
first_gci = 1000000
for i in range(gcis.shape[0]):
if gcis[i] > st:
first_gci = i
break
assert first_gci != 1000000
last_gci = 1000000
for j in range(first_gci, gcis.shape[0]):
if gcis[j] > en:
last_gci = j-1
break
if last_gci == 1000000:
last_gci = gcis.shape[0]-1
return first_gci, last_gci
def _psola(output_gcis, input_gcis, input_wav):
    """Pitch-synchronous overlap-add: re-render input_wav so its pitch marks
    (input_gcis) land on the target pitch marks (output_gcis).

    For every output period, the proportionally-nearest input period is
    extracted as a two-sided (left+right) grain, Hanning-windowed, and
    overlap-added at the output mark. Returns the synthesized float waveform.
    """
    output_gcis = (output_gcis).astype(np.int32)
    input_gcis = (input_gcis).astype(np.int32)
    num_input_frames = input_gcis.shape[0]-2
    num_output_frames = output_gcis.shape[0]-2
    out_wav = np.zeros((output_gcis[-1]-output_gcis[0]))
    out_wav_debug = np.zeros((output_gcis[-1]-output_gcis[0], 1000))
    for i in range(1, output_gcis.shape[0]-1):
        # Relative position of this output mark -> matching input mark index.
        sample_out = (output_gcis[i]-output_gcis[0])/float(output_gcis[-1]-output_gcis[0])
        #sample_inp = input_gcis[i]/float(input_gcis[-1]-input_gcis[0])
        #sample_out = 1+int(sample_out*num_output_frames)
        sample_inp = 1+int(sample_out*num_input_frames)
        # Left half-grain: the input period before the mark, right-aligned
        # into the output period and faded in with a half-Hanning ramp.
        left_input_size = input_gcis[sample_inp]-input_gcis[sample_inp-1]
        left_output_size = output_gcis[i]-output_gcis[i-1]
        left_inp = input_wav[input_gcis[sample_inp-1]:input_gcis[sample_inp]]
        left_out = np.zeros(left_output_size)
        left_out[-1*min(left_input_size, left_output_size):] = \
            copy.deepcopy(left_inp[-1*min(left_input_size, left_output_size):])
        ##left_out *= np.linspace(0.0, 1.0, left_out.shape[0])
        # linear
        #left_out[-1*min(left_input_size, left_output_size):] *= \
        #np.linspace(0.0, 1.0, min(left_input_size, left_output_size))
        # hanning
        left_out[-1*min(left_input_size, left_output_size):] *= \
            np.hanning(min(left_input_size, left_output_size)*2)[:min(left_input_size, left_output_size)]
        #left_out *= \
        #np.hanning(left_out.shape[0]*2)[:left_out.shape[0]]
        #np.linspace(0.0, 1.0, min(left_input_size, left_output_size))
        # Right half-grain: the input period after the mark, left-aligned and
        # faded out with the falling half of a Hanning window.
        right_input_size = input_gcis[sample_inp+1]-input_gcis[sample_inp]
        right_output_size = output_gcis[i+1]-output_gcis[i]
        right_inp = input_wav[input_gcis[sample_inp]:input_gcis[sample_inp+1]]
        right_out = np.zeros(right_output_size)
        right_out[:min(right_output_size,right_input_size)] = \
            copy.deepcopy(right_inp[:min(right_output_size,right_input_size)])
        ##right_out *= np.linspace(1.0, 0.0, right_out.shape[0])
        # linear
        #right_out[:min(right_output_size,right_input_size)] *= \
        #np.linspace(1.0, 0.0, min(right_output_size,right_input_size))
        # hanning
        right_out[:min(right_output_size,right_input_size)] *= \
            np.hanning(min(right_output_size,right_input_size)*2)[min(right_output_size,right_input_size):]
        #right_out *= \
        #np.hanning(right_out.shape[0]*2)[right_out.shape[0]:]
        #np.linspace(1.0, 0.0, min(right_output_size,right_input_size))
        if 1: # true psola
            out_wav[output_gcis[i-1]-output_gcis[0]:output_gcis[i+1]-output_gcis[0]] += np.r_[left_out, right_out]
            out_wav_debug[output_gcis[i-1]-output_gcis[0]:output_gcis[i+1]-output_gcis[0], i-1] = np.r_[left_out, right_out]
        else: # only right
            out_wav[output_gcis[i]-output_gcis[0]:output_gcis[i+1]-output_gcis[0]] = right_out
            out_wav_debug[output_gcis[i]-output_gcis[0]:output_gcis[i+1]-output_gcis[0], i-1] = right_out
    if 0: ## vis
        ax=pp.subplot(311)
        pp.plot(out_wav)
        pp.plot(output_gcis-output_gcis[0], np.ones(output_gcis.shape[0])*2000, '*')
        pp.subplot(312,sharex=ax)
        for j in range(output_gcis.shape[0]-2):
            pp.plot(out_wav_debug[:,j])
        pp.plot(output_gcis-output_gcis[0], np.ones(output_gcis.shape[0])*2000, '*')
        pp.subplot(313,sharex=ax)
        pp.plot(input_wav[input_gcis[0]:input_gcis[-1]])
        pp.plot(input_gcis-input_gcis[0], np.ones(input_gcis.shape[0])*2000, '*')
        pp.show()
    return out_wav
def _psola_har(output_gcis, input_gcis, input_wav):
    """PSOLA variant that re-renders each two-sided grain through the harmonic
    model: the grain is fitted with encode_har, resynthesized with decode_har,
    and windowed with hanning_mod2 before overlap-add.
    """
    output_gcis = (output_gcis).astype(np.int32)
    input_gcis = (input_gcis).astype(np.int32)
    num_input_frames = input_gcis.shape[0]-2
    num_output_frames = output_gcis.shape[0]-2
    out_wav = np.zeros((output_gcis[-1]-output_gcis[0]))
    out_wav_debug = np.zeros((output_gcis[-1]-output_gcis[0], 1000))
    for i in range(1, output_gcis.shape[0]-1):
        # Relative position of this output mark -> matching input mark index.
        sample_out = (output_gcis[i]-output_gcis[0])/float(output_gcis[-1]-output_gcis[0])
        #sample_inp = input_gcis[i]/float(input_gcis[-1]-input_gcis[0])
        #sample_out = 1+int(sample_out*num_output_frames)
        sample_inp = 1+int(sample_out*num_input_frames)
        # Left half-grain, right-aligned and faded in (half-Hanning).
        left_input_size = input_gcis[sample_inp]-input_gcis[sample_inp-1]
        left_output_size = output_gcis[i]-output_gcis[i-1]
        left_inp = input_wav[input_gcis[sample_inp-1]:input_gcis[sample_inp]]
        left_out = np.zeros(left_output_size)
        left_out[-1*min(left_input_size, left_output_size):] = \
            copy.deepcopy(left_inp[-1*min(left_input_size, left_output_size):])
        ##left_out *= np.linspace(0.0, 1.0, left_out.shape[0])
        # linear
        #left_out[-1*min(left_input_size, left_output_size):] *= \
        #np.linspace(0.0, 1.0, min(left_input_size, left_output_size))
        # hanning
        left_out[-1*min(left_input_size, left_output_size):] *= \
            np.hanning(min(left_input_size, left_output_size)*2)[:min(left_input_size, left_output_size)]
        #left_out *= \
        #np.hanning(left_out.shape[0]*2)[:left_out.shape[0]]
        #np.linspace(0.0, 1.0, min(left_input_size, left_output_size))
        # Right half-grain, left-aligned and faded out (half-Hanning).
        right_input_size = input_gcis[sample_inp+1]-input_gcis[sample_inp]
        right_output_size = output_gcis[i+1]-output_gcis[i]
        right_inp = input_wav[input_gcis[sample_inp]:input_gcis[sample_inp+1]]
        right_out = np.zeros(right_output_size)
        right_out[:min(right_output_size,right_input_size)] = \
            copy.deepcopy(right_inp[:min(right_output_size,right_input_size)])
        ##right_out *= np.linspace(1.0, 0.0, right_out.shape[0])
        # linear
        #right_out[:min(right_output_size,right_input_size)] *= \
        #np.linspace(1.0, 0.0, min(right_output_size,right_input_size))
        # hanning
        right_out[:min(right_output_size,right_input_size)] *= \
            np.hanning(min(right_output_size,right_input_size)*2)[min(right_output_size,right_input_size):]
        #right_out *= \
        #np.hanning(right_out.shape[0]*2)[right_out.shape[0]:]
        #np.linspace(1.0, 0.0, min(right_output_size,right_input_size))
        if 1: # true psola
            # Analyze/resynthesize the grain with the harmonic model at the
            # grain's own fundamental (one period left + one right @ 16 kHz).
            t0l = left_out.shape[0]
            t0r = right_out.shape[0]
            f0 = 2.0*16000.0/(t0l+t0r)
            har=encode_har(np.r_[left_out, right_out], f0, t0l, t0r, 16000)
            ww=decode_har(har, f0, t0l, t0r, 16000)*hanning_mod2(t0l, t0r, 0.25)
            out_wav[output_gcis[i-1]-output_gcis[0]:output_gcis[i+1]-output_gcis[0]] += ww
            out_wav_debug[output_gcis[i-1]-output_gcis[0]:output_gcis[i+1]-output_gcis[0], i-1] = ww
        else: # only right
            out_wav[output_gcis[i]-output_gcis[0]:output_gcis[i+1]-output_gcis[0]] = right_out
            out_wav_debug[output_gcis[i]-output_gcis[0]:output_gcis[i+1]-output_gcis[0], i-1] = right_out
    if 0: ## vis
        ax=pp.subplot(311)
        pp.plot(out_wav)
        pp.plot(output_gcis-output_gcis[0], np.ones(output_gcis.shape[0])*2000, '*')
        pp.subplot(312,sharex=ax)
        for j in range(output_gcis.shape[0]-2):
            pp.plot(out_wav_debug[:,j])
        pp.plot(output_gcis-output_gcis[0], np.ones(output_gcis.shape[0])*2000, '*')
        pp.subplot(313,sharex=ax)
        pp.plot(input_wav[input_gcis[0]:input_gcis[-1]])
        pp.plot(input_gcis-input_gcis[0], np.ones(input_gcis.shape[0])*2000, '*')
        pp.show()
    return out_wav
def _psola_har_warp(output_gcis, input_gcis, input_wav, src_frm, trg_frm):
    """Pitch-synchronously resynthesise ``input_wav`` onto the target GCI grid
    ``output_gcis`` while warping its harmonic spectrum from the source formant
    tracks ``src_frm`` to the target tracks ``trg_frm``.

    For each target pitch period the matching source period is found by
    relative position, a two-period (left + right) grain is cut out and
    Hann-windowed, encoded with ``encode_har``, spectrally warped with
    ``warp_har`` along piecewise-linear frequency maps built from the first
    4 formant values, decoded with ``decode_har`` and overlap-added into the
    output buffer.

    NOTE(review): the sample rate is hard-coded to 16000 Hz throughout, and
    GCI arrays are assumed to hold monotonically increasing sample indices —
    confirm against the callers.  ``src_frm``/``trg_frm`` are assumed to be
    (frames, >=4) formant-frequency arrays in Hz — TODO confirm.
    """
    # Work on integer sample indices from here on.
    output_gcis = (output_gcis).astype(np.int32)
    input_gcis = (input_gcis).astype(np.int32)
    # GCIs delimit periods, so there are two fewer usable frames than marks.
    num_input_frames = input_gcis.shape[0]-2
    num_output_frames = output_gcis.shape[0]-2
    out_wav = np.zeros((output_gcis[-1]-output_gcis[0]))
    # One column per synthesised grain; only read by the disabled plot block.
    out_wav_debug = np.zeros((output_gcis[-1]-output_gcis[0], 1000))
    for i in range(1, output_gcis.shape[0]-1):
        # Relative position of this output GCI in [0, 1], used to pick the
        # corresponding source period.
        sample_out = (output_gcis[i]-output_gcis[0])/float(output_gcis[-1]-output_gcis[0])
        #sample_inp = input_gcis[i]/float(input_gcis[-1]-input_gcis[0])
        #sample_out = 1+int(sample_out*num_output_frames)
        sample_inp = 1+int(sample_out*num_input_frames)
        # --- left half of the grain: the period ending at the chosen GCI ---
        left_input_size = input_gcis[sample_inp]-input_gcis[sample_inp-1]
        left_output_size = output_gcis[i]-output_gcis[i-1]
        left_inp = input_wav[input_gcis[sample_inp-1]:input_gcis[sample_inp]]
        left_out = np.zeros(left_output_size)
        # Right-align the source period in the (possibly longer) output slot.
        left_out[-1*min(left_input_size, left_output_size):] = \
            copy.deepcopy(left_inp[-1*min(left_input_size, left_output_size):])
        ##left_out *= np.linspace(0.0, 1.0, left_out.shape[0])
        # linear
        #left_out[-1*min(left_input_size, left_output_size):] *= \
        #np.linspace(0.0, 1.0, min(left_input_size, left_output_size))
        # hanning
        # Rising half of a Hann window for the crossfade-in.
        left_out[-1*min(left_input_size, left_output_size):] *= \
            np.hanning(min(left_input_size, left_output_size)*2)[:min(left_input_size, left_output_size)]
        #left_out *= \
        #np.hanning(left_out.shape[0]*2)[:left_out.shape[0]]
        #np.linspace(0.0, 1.0, min(left_input_size, left_output_size))
        # --- right half of the grain: the period starting at the chosen GCI ---
        right_input_size = input_gcis[sample_inp+1]-input_gcis[sample_inp]
        right_output_size = output_gcis[i+1]-output_gcis[i]
        right_inp = input_wav[input_gcis[sample_inp]:input_gcis[sample_inp+1]]
        right_out = np.zeros(right_output_size)
        # Left-align the source period in the output slot.
        right_out[:min(right_output_size,right_input_size)] = \
            copy.deepcopy(right_inp[:min(right_output_size,right_input_size)])
        ##right_out *= np.linspace(1.0, 0.0, right_out.shape[0])
        # linear
        #right_out[:min(right_output_size,right_input_size)] *= \
        #np.linspace(1.0, 0.0, min(right_output_size,right_input_size))
        # hanning
        # Falling half of a Hann window for the crossfade-out.
        right_out[:min(right_output_size,right_input_size)] *= \
            np.hanning(min(right_output_size,right_input_size)*2)[min(right_output_size,right_input_size):]
        #right_out *= \
        #np.hanning(right_out.shape[0]*2)[right_out.shape[0]:]
        #np.linspace(1.0, 0.0, min(right_output_size,right_input_size))
        if 1: # true psola
            # Period lengths of the two halves and the resulting local F0
            # for a 16 kHz sample rate.
            t0l = left_out.shape[0]
            t0r = right_out.shape[0]
            f0 = 2.0*16000.0/(t0l+t0r)
            har=encode_har(np.r_[left_out, right_out], f0, t0l, t0r, 16000)
            if trg_frm.shape[0] != 0:
                # Build monotonic piecewise-linear frequency maps
                # 0 -> F1..F4 -> 8000 Hz for source and target.
                cur_src_frm = np.r_[0.0]
                for k in range(4):
                    #cur_src_frm=np.r_[cur_src_frm, src_frm[int(src_frm.shape[0]*i/float(output_gcis.shape[0])),k]-\
                    #src_frm[int(src_frm.shape[0]*i/float(output_gcis.shape[0])),k+4]/2.0 ]
                    #cur_src_frm=np.r_[cur_src_frm, src_frm[int(src_frm.shape[0]*i/float(output_gcis.shape[0])),k]+\
                    #src_frm[int(src_frm.shape[0]*i/float(output_gcis.shape[0])),k+4]/2.0 ]
                    cur_src_frm=np.r_[cur_src_frm, src_frm[int(src_frm.shape[0]*i/float(output_gcis.shape[0])),k]]
                cur_src_frm=np.r_[cur_src_frm, 8000.0]
                # Enforce non-decreasing breakpoints.
                for k in range(cur_src_frm.shape[0]-1):
                    if cur_src_frm[k+1] < cur_src_frm[k]:
                        cur_src_frm[k+1] = cur_src_frm[k]
                cur_trg_frm = np.r_[0.0]
                for k in range(4):
                    #cur_trg_frm=np.r_[cur_trg_frm, trg_frm[int(trg_frm.shape[0]*i/float(output_gcis.shape[0])),k]-\
                    #trg_frm[int(trg_frm.shape[0]*i/float(output_gcis.shape[0])),k+4]/2.0 ]
                    #cur_trg_frm=np.r_[cur_trg_frm, trg_frm[int(trg_frm.shape[0]*i/float(output_gcis.shape[0])),k]+\
                    #trg_frm[int(trg_frm.shape[0]*i/float(output_gcis.shape[0])),k+4]/2.0 ]
                    cur_trg_frm=np.r_[cur_trg_frm, trg_frm[int(trg_frm.shape[0]*i/float(output_gcis.shape[0])),k]]
                cur_trg_frm=np.r_[cur_trg_frm, 8000.0]
                for k in range(cur_trg_frm.shape[0]-1):
                    if cur_trg_frm[k+1] < cur_trg_frm[k]:
                        cur_trg_frm[k+1] = cur_trg_frm[k]
            else:
                # No target formants supplied: build the (wider) source map
                # from formant centre +/- half bandwidth and warp onto itself
                # (identity warp).
                cur_src_frm = np.r_[0.0]
                for k in range(4):
                    cur_src_frm=np.r_[cur_src_frm, src_frm[int(src_frm.shape[0]*i/float(output_gcis.shape[0])),k]-\
                        src_frm[int(src_frm.shape[0]*i/float(output_gcis.shape[0])),k+4]/2.0 ]
                    cur_src_frm=np.r_[cur_src_frm, src_frm[int(src_frm.shape[0]*i/float(output_gcis.shape[0])),k]+\
                        src_frm[int(src_frm.shape[0]*i/float(output_gcis.shape[0])),k+4]/2.0 ]
                cur_src_frm=np.r_[cur_src_frm, 8000.0]
                for k in range(cur_src_frm.shape[0]-1):
                    if cur_src_frm[k+1] < cur_src_frm[k]:
                        cur_src_frm[k+1] = cur_src_frm[k]
                cur_trg_frm = cur_src_frm.copy()
            # Warp the harmonic amplitudes, resynthesise and re-window.
            har2=warp_har(har, cur_src_frm, cur_trg_frm, f0, t0l, t0r, 16000)
            ww=decode_har(har2, f0, t0l, t0r, 16000)*hanning_mod2(t0l, t0r, 0.25)
            #sm=np.sum(ww**2)
            #ww/=np.abs(ww).max()##$
            #ww*=10000.0
            # Crude clipping guard for int16 output (full scale is 32767).
            if np.abs(ww).max() > 20000:
                ww/= 2
            # Overlap-add the two-period grain around output GCI i.
            out_wav[output_gcis[i-1]-output_gcis[0]:output_gcis[i+1]-output_gcis[0]] += ww
            out_wav_debug[output_gcis[i-1]-output_gcis[0]:output_gcis[i+1]-output_gcis[0], i-1] = ww
        else: # only right
            out_wav[output_gcis[i]-output_gcis[0]:output_gcis[i+1]-output_gcis[0]] = right_out
            out_wav_debug[output_gcis[i]-output_gcis[0]:output_gcis[i+1]-output_gcis[0], i-1] = right_out
    if 0: ## vis
        # Debug plots: output, per-grain contributions, and input with GCIs.
        ax=pp.subplot(311)
        pp.plot(out_wav)
        pp.plot(output_gcis-output_gcis[0], np.ones(output_gcis.shape[0])*2000, '*')
        pp.subplot(312,sharex=ax)
        for j in range(output_gcis.shape[0]-2):
            pp.plot(out_wav_debug[:,j])
        pp.plot(output_gcis-output_gcis[0], np.ones(output_gcis.shape[0])*2000, '*')
        pp.subplot(313,sharex=ax)
        pp.plot(input_wav[input_gcis[0]:input_gcis[-1]])
        pp.plot(input_gcis-input_gcis[0], np.ones(input_gcis.shape[0])*2000, '*')
        pp.show()
    return out_wav
def concatenate_units_psola_nooverlap(units, fnames, times, gcis):
    """Concatenate selected synthesis units into one waveform, PSOLA-warping
    each run of consecutive units onto the target GCI grid, without any
    overlap region between runs.

    NOTE(review): ``corpus_path``, ``read_wav``, ``read_pm``, ``_psola`` and
    ``_select_gci_range`` are module-level names defined elsewhere in this
    file; units are assumed to carry ``unit_id``, ``filename``,
    ``starting_sample`` and ``ending_sample`` attributes — confirm against
    the unit-selection code.  Output is capped at 30 s of 16 kHz audio.
    """
    wavs = np.zeros((16000*30),dtype=np.int16)
    # Per-run copy of the output, only used by the visualisation below.
    wavs_debug = np.zeros((16000*30,units.shape[0]),dtype=np.int16)
    cur = 0     # current write position (samples) in the output
    i = 0       # index of the first unit of the current run
    cnt = 0     # number of runs emitted so far
    while True:
        st = units[i].starting_sample
        en = 0
        j = i
        cur_dur = 0
        # Extend the run while unit ids are consecutive in the corpus,
        # accumulating the target duration of each extended unit.
        for j in range(i, units.shape[0]-1): # find consecutive
            if units[j].unit_id != units[j+1].unit_id-1:
                break
            cur_dur += (times[j+1]-times[j])
        # Duration of the last unit of the run.
        cur_dur += (times[j+1]-times[j])
        #if j//2-1>=0:
        #    cur_dur += (times[1+j//2]-times[j//2])//2
        # Target GCIs covering [cur, cur+cur_dur) in the output.
        first_gci, last_gci = _select_gci_range(gcis, cur, cur+cur_dur)
        en= units[j].ending_sample
        en_ov= units[j].overlap_ending_sample
        # Load the source audio and its pitch marks (converted to samples).
        wav_name=corpus_path+'/wav/'+fnames[units[i].filename]+'.wav'
        fs, wav = read_wav(wav_name)
        pm_name=corpus_path+'/pm/'+fnames[units[i].filename]+'.pm'
        cur_gcis = read_pm(pm_name)
        cur_gcis = np.array(cur_gcis)
        cur_gcis *= 16000.0
        #cur_wav = copy.deepcopy(wav[st:en])
        # Source GCIs covering the run, then PSOLA-map source -> target grid.
        cur_first_gci, cur_last_gci = _select_gci_range(cur_gcis, st, en)
        cur_wav=_psola(gcis[first_gci:last_gci+1], cur_gcis[cur_first_gci:cur_last_gci+1], wav)
        wavs[gcis[first_gci]:gcis[last_gci]] += cur_wav.astype(np.int16)
        wavs_debug[gcis[first_gci]:gcis[last_gci], cnt] += cur_wav.astype(np.int16)
        #assert cur_dur == cur_wav.shape[0]
        #cur += (en-st)
        # Advance by the actually synthesised span (GCI-aligned).
        cur += (gcis[last_gci]-gcis[first_gci])
        i = j + 1
        cnt += 1
        if i >= units.shape[0]:
            break
    if 1: ## vis
        # Plot each run's contribution for debugging.
        for j in range(cnt):
            pp.plot(wavs_debug[:cur,j])
        pp.show()
    return wavs[:cur]
def concatenate_units_psola_overlap(units, fnames, times, gcis, overlap=0.1):
    """Concatenate selected synthesis units with PSOLA warping and a linear
    crossfade of width ``overlap`` (fraction of each unit's available overlap
    region) between adjacent runs.

    Same structure as :func:`concatenate_units_psola_nooverlap`, but each run
    is extended on both sides into its ``overlap_*_sample`` region and the
    extensions are faded in/out with linear ramps.

    NOTE(review): the ramps are only applied to ``wavs_debug``; the summed
    ``wavs`` buffer receives the un-ramped grains — confirm whether that is
    intentional.  Output is capped at 30 s of 16 kHz audio.
    """
    wavs = np.zeros((16000*30),dtype=np.int16)
    wavs_debug = np.zeros((16000*30,units.shape[0]),dtype=np.int16)
    cur = 0     # current write position (samples) in the output
    i = 0       # first unit of the current run
    cnt = 0     # number of runs emitted so far
    while True:
        # Source start extended left into the overlap region.
        st = units[i].starting_sample-int(overlap*(units[i].starting_sample-units[i].overlap_starting_sample))
        en = 0
        j = i
        cur_dur = 0
        # Extend the run while unit ids are consecutive in the corpus.
        for j in range(i, units.shape[0]-1): # find consecutive
            if units[j].unit_id != units[j+1].unit_id-1:
                break
            cur_dur += (times[j+1]-times[j])
        # Duration of the last unit of the run.
        cur_dur += (times[j+1]-times[j])
        #if j//2-1>=0:
        #    cur_dur += (times[1+j//2]-times[j//2])//2
        # Overlap extents (samples) before and after the run.
        st_ov = int(overlap*(units[i].starting_sample-units[i].overlap_starting_sample))
        en_ov = int(overlap*(units[i].overlap_ending_sample-units[i].ending_sample))
        cur_ov = cur-st_ov
        if cur_ov < 0:
            # Cannot extend before the start of the output.
            cur_ov = 0
            st_ov = 0
        cur_dur_ov = cur_dur + en_ov
        # Target GCIs for the core span and for the extended (overlapped) span.
        first_gci, last_gci = _select_gci_range(gcis, cur, cur+cur_dur)
        first_gci_ov, last_gci_ov = _select_gci_range(gcis, cur_ov, cur+cur_dur_ov)
        en= units[j].ending_sample+en_ov
        # Load the source audio and pitch marks (converted to samples).
        wav_name=corpus_path+'/wav/'+fnames[units[i].filename]+'.wav'
        fs, wav = read_wav(wav_name)
        pm_name=corpus_path+'/pm/'+fnames[units[i].filename]+'.pm'
        cur_gcis = read_pm(pm_name)
        cur_gcis = np.array(cur_gcis)
        cur_gcis *= 16000.0
        #cur_wav = copy.deepcopy(wav[st:en])
        cur_first_gci, cur_last_gci = _select_gci_range(cur_gcis, st, en)
        # PSOLA-map the extended source span onto the extended target grid.
        cur_wav=_psola(gcis[first_gci_ov:last_gci_ov+1], cur_gcis[cur_first_gci:cur_last_gci+1], wav)
        wavs[gcis[first_gci_ov]:gcis[last_gci_ov]] += cur_wav.astype(np.int16)
        # Debug copy: core span as-is, leading/trailing overlap with linear
        # fade-in / fade-out ramps.
        wavs_debug[gcis[first_gci]:gcis[last_gci], cnt] +=\
            cur_wav[gcis[first_gci]-gcis[first_gci_ov]:cur_wav.shape[0]-\
            (gcis[last_gci_ov]-gcis[last_gci])].astype(np.int16)
        wavs_debug[gcis[first_gci_ov]:gcis[first_gci], cnt] +=\
            cur_wav[:gcis[first_gci]-gcis[first_gci_ov]].astype(np.int16) *\
            np.linspace(0.0, 1.0, gcis[first_gci]-gcis[first_gci_ov])
        wavs_debug[gcis[last_gci]:gcis[last_gci_ov], cnt] +=\
            cur_wav[cur_wav.shape[0]-(gcis[last_gci_ov]-gcis[last_gci]):].astype(np.int16) *\
            np.linspace(1.0, 0.0, gcis[last_gci_ov]-gcis[last_gci])
        #assert cur_dur == cur_wav.shape[0]
        #cur += (en-st)
        # Advance by the core (non-overlapped) span only.
        cur += (gcis[last_gci]-gcis[first_gci])
        i = j + 1
        cnt += 1
        if i >= units.shape[0]:
            break
    if 1: ## vis
        for j in range(cnt):
            pp.plot(wavs_debug[:cur,j])
        pp.show()
    return wavs[:cur]
def concatenate_units_psola_har_overlap(units, fnames, old_times, times, gcis, trg_frm_time, trg_frm_val, overlap=0.5):
    """Concatenate selected synthesis units with PSOLA warping, overlap
    regions, and per-run harmonic (formant) warping via
    :func:`_psola_har_warp`.

    Extends :func:`concatenate_units_psola_overlap` by extracting formant
    tracks from each source run (``get_formant``) and aligning them with the
    target tracks ``trg_frm_time``/``trg_frm_val`` before resynthesis.

    NOTE(review): as currently written ``frm1`` and ``frm2`` are both filled
    with the *source* formants (the target-based assignments are commented
    out), so the harmonic warp is effectively an identity map — confirm
    whether this is the intended experiment state.  Output is capped at 30 s
    of 16 kHz audio; this is Python 2 code (``print`` statement below).
    """
    wavs = np.zeros((16000*30),dtype=np.int16)
    wavs_debug = np.zeros((16000*30,units.shape[0]),dtype=np.int16)
    cur = 0     # current write position (samples) in the output
    i = 0       # first unit of the current run
    cnt = 0     # number of runs emitted so far
    # Accumulated per-frame source/target formant tracks (4 formants each).
    frm1 =np.zeros((100000,4))
    frm2 =np.zeros((100000,4))
    frm_cnt = 0
    while True:
        # Source start extended left into the overlap region.
        st = units[i].starting_sample-int(overlap*(units[i].starting_sample-units[i].overlap_starting_sample))
        en = 0
        j = i
        cur_dur = 0
        # Extend the run while unit ids are consecutive in the corpus.
        for j in range(i, units.shape[0]-1): # find consecutive
            if units[j].unit_id != units[j+1].unit_id-1:
                break
            cur_dur += (times[j+1]-times[j])
        # Duration of the last unit of the run.
        cur_dur += (times[j+1]-times[j])
        #if j//2-1>=0:
        #    cur_dur += (times[1+j//2]-times[j//2])//2
        # Overlap extents (samples) before and after the run.
        st_ov = int(overlap*(units[i].starting_sample-units[i].overlap_starting_sample))
        en_ov = int(overlap*(units[i].overlap_ending_sample-units[i].ending_sample))
        cur_ov = cur-st_ov
        if cur_ov < 0:
            cur_ov = 0
            st_ov = 0
        cur_dur_ov = cur_dur + en_ov
        # Target GCIs for the core span and for the extended span.
        first_gci, last_gci = _select_gci_range(gcis, cur, cur+cur_dur)
        first_gci_ov, last_gci_ov = _select_gci_range(gcis, cur_ov, cur+cur_dur_ov)
        en= units[j].ending_sample+en_ov
        # Load the source audio and pitch marks (converted to samples).
        wav_name=corpus_path+'/wav/'+fnames[units[i].filename]+'.wav'
        fs, wav = read_wav(wav_name)
        pm_name=corpus_path+'/pm/'+fnames[units[i].filename]+'.pm'
        cur_gcis = read_pm(pm_name)
        cur_gcis = np.array(cur_gcis)
        cur_gcis *= 16000.0
        #cur_wav = copy.deepcopy(wav[st:en])
        cur_first_gci, cur_last_gci = _select_gci_range(cur_gcis, st, en)
        # Source formant track, with times converted to samples.
        ftime, fval = get_formant(wav, 16000)
        ftime *= 16000
        # Formant-frame indices nearest to the run boundaries, source & target.
        inp_frm_st = np.abs(cur_gcis[cur_first_gci]-ftime).argmin()
        inp_frm_en = np.abs(cur_gcis[cur_last_gci]-ftime).argmin()
        out_frm_st = np.abs(gcis[first_gci_ov]-trg_frm_time).argmin()
        out_frm_en = np.abs(gcis[last_gci_ov]-trg_frm_time).argmin()
        #cur_wav=_psola_har(gcis[first_gci_ov:last_gci_ov+1], cur_gcis[cur_first_gci:cur_last_gci+1], wav)
        #pp.plot(fval[inp_frm_st:inp_frm_en,:4],'b')
        #pp.plot(trg_frm_val[out_frm_st:out_frm_en,:4],'g')
        #pp.show()
        # Number of formant frames for this run, clipped to what the source has.
        LEN = (out_frm_en-out_frm_st)
        if inp_frm_st+LEN > fval.shape[0]:
            LEN = fval.shape[0] - inp_frm_st
        if 1: # align for
            # Resample the target formant track over the run's original time
            # span onto LEN frames.
            ust = old_times[i]
            uen = old_times[j+1]
            ust_nearest = np.abs(ust-trg_frm_time).argmin()
            uen_nearest = np.abs(uen-trg_frm_time).argmin()
            st_nearest = frm_cnt
            en_nearest = st_nearest + (en-st)/80#framesize
            new_for_val = np.zeros((LEN, 8))
            for k in range(trg_frm_val.shape[1]):
                new_for_val[:, k] = \
                    np.interp(np.linspace(0.0,1.0,LEN),
                              np.linspace(0.0,1.0,uen_nearest-ust_nearest),
                              trg_frm_val[ust_nearest:uen_nearest,k])
        #LEN = (inp_frm_en-inp_frm_st)
        #if LEN > (out_frm_en-out_frm_st):
        #LEN = (out_frm_en-out_frm_st)
        # Source map = source formants; target map currently also source
        # formants (target variants left commented out above/below).
        frm1[frm_cnt:frm_cnt+LEN,:] = fval[inp_frm_st:inp_frm_st+LEN,:4]
        #frm1[frm_cnt:frm_cnt+LEN,:] = (new_for_val[:, :4]-new_for_val[:, :4].mean(0))+fval[inp_frm_st:inp_frm_st+LEN,:4].mean(0)
        #frm2[frm_cnt:frm_cnt+LEN,:] = new_for_val[:, :4]
        frm2[frm_cnt:frm_cnt+LEN,:] = fval[inp_frm_st:inp_frm_st+LEN,:4]#(fval[inp_frm_st:inp_frm_st+LEN,:4]-fval[inp_frm_st:inp_frm_st+LEN,:4].mean(0))+new_for_val[:, :4].mean(0)
        # Clamp negative formant values to zero.
        frm2[frm_cnt:frm_cnt+LEN,:][frm2[frm_cnt:frm_cnt+LEN,:]<0]=0.0
        # Harmonic-warping PSOLA of the extended span onto the target grid.
        cur_wav = _psola_har_warp(gcis[first_gci_ov:last_gci_ov+1],
                                  cur_gcis[cur_first_gci:cur_last_gci+1],
                                  wav,
                                  frm1[frm_cnt:frm_cnt+LEN,:],
                                  frm2[frm_cnt:frm_cnt+LEN,:])
        wavs[gcis[first_gci_ov]:gcis[last_gci_ov]] += cur_wav.astype(np.int16)
        # Debug copy: core span as-is, overlap regions with linear ramps.
        wavs_debug[gcis[first_gci]:gcis[last_gci], cnt] +=\
            cur_wav[gcis[first_gci]-gcis[first_gci_ov]:cur_wav.shape[0]-\
            (gcis[last_gci_ov]-gcis[last_gci])].astype(np.int16)
        wavs_debug[gcis[first_gci_ov]:gcis[first_gci], cnt] +=\
            cur_wav[:gcis[first_gci]-gcis[first_gci_ov]].astype(np.int16) *\
            np.linspace(0.0, 1.0, gcis[first_gci]-gcis[first_gci_ov])
        wavs_debug[gcis[last_gci]:gcis[last_gci_ov], cnt] +=\
            cur_wav[cur_wav.shape[0]-(gcis[last_gci_ov]-gcis[last_gci]):].astype(np.int16) *\
            np.linspace(1.0, 0.0, gcis[last_gci_ov]-gcis[last_gci])
        #assert cur_dur == cur_wav.shape[0]
        #cur += (en-st)
        frm_cnt += LEN
        # Advance by the core (non-overlapped) span only.
        cur += (gcis[last_gci]-gcis[first_gci])
        i = j + 1
        cnt += 1
        # Progress indicator (Python 2 print statement).
        print i, units.shape[0]
        if i >= units.shape[0]:
            break
    if 1: ## vis
        # Plot the accumulated formant maps and per-run contributions.
        frm1 = frm1[:frm_cnt,:]
        frm2 = frm2[:frm_cnt,:]
        pp.plot(frm1,'b')
        pp.plot(frm2,'g')
        pp.show()
        for j in range(cnt):
            pp.plot(wavs_debug[:cur,j])
        pp.show()
    return wavs[:cur]
if __name__ == "__main__":
    # Smoke test: regenerate one ARCTIC utterance through the plain PSOLA
    # path using its own pitch marks as both source and target grid, then
    # save the (ideally near-identity) result to out.wav.
    fname = 'arctic_a0007'
    lab_name=corpus_path+'/lab/'+fname+'.lab'
    wav_name=corpus_path+'/wav/'+fname+'.wav'
    pm_name=corpus_path+'/pm/'+fname+'.pm'
    # Pitch marks -> F0 contour (times in seconds at this point).
    inp_gcis=read_pm(pm_name)
    inp_gcis = np.array(inp_gcis)
    time, pit = gci2pit(inp_gcis)
    #times, pits, vox_times, vox_vals = pit_nozero_2_pit_vox(time, pit)
    # Treat the whole utterance as a single voiced region.
    times=time
    pits=pit
    vox_times=[0.0, time[-1]]
    vox_vals = [1.0]
    # Rebuild GCI grids from the contour; identical source and target grids.
    inp_gcis=pit2gci(times, pits, vox_times, vox_vals)
    out_gcis=pit2gci(times, pits, vox_times, vox_vals)
    # Seconds -> samples at 16 kHz.
    inp_gcis *= 16000
    out_gcis *= 16000
    wav_name=corpus_path+'/wav/'+fname+'.wav'
    fs, wav = read_wav(wav_name)
    out_wav = _psola(out_gcis, inp_gcis, wav)
    out_wav = out_wav.astype(np.int16)
    #pp.plot(out_wav);pp.show()
    from scipy.io.wavfile import write as wwrite
    wwrite('out.wav', 16000, out_wav)
print 'successfully saved out.wav' | gpl-2.0 |
jakevdp/seaborn | setup.py | 6 | 3621 | #! /usr/bin/env python
#
# Copyright (C) 2012-2014 Michael Waskom <mwaskom@stanford.edu>
import os

# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."

# Short and long descriptions shown on PyPI.
DESCRIPTION = "Seaborn: statistical data visualization"
LONG_DESCRIPTION = """\
Seaborn is a library for making attractive and informative statistical graphics in Python. It is built on top of matplotlib and tightly integrated with the PyData stack, including support for numpy and pandas data structures and statistical routines from scipy and statsmodels.

Some of the features that seaborn offers are

- Several built-in themes that improve on the default matplotlib aesthetics
- Tools for choosing color palettes to make beautiful plots that reveal patterns in your data
- Functions for visualizing univariate and bivariate distributions or for comparing them between subsets of data
- Tools that fit and visualize linear regression models for different kinds of independent and dependent variables
- Functions that visualize matrices of data and use clustering algorithms to discover structure in those matrices
- A function to plot statistical timeseries data with flexible estimation and representation of uncertainty around the estimate
- High-level abstractions for structuring grids of plots that let you easily build complex visualizations
"""

# Package metadata passed to setup() below.
DISTNAME = 'seaborn'
MAINTAINER = 'Michael Waskom'
MAINTAINER_EMAIL = 'mwaskom@stanford.edu'
URL = 'http://stanford.edu/~mwaskom/software/seaborn/'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/mwaskom/seaborn/'
VERSION = '0.6.dev'

# Prefer setuptools; fall back to distutils when it is unavailable.
try:
    from setuptools import setup
    _has_setuptools = True
except ImportError:
    from distutils.core import setup
def check_dependencies():
    """Return the names of required packages that cannot be imported.

    Just make sure dependencies exist; minimal working versions have not
    been rigorously tested (help on that would be awesome).
    """
    required = ("numpy", "scipy", "matplotlib", "pandas")
    install_requires = []
    for package in required:
        try:
            __import__(package)
        except ImportError:
            install_requires.append(package)
    return install_requires
if __name__ == "__main__":
    # Only require packages that are not already importable.
    install_requires = check_dependencies()

    # Register the package with the metadata constants defined above.
    setup(name=DISTNAME,
          author=MAINTAINER,
          author_email=MAINTAINER_EMAIL,
          maintainer=MAINTAINER,
          maintainer_email=MAINTAINER_EMAIL,
          description=DESCRIPTION,
          long_description=LONG_DESCRIPTION,
          license=LICENSE,
          url=URL,
          version=VERSION,
          download_url=DOWNLOAD_URL,
          install_requires=install_requires,
          packages=['seaborn', 'seaborn.external', 'seaborn.tests'],
          classifiers=[
              'Intended Audience :: Science/Research',
              'Programming Language :: Python :: 2.7',
              'Programming Language :: Python :: 3.3',
              'Programming Language :: Python :: 3.4',
              'License :: OSI Approved :: BSD License',
              'Topic :: Scientific/Engineering :: Visualization',
              'Topic :: Multimedia :: Graphics',
              'Operating System :: POSIX',
              'Operating System :: Unix',
              'Operating System :: MacOS'],
          )
| bsd-3-clause |
shadowleaves/acr | parse.py | 1 | 1774 | #!/usr/bin/env python
import os
# import csv
import gzip
import bson
import pandas as pd
from collections import OrderedDict
def main():
    """Build one 15-minute OHLC bar series for AAPL from gzipped BSON
    tick-bucket files and return it.

    Each file under *path* holds one day of bucketed quotes as a stream of
    BSON documents.  Files are processed in sorted (chronological filename)
    order; each day is renamed to standard bar columns, resampled to
    15-minute bars, and the days are concatenated.

    Returns:
        pandas.DataFrame: 15-minute bars with columns
        open/high/low/close/spd/volume/vwap, in file order.
        (Previously the function computed this frame and dropped it.)
    """
    path = '$HOME/Dropbox/intraday/bson/bbl1_equities_all/AAPL/'
    path = os.path.expandvars(path)

    # Source-field -> bar-column rename map; 'value' is a temporary used to
    # compute the bar VWAP and is dropped at the end.
    mapping = {'open_px': 'open',
               'high_px': 'high',
               'low_px': 'low',
               'close_px': 'close',
               'value': 'value',
               'volume': 'volume',
               'spd': 'spd',
               }

    # Per-bar aggregation rules (loop-invariant, so built once).
    how = OrderedDict([('open', 'first'),
                       ('high', 'max'),
                       ('low', 'min'),
                       ('close', 'last'),
                       ('spd', 'mean'),
                       ('volume', 'sum'),
                       ('value', 'sum'),
                       ])

    res = pd.DataFrame()
    for fname in sorted(os.listdir(path)):  # renamed from `file` (shadowed builtin)
        filepath = os.path.join(path, fname)

        # Decode every BSON document in the gzipped day file.
        docs = []
        with gzip.open(filepath, 'rb') as bson_file:
            for doc in bson.decode_file_iter(bson_file):
                docs.append(doc)

        df = pd.DataFrame(docs).set_index('bucket_start_time')
        df.index.name = None
        # Derived per-bucket quantities: bid/ask spread and traded value.
        df['spd'] = df['best_ask_px'] - df['best_bid_px']
        df['value'] = df['vwap'] * df['volume']
        # Select and rename in one consistent order.
        cols = list(mapping)
        df = df[cols]
        df.columns = [mapping[c] for c in cols]

        # .resample(...).agg(...) replaces the long-removed
        # .resample(..., how=...) keyword form.
        df = df.resample('15Min').agg(how)
        # Volume-weighted average price per bar; drop the helper column.
        df['vwap'] = df['value'] / df['volume']
        df = df.drop('value', axis=1)

        res = pd.concat((res, df), axis=0)

    # Leftover `pdb.set_trace()` debugging breakpoint removed; return the
    # result instead of silently discarding it.
    return res
# Script entry point: run the conversion when executed directly.
if __name__ == '__main__':
    main()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.