repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
Barmaley-exe/scikit-learn | sklearn/feature_selection/rfe.py | 3 | 15243 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import _check_cv as check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
    """Feature ranking with recursive feature elimination.

    Given an external estimator that assigns weights to features (e.g., the
    coefficients of a linear model), recursive feature elimination (RFE)
    selects features by recursively considering smaller and smaller sets of
    features: the estimator is trained on the current set, the features with
    the smallest absolute weights are pruned, and the procedure repeats until
    the desired number of features is reached.

    Parameters
    ----------
    estimator : object
        A supervised learning estimator with a `fit` method that updates a
        `coef_` attribute that holds the fitted parameters. Important
        features must correspond to high absolute values in the `coef_`
        array. For instance, this is the case for most supervised learning
        algorithms such as Support Vector Classifiers and Generalized
        Linear Models from the `svm` and `linear_model` modules.

    n_features_to_select : int or None (default=None)
        The number of features to select. If `None`, half of the features
        are selected.

    step : int or float, optional (default=1)
        If greater than or equal to 1, then `step` corresponds to the
        (integer) number of features to remove at each iteration.
        If within (0.0, 1.0), then `step` corresponds to the percentage
        (rounded down) of features to remove at each iteration.

    estimator_params : dict or None, optional (default=None)
        Parameters for the external estimator; `None` is equivalent to an
        empty dict. Useful for doing grid searches when an `RFE` object is
        passed as an argument to, e.g., a
        `sklearn.grid_search.GridSearchCV` object.

    verbose : int, default=0
        Controls verbosity of output.

    Attributes
    ----------
    n_features_ : int
        The number of selected features.

    support_ : array of shape [n_features]
        The mask of selected features.

    ranking_ : array of shape [n_features]
        The feature ranking, such that ``ranking_[i]`` corresponds to the
        ranking position of the i-th feature. Selected (i.e., estimated
        best) features are assigned rank 1.

    estimator_ : object
        The external estimator fit on the reduced dataset.

    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.
    """

    def __init__(self, estimator, n_features_to_select=None, step=1,
                 estimator_params=None, verbose=0):
        # ``estimator_params`` defaults to None rather than a mutable ``{}``
        # so instances never share state through the default argument.
        self.estimator = estimator
        self.n_features_to_select = n_features_to_select
        self.step = step
        self.estimator_params = estimator_params
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the RFE model and then the underlying estimator on the
        selected features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples]
            The target values.
        """
        X, y = check_X_y(X, y, "csc")
        # Treat a None parameter dict as empty.
        estimator_params = self.estimator_params or {}

        # Initialization
        n_features = X.shape[1]
        if self.n_features_to_select is None:
            # Floor division: under Python 3, ``/`` would yield a float
            # target, which later makes ``threshold`` a float and breaks
            # array slicing.
            n_features_to_select = n_features // 2
        else:
            n_features_to_select = self.n_features_to_select

        if 0.0 < self.step < 1.0:
            step = int(max(1, self.step * n_features))
        else:
            step = int(self.step)
        if step <= 0:
            raise ValueError("Step must be >0")

        # Use builtin bool/int: the np.bool/np.int aliases were removed
        # in NumPy 1.24.
        support_ = np.ones(n_features, dtype=bool)
        ranking_ = np.ones(n_features, dtype=int)

        # Elimination: repeatedly fit and drop the weakest features.
        while np.sum(support_) > n_features_to_select:
            # Remaining features
            features = np.arange(n_features)[support_]

            # Rank the remaining features
            estimator = clone(self.estimator)
            estimator.set_params(**estimator_params)
            if self.verbose > 0:
                print("Fitting estimator with %d features." % np.sum(support_))

            estimator.fit(X[:, features], y)

            # For multi-output/multi-class coef_, aggregate squared weights
            # over outputs before ranking.
            if estimator.coef_.ndim > 1:
                ranks = np.argsort(safe_sqr(estimator.coef_).sum(axis=0))
            else:
                ranks = np.argsort(safe_sqr(estimator.coef_))

            # for sparse case ranks is matrix
            ranks = np.ravel(ranks)

            # Eliminate the worst features, never dropping below the target.
            threshold = min(step, np.sum(support_) - n_features_to_select)
            support_[features[ranks][:threshold]] = False
            ranking_[np.logical_not(support_)] += 1

        # Set final attributes: refit on the selected feature subset.
        self.estimator_ = clone(self.estimator)
        self.estimator_.set_params(**estimator_params)
        self.estimator_.fit(X[:, support_], y)

        self.n_features_ = support_.sum()
        self.support_ = support_
        self.ranking_ = ranking_
        return self

    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Reduce X to the selected features and then predict using the
        underlying estimator.

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape [n_samples]
            The predicted target values.
        """
        return self.estimator_.predict(self.transform(X))

    @if_delegate_has_method(delegate='estimator')
    def score(self, X, y):
        """Reduce X to the selected features and then return the score of
        the underlying estimator.

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.

        y : array of shape [n_samples]
            The target values.
        """
        return self.estimator_.score(self.transform(X), y)

    def _get_support_mask(self):
        # Boolean mask consumed by SelectorMixin.transform.
        return self.support_

    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        return self.estimator_.decision_function(self.transform(X))

    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        return self.estimator_.predict_proba(self.transform(X))

    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
    """Feature ranking with recursive feature elimination and
    cross-validated selection of the best number of features.

    Parameters
    ----------
    estimator : object
        A supervised learning estimator with a `fit` method that updates a
        `coef_` attribute that holds the fitted parameters. Important
        features must correspond to high absolute values in the `coef_`
        array. For instance, this is the case for most supervised learning
        algorithms such as Support Vector Classifiers and Generalized
        Linear Models from the `svm` and `linear_model` modules.

    step : int or float, optional (default=1)
        If greater than or equal to 1, then `step` corresponds to the
        (integer) number of features to remove at each iteration.
        If within (0.0, 1.0), then `step` corresponds to the percentage
        (rounded down) of features to remove at each iteration.

    cv : int or cross-validation generator, optional (default=None)
        If int, it is the number of folds.
        If None, 3-fold cross-validation is performed by default.
        Specific cross-validation objects can also be passed, see
        `sklearn.cross_validation module` for details.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    estimator_params : dict or None, optional (default=None)
        Parameters for the external estimator; `None` is equivalent to an
        empty dict. Useful for doing grid searches when an `RFE` object is
        passed as an argument to, e.g., a
        `sklearn.grid_search.GridSearchCV` object.

    verbose : int, default=0
        Controls verbosity of output.

    Attributes
    ----------
    n_features_ : int
        The number of selected features with cross-validation.

    support_ : array of shape [n_features]
        The mask of selected features.

    ranking_ : array of shape [n_features]
        The feature ranking, such that ``ranking_[i]`` corresponds to the
        ranking position of the i-th feature. Selected (i.e., estimated
        best) features are assigned rank 1.

    grid_scores_ : array of shape [n_subsets_of_features]
        The cross-validation scores such that ``grid_scores_[i]``
        corresponds to the CV score of the i-th subset of features.

    estimator_ : object
        The external estimator fit on the reduced dataset.

    Notes
    -----
    The size of ``grid_scores_`` is equal to
    (n_features + step - 2) // step + 1, where step is the number of
    features removed at each iteration.

    References
    ----------
    .. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
           for cancer classification using support vector machines",
           Mach. Learn., 46(1-3), 389--422, 2002.
    """

    def __init__(self, estimator, step=1, cv=None, scoring=None,
                 estimator_params=None, verbose=0):
        # ``None`` (not a mutable ``{}``) default avoids cross-instance
        # sharing through the default argument.
        self.estimator = estimator
        self.step = step
        self.cv = cv
        self.scoring = scoring
        self.estimator_params = estimator_params
        self.verbose = verbose

    def fit(self, X, y):
        """Fit the RFE model and automatically tune the number of selected
        features.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the total number of features.

        y : array-like, shape = [n_samples]
            Target values (integers for classification, real numbers for
            regression).
        """
        X, y = check_X_y(X, y, "csr")
        # Normalize a None parameter dict to {} so both the inner RFE and
        # the final refit receive a valid mapping.
        estimator_params = self.estimator_params or {}

        # Initialization: rank every feature once per CV fold.
        rfe = RFE(estimator=self.estimator, n_features_to_select=1,
                  step=self.step, estimator_params=estimator_params,
                  verbose=self.verbose - 1)

        cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
        scorer = check_scoring(self.estimator, scoring=self.scoring)
        scores = np.zeros(X.shape[1])
        # Integer dtype: these entries are feature counts that get passed
        # to RFE as ``n_features_to_select``; a float there would break
        # array slicing inside RFE.fit.
        n_features_to_select_by_rank = np.zeros(X.shape[1], dtype=int)

        # Cross-validation
        for n, (train, test) in enumerate(cv):
            X_train, y_train = _safe_split(self.estimator, X, y, train)
            X_test, y_test = _safe_split(self.estimator, X, y, test, train)

            # Compute a full ranking of the features
            # ranking_ contains the same set of values for all CV folds,
            # but perhaps reordered
            ranking_ = rfe.fit(X_train, y_train).ranking_
            # Score each subset of features
            for k in range(0, np.max(ranking_)):
                indices = np.where(ranking_ <= k + 1)[0]
                estimator = clone(self.estimator)
                estimator.fit(X_train[:, indices], y_train)
                score = _score(estimator, X_test[:, indices], y_test, scorer)

                if self.verbose > 0:
                    print("Finished fold with %d / %d feature ranks, score=%f"
                          % (k + 1, np.max(ranking_), score))
                scores[k] += score
                # n_features_to_select_by_rank[k] is being overwritten
                # multiple times, but by the same value
                n_features_to_select_by_rank[k] = indices.size

        # Select the best upper bound for feature rank. It's OK to use the
        # last ranking_, as np.max(ranking_) is the same over all CV folds.
        scores = scores[:np.max(ranking_)]
        k = np.argmax(scores)

        # Re-execute an elimination with best_k over the whole set.
        # Cast to a plain int so RFE gets an integer selection target.
        rfe = RFE(estimator=self.estimator,
                  n_features_to_select=int(n_features_to_select_by_rank[k]),
                  step=self.step, estimator_params=estimator_params)

        rfe.fit(X, y)

        # Set final attributes
        self.support_ = rfe.support_
        self.n_features_ = rfe.n_features_
        self.ranking_ = rfe.ranking_
        self.estimator_ = clone(self.estimator)
        self.estimator_.set_params(**estimator_params)
        self.estimator_.fit(self.transform(X), y)

        # Fixing a normalization error, n is equal to len(cv) - 1
        # here, the scores are normalized by len(cv)
        self.grid_scores_ = scores / len(cv)
        return self
| bsd-3-clause |
PrashntS/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
    """Container object for datasets.

    A dictionary whose keys are also readable and writable as attributes.

    >>> b = Bunch(a=1, b=2)
    >>> b['b']
    2
    >>> b.b
    2
    >>> b.a = 3
    >>> b['a']
    3
    >>> b.c = 6
    >>> b['c']
    6
    """

    def __init__(self, **kwargs):
        super(Bunch, self).__init__(kwargs)

    def __setattr__(self, key, value):
        # Attribute assignment writes straight into the dict.
        self[key] = value

    def __getattr__(self, key):
        # Attribute lookup falls back to dict lookup; a missing key must
        # surface as AttributeError to honor the attribute protocol.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __getstate__(self):
        # Pickle the (empty) instance __dict__; the dict payload is
        # handled by the dict base class.
        return self.__dict__
def get_data_home(data_home=None):
    """Return the path of the scikit-learn data dir.

    This folder is used by some large dataset loaders to avoid downloading
    the data several times.

    By default the data dir is set to a folder named 'scikit_learn_data' in
    the user home folder.  Alternatively, it can be set by the
    'SCIKIT_LEARN_DATA' environment variable or programmatically by giving
    an explicit folder path.  The '~' symbol is expanded to the user home
    folder.  If the folder does not already exist, it is automatically
    created.
    """
    if data_home is None:
        # Environment variable wins over the built-in default location.
        data_home = environ.get('SCIKIT_LEARN_DATA',
                                join('~', 'scikit_learn_data'))
    path = expanduser(data_home)
    if not exists(path):
        makedirs(path)
    return path
def clear_data_home(data_home=None):
    """Delete all the content of the data home cache.

    Parameters
    ----------
    data_home : str or None, optional (default=None)
        Path of the cache folder to remove.  If None, the default location
        resolved by ``get_data_home`` is used (note that ``get_data_home``
        creates the folder first if it does not exist).
    """
    data_home = get_data_home(data_home)
    shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
               load_content=True, shuffle=True, encoding=None,
               decode_error='strict', random_state=0):
    """Load text files with categories as subfolder names.

    Individual samples are assumed to be files stored in a two-level folder
    structure: one subfolder of ``container_path`` per category, each
    containing the files of that category.  The subfolder names are used as
    supervised signal label names; the individual file names are not
    important.

    This function does not try to extract features into a numpy array or
    scipy sparse matrix, and if ``load_content`` is False it does not load
    the files in memory at all.  To use text files in a scikit-learn
    classification or clustering algorithm, build a feature extraction
    transformer with `sklearn.feature_extraction.text`.

    If you set ``load_content=True``, you should also specify the encoding
    of the text via the ``encoding`` parameter; with ``encoding=None`` the
    content is returned as bytes instead of Unicode.

    Parameters
    ----------
    container_path : string or unicode
        Path to the main folder holding one subfolder per category.

    description : string or unicode, optional (default=None)
        A paragraph describing the characteristic of the dataset.

    categories : A collection of strings or None, optional (default=None)
        If None (default), load all the categories.  If not None, list of
        category names to load (other categories ignored).

    load_content : boolean, optional (default=True)
        Whether to load the content of the different files.  If True, a
        'data' attribute containing the text information is present in the
        returned structure; otherwise a 'filenames' attribute gives the
        paths to the files.

    shuffle : bool, optional (default=True)
        Whether or not to shuffle the data.

    encoding : string or None (default is None)
        If None, do not try to decode the content of the files.  If not
        None, encoding used to decode text files to Unicode when
        ``load_content`` is True.

    decode_error : {'strict', 'ignore', 'replace'}, optional
        Instruction on what to do when a byte sequence cannot be decoded
        with the given ``encoding``; passed to ``bytes.decode``.

    random_state : int, RandomState instance or None, optional (default=0)
        Seed or random number generator used for shuffling.

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes: either 'data' (raw text) or
        'filenames' (paths), plus 'target', 'target_names' and 'DESCR'.
    """
    labels = []
    class_names = []
    paths = []

    subfolders = sorted(entry for entry in listdir(container_path)
                        if isdir(join(container_path, entry)))
    if categories is not None:
        subfolders = [entry for entry in subfolders if entry in categories]

    for class_index, subfolder in enumerate(subfolders):
        class_names.append(subfolder)
        subfolder_path = join(container_path, subfolder)
        files = [join(subfolder_path, entry)
                 for entry in sorted(listdir(subfolder_path))]
        paths.extend(files)
        labels.extend([class_index] * len(files))

    # convert to arrays for fancy indexing
    paths = np.array(paths)
    labels = np.array(labels)

    if shuffle:
        rng = check_random_state(random_state)
        order = np.arange(paths.shape[0])
        rng.shuffle(order)
        paths = paths[order]
        labels = labels[order]

    if not load_content:
        return Bunch(filenames=paths,
                     target_names=class_names,
                     target=labels,
                     DESCR=description)

    data = []
    for path in paths:
        with open(path, 'rb') as handle:
            data.append(handle.read())
    if encoding is not None:
        data = [chunk.decode(encoding, decode_error) for chunk in data]
    return Bunch(data=data,
                 filenames=paths,
                 target_names=class_names,
                 target=labels,
                 DESCR=description)
def load_iris():
    """Load and return the iris dataset (classification).

    The iris dataset is a classic and very easy multi-class classification
    dataset: 3 classes, 50 samples per class, 150 samples total, 4
    real-valued positive features.

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes 'data', 'target',
        'target_names', 'feature_names' and 'DESCR'.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> data = load_iris()
    >>> data.target[[10, 25, 50]]
    array([0, 0, 1])
    >>> list(data.target_names)
    ['setosa', 'versicolor', 'virginica']
    """
    module_path = dirname(__file__)
    with open(join(module_path, 'data', 'iris.csv')) as csv_file:
        reader = csv.reader(csv_file)
        # The first row holds the sample/feature counts and class names.
        header = next(reader)
        n_samples = int(header[0])
        n_features = int(header[1])
        target_names = np.array(header[2:])
        data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,), dtype=int)

        for row_index, row in enumerate(reader):
            data[row_index] = np.asarray(row[:-1], dtype=float)
            target[row_index] = np.asarray(row[-1], dtype=int)

    with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
        fdescr = rst_file.read()

    return Bunch(data=data, target=target,
                 target_names=target_names,
                 DESCR=fdescr,
                 feature_names=['sepal length (cm)', 'sepal width (cm)',
                                'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
    """Load and return the digits dataset (classification).

    Each datapoint is an 8x8 image of a digit (10 classes, 1797 samples,
    64 integer features in 0-16).

    Parameters
    ----------
    n_class : integer, between 0 and 10, optional (default=10)
        The number of classes to return.

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes 'data', 'images' (the 8x8
        image corresponding to each sample), 'target', 'target_names' and
        'DESCR'.

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> digits = load_digits()
    >>> print(digits.data.shape)
    (1797, 64)
    """
    module_path = dirname(__file__)
    raw = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
                     delimiter=',')
    with open(join(module_path, 'descr', 'digits.rst')) as f:
        descr = f.read()

    # Last column is the label; the rest are the 64 flattened pixels.
    flat_data = raw[:, :-1]
    target = raw[:, -1]
    # 'images' is a reshaped *view* of the same pixel buffer.
    images = flat_data.view()
    images.shape = (-1, 8, 8)

    if n_class < 10:
        keep = target < n_class
        flat_data = flat_data[keep]
        target = target[keep]
        images = images[keep]

    return Bunch(data=flat_data,
                 target=target.astype(int),
                 target_names=np.arange(10),
                 images=images,
                 DESCR=descr)
def load_diabetes():
    """Load and return the diabetes dataset (regression).

    442 samples, 10 real-valued features in (-.2, .2), integer targets
    between 25 and 346.

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes 'data' (the data to learn)
        and 'target' (the regression target for each sample).
    """
    data_dir = join(dirname(__file__), 'data')
    return Bunch(
        data=np.loadtxt(join(data_dir, 'diabetes_data.csv.gz')),
        target=np.loadtxt(join(data_dir, 'diabetes_target.csv.gz')))
def load_linnerud():
    """Load and return the linnerud dataset (multivariate regression).

    20 samples; 3 integer features (exercise) and 3 integer targets
    (physiological measurements).

    Returns
    -------
    data : Bunch
        Dictionary-like object with 'data' (exercise measurements),
        'target' (physiological measurements), plus 'feature_names',
        'target_names' and 'DESCR'.
    """
    base_dir = join(dirname(__file__), 'data/')

    # Numeric payload: first line of each CSV is a header, hence skiprows=1.
    exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
    physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
                               skiprows=1)

    # Column names come from the header line of each file.
    with open(base_dir + 'linnerud_exercise.csv') as f:
        exercise_names = f.readline().split()
    with open(base_dir + 'linnerud_physiological.csv') as f:
        physiological_names = f.readline().split()
    with open(dirname(__file__) + '/descr/linnerud.rst') as f:
        descr = f.read()

    return Bunch(data=exercise, feature_names=exercise_names,
                 target=physiological,
                 target_names=physiological_names,
                 DESCR=descr)
def load_boston():
    """Load and return the boston house-prices dataset (regression).

    506 samples, 13 real positive features, real targets in 5.-50.

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes 'data', 'target',
        'feature_names' and 'DESCR'.

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> boston = load_boston()
    >>> print(boston.data.shape)
    (506, 13)
    """
    module_path = dirname(__file__)

    with open(join(module_path, 'descr', 'boston_house_prices.rst')) as f:
        descr_text = f.read()

    with open(join(module_path, 'data', 'boston_house_prices.csv')) as f:
        reader = csv.reader(f)
        # First row: sample and feature counts.
        counts = next(reader)
        n_samples = int(counts[0])
        n_features = int(counts[1])
        data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,))
        # Second row: the column names (including the target column).
        feature_names = np.array(next(reader))

        for row_index, row in enumerate(reader):
            data[row_index] = np.asarray(row[:-1], dtype=float)
            target[row_index] = np.asarray(row[-1], dtype=float)

    return Bunch(data=data,
                 target=target,
                 # last column is target value
                 feature_names=feature_names[:-1],
                 DESCR=descr_text)
def load_sample_images():
    """Load sample images for image manipulation.

    Loads both, ``china`` and ``flower``.

    Returns
    -------
    data : Bunch
        Dictionary-like object with attributes 'images' (the two sample
        images), 'filenames' (their file names) and 'DESCR' (the full
        description of the dataset).
    """
    # Import imread lazily so this module does not hard-depend on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL) "
                          "is required to load data from jpeg files")
    module_path = join(dirname(__file__), "images")
    with open(join(module_path, 'README.txt')) as f:
        descr = f.read()
    jpg_paths = [join(module_path, name)
                 for name in os.listdir(module_path)
                 if name.endswith(".jpg")]
    # Decode every jpeg found in the images folder.
    loaded = [imread(path) for path in jpg_paths]
    return Bunch(images=loaded,
                 filenames=jpg_paths,
                 DESCR=descr)
def load_sample_image(image_name):
    """Load the numpy array of a single sample image.

    Parameters
    ----------
    image_name : {`china.jpg`, `flower.jpg`}
        The name of the sample image loaded.

    Returns
    -------
    img : 3D array
        The image as a numpy array: height x width x color.
    """
    dataset = load_sample_images()
    # Return the first bundled image whose filename ends with the request.
    for path, img in zip(dataset.filenames, dataset.images):
        if path.endswith(image_name):
            return img
    # AttributeError kept for backward compatibility with existing callers.
    raise AttributeError("Cannot find sample image: %s" % image_name)
| bsd-3-clause |
f3r/scikit-learn | sklearn/utils/multiclass.py | 13 | 12964 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
    # For a label-indicator matrix the "labels" are just the column indices
    # 0..n_columns-1; check_array validates dense or sparse (csr/csc/coo).
    return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
# Dispatch table mapping a target type (as produced by type_of_target) to
# the helper that extracts its unique labels.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels.

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    *ys : array-likes,

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    # Binary is a special case of multiclass; collapse the mixture.
    if ys_types == set(["binary", "multiclass"]):
        ys_types = set(["multiclass"])

    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)

    label_type = ys_types.pop()

    # Check consistency for the indicator format: all indicator matrices
    # must have the same number of columns (i.e. the same label set).
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")

    # Get the unique set of labels via the per-type helper.
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))

    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))

    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")

    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ```False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel([[1], [0, 2], []])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # A multilabel indicator must be 2-D with more than one column.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False

    if issparse(y):
        # DOK and LIL matrices do not expose a flat ``.data`` array of
        # stored values; convert to CSR first.
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        # NOTE: operator precedence matters here -- this parses as
        # ``empty or (single_stored_value and integral_dtype)``.
        return (len(y.data) == 0 or np.unique(y.data).size == 1 and
                (y.dtype.kind in 'biu' or  # bool, int, uint
                 _is_integral_float(np.unique(y.data))))
    else:
        labels = np.unique(y)

        # At most two distinct values, of boolean/integer kind (or floats
        # that are all integral).
        return len(labels) < 3 and (y.dtype.kind in 'biu' or  # bool, int, uint
                                    _is_integral_float(labels))
def check_classification_targets(y):
    """Ensure that target ``y`` is of a non-regression type.
    Only the following target types (as defined in type_of_target) are
    accepted: 'binary', 'multiclass', 'multiclass-multioutput',
    'multilabel-indicator', 'multilabel-sequences'. Anything else (e.g.
    continuous targets) raises a ``ValueError``.
    Parameters
    ----------
    y : array-like
        Target values to validate.
    """
    allowed = ('binary', 'multiclass', 'multiclass-multioutput',
               'multilabel-indicator', 'multilabel-sequences')
    y_type = type_of_target(y)
    if y_type not in allowed:
        raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
    """Determine the type of data indicated by target `y`
    Parameters
    ----------
    y : array-like
    Returns
    -------
    target_type : string
        One of:
        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.
    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # Accept sequences, sparse matrices and anything array-like, but reject
    # bare strings (a string is a Sequence of characters, not a target).
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, string_types))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    # Indicator matrices are recognized before any conversion below.
    if is_multilabel(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # Known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], string_types)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead.')
    except IndexError:
        # Empty target: fall through to the shape-based checks below.
        pass
    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]
    # From here the target is 1d, a column vector, or a valid 2d array;
    # 2d with several columns gets the "-multioutput" suffix.
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]
    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        return 'continuous' + suffix
    # More than two distinct values, or multi-column rows, means multiclass.
    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data
    Parameters
    ----------
    y : array like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.
    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.
    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column
    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []
    n_samples, n_outputs = y.shape
    if issparse(y):
        # CSC gives cheap per-column access via indptr slices.
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)
        for k in range(n_outputs):
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                # Unweighted: the implicit zeros simply count once each.
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            # NOTE(review): ``bincount`` is assumed to be a
            # numpy.bincount-compatible helper imported elsewhere in this
            # module — confirm against the file's imports.
            class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        # Dense path: handle each output column independently.
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())
    return (classes, n_classes, class_prior)
| bsd-3-clause |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/io/wb.py | 9 | 12688 | # -*- coding: utf-8 -*-
from __future__ import print_function
from pandas.compat import map, reduce, range, lrange
from pandas.io.common import urlopen
from pandas.io import json
import pandas
import numpy as np
import warnings
warnings.warn("\n"
"The pandas.io.wb module is moved to a separate package "
"(pandas-datareader) and will be removed from pandas in a "
"future version.\nAfter installing the pandas-datareader package "
"(https://github.com/pydata/pandas-datareader), you can change "
"the import ``from pandas.io import data, wb`` to "
"``from pandas_datareader import data, wb``.",
FutureWarning)
# This list of country codes was pulled from wikipedia during October 2014.
# While some exceptions do exist, it is the best proxy for countries supported
# by World Bank. It is an aggregation of the 2-digit ISO 3166-1 alpha-2, and
# 3-digit ISO 3166-1 alpha-3, codes, with 'all', 'ALL', and 'All' appended to
# the end.
# Used by download() below to validate the ``country`` argument
# (via np.setdiff1d against this list).
country_codes = ['AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AO', 'AQ', 'AR', \
                 'AS', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BB', 'BD', 'BE', \
                 'BF', 'BG', 'BH', 'BI', 'BJ', 'BL', 'BM', 'BN', 'BO', 'BQ', \
                 'BR', 'BS', 'BT', 'BV', 'BW', 'BY', 'BZ', 'CA', 'CC', 'CD', \
                 'CF', 'CG', 'CH', 'CI', 'CK', 'CL', 'CM', 'CN', 'CO', 'CR', \
                 'CU', 'CV', 'CW', 'CX', 'CY', 'CZ', 'DE', 'DJ', 'DK', 'DM', \
                 'DO', 'DZ', 'EC', 'EE', 'EG', 'EH', 'ER', 'ES', 'ET', 'FI', \
                 'FJ', 'FK', 'FM', 'FO', 'FR', 'GA', 'GB', 'GD', 'GE', 'GF', \
                 'GG', 'GH', 'GI', 'GL', 'GM', 'GN', 'GP', 'GQ', 'GR', 'GS', \
                 'GT', 'GU', 'GW', 'GY', 'HK', 'HM', 'HN', 'HR', 'HT', 'HU', \
                 'ID', 'IE', 'IL', 'IM', 'IN', 'IO', 'IQ', 'IR', 'IS', 'IT', \
                 'JE', 'JM', 'JO', 'JP', 'KE', 'KG', 'KH', 'KI', 'KM', 'KN', \
                 'KP', 'KR', 'KW', 'KY', 'KZ', 'LA', 'LB', 'LC', 'LI', 'LK', \
                 'LR', 'LS', 'LT', 'LU', 'LV', 'LY', 'MA', 'MC', 'MD', 'ME', \
                 'MF', 'MG', 'MH', 'MK', 'ML', 'MM', 'MN', 'MO', 'MP', 'MQ', \
                 'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ', 'NA', \
                 'NC', 'NE', 'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', \
                 'NZ', 'OM', 'PA', 'PE', 'PF', 'PG', 'PH', 'PK', 'PL', 'PM', \
                 'PN', 'PR', 'PS', 'PT', 'PW', 'PY', 'QA', 'RE', 'RO', 'RS', \
                 'RU', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SG', 'SH', 'SI', \
                 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'SS', 'ST', 'SV', \
                 'SX', 'SY', 'SZ', 'TC', 'TD', 'TF', 'TG', 'TH', 'TJ', 'TK', \
                 'TL', 'TM', 'TN', 'TO', 'TR', 'TT', 'TV', 'TW', 'TZ', 'UA', \
                 'UG', 'UM', 'US', 'UY', 'UZ', 'VA', 'VC', 'VE', 'VG', 'VI', \
                 'VN', 'VU', 'WF', 'WS', 'YE', 'YT', 'ZA', 'ZM', 'ZW', \
                 'ABW', 'AFG', 'AGO', 'AIA', 'ALA', 'ALB', 'AND', 'ARE', \
                 'ARG', 'ARM', 'ASM', 'ATA', 'ATF', 'ATG', 'AUS', 'AUT', \
                 'AZE', 'BDI', 'BEL', 'BEN', 'BES', 'BFA', 'BGD', 'BGR', \
                 'BHR', 'BHS', 'BIH', 'BLM', 'BLR', 'BLZ', 'BMU', 'BOL', \
                 'BRA', 'BRB', 'BRN', 'BTN', 'BVT', 'BWA', 'CAF', 'CAN', \
                 'CCK', 'CHE', 'CHL', 'CHN', 'CIV', 'CMR', 'COD', 'COG', \
                 'COK', 'COL', 'COM', 'CPV', 'CRI', 'CUB', 'CUW', 'CXR', \
                 'CYM', 'CYP', 'CZE', 'DEU', 'DJI', 'DMA', 'DNK', 'DOM', \
                 'DZA', 'ECU', 'EGY', 'ERI', 'ESH', 'ESP', 'EST', 'ETH', \
                 'FIN', 'FJI', 'FLK', 'FRA', 'FRO', 'FSM', 'GAB', 'GBR', \
                 'GEO', 'GGY', 'GHA', 'GIB', 'GIN', 'GLP', 'GMB', 'GNB', \
                 'GNQ', 'GRC', 'GRD', 'GRL', 'GTM', 'GUF', 'GUM', 'GUY', \
                 'HKG', 'HMD', 'HND', 'HRV', 'HTI', 'HUN', 'IDN', 'IMN', \
                 'IND', 'IOT', 'IRL', 'IRN', 'IRQ', 'ISL', 'ISR', 'ITA', \
                 'JAM', 'JEY', 'JOR', 'JPN', 'KAZ', 'KEN', 'KGZ', 'KHM', \
                 'KIR', 'KNA', 'KOR', 'KWT', 'LAO', 'LBN', 'LBR', 'LBY', \
                 'LCA', 'LIE', 'LKA', 'LSO', 'LTU', 'LUX', 'LVA', 'MAC', \
                 'MAF', 'MAR', 'MCO', 'MDA', 'MDG', 'MDV', 'MEX', 'MHL', \
                 'MKD', 'MLI', 'MLT', 'MMR', 'MNE', 'MNG', 'MNP', 'MOZ', \
                 'MRT', 'MSR', 'MTQ', 'MUS', 'MWI', 'MYS', 'MYT', 'NAM', \
                 'NCL', 'NER', 'NFK', 'NGA', 'NIC', 'NIU', 'NLD', 'NOR', \
                 'NPL', 'NRU', 'NZL', 'OMN', 'PAK', 'PAN', 'PCN', 'PER', \
                 'PHL', 'PLW', 'PNG', 'POL', 'PRI', 'PRK', 'PRT', 'PRY', \
                 'PSE', 'PYF', 'QAT', 'REU', 'ROU', 'RUS', 'RWA', 'SAU', \
                 'SDN', 'SEN', 'SGP', 'SGS', 'SHN', 'SJM', 'SLB', 'SLE', \
                 'SLV', 'SMR', 'SOM', 'SPM', 'SRB', 'SSD', 'STP', 'SUR', \
                 'SVK', 'SVN', 'SWE', 'SWZ', 'SXM', 'SYC', 'SYR', 'TCA', \
                 'TCD', 'TGO', 'THA', 'TJK', 'TKL', 'TKM', 'TLS', 'TON', \
                 'TTO', 'TUN', 'TUR', 'TUV', 'TWN', 'TZA', 'UGA', 'UKR', \
                 'UMI', 'URY', 'USA', 'UZB', 'VAT', 'VCT', 'VEN', 'VGB', \
                 'VIR', 'VNM', 'VUT', 'WLF', 'WSM', 'YEM', 'ZAF', 'ZMB', \
                 'ZWE', 'all', 'ALL', 'All']
def download(country=None, indicator=None,
             start=2003, end=2005, errors='warn'):
    """
    Download data series from the World Bank's World Development Indicators
    Parameters
    ----------
    indicator: string or list of strings
        taken from the ``id`` field in ``WDIsearch()``. Defaults to
        ['NY.GDP.MKTP.CD', 'NY.GNS.ICTR.ZS'].
    country: string or list of strings.
        ``all`` downloads data for all countries
        2 or 3 character ISO country codes select individual
        countries (e.g.``US``,``CA``) or (e.g.``USA``,``CAN``). The codes
        can be mixed.
        The two ISO lists of countries, provided by wikipedia, are hardcoded
        into pandas as of 11/10/2014. Defaults to ['MX', 'CA', 'US'].
    start: int
        First year of the data series
    end: int
        Last year of the data series (inclusive)
    errors: str {'ignore', 'warn', 'raise'}, default 'warn'
        Country codes are validated against a hardcoded list. This controls
        the outcome of that validation, and attempts to also apply
        to the results from world bank.
        errors='raise', will raise a ValueError on a bad country code.
    Returns
    -------
    ``pandas`` DataFrame with columns: country, iso_code, year,
    indicator value.
    """
    # Resolve the defaults here instead of using mutable default arguments,
    # which would be shared between all calls of this function.
    if country is None:
        country = ['MX', 'CA', 'US']
    if indicator is None:
        indicator = ['NY.GDP.MKTP.CD', 'NY.GNS.ICTR.ZS']
    # Accept a bare string for convenience (isinstance also handles str
    # subclasses, unlike the old ``type(...) == str`` check).
    if isinstance(country, str):
        country = [country]
    bad_countries = np.setdiff1d(country, country_codes)
    # Validate the input
    if len(bad_countries) > 0:
        tmp = ", ".join(bad_countries)
        if errors == 'raise':
            raise ValueError("Invalid Country Code(s): %s" % tmp)
        if errors == 'warn':
            warnings.warn('Non-standard ISO country codes: %s' % tmp)
    # Work with a list of indicators
    if isinstance(indicator, str):
        indicator = [indicator]
    # Download each indicator separately, collecting per-indicator failures
    data = []
    bad_indicators = {}
    for ind in indicator:
        one_indicator_data, msg = _get_data(ind, country, start, end)
        if msg == "Success":
            data.append(one_indicator_data)
        else:
            bad_indicators[ind] = msg
    if len(bad_indicators) > 0:
        bad_ind_msgs = [i + " : " + m for i, m in bad_indicators.items()]
        bad_ind_msgs = "\n\n".join(bad_ind_msgs)
        bad_ind_msgs = "\n\nInvalid Indicators:\n\n%s" % bad_ind_msgs
        if errors == 'raise':
            raise ValueError(bad_ind_msgs)
        if errors == 'warn':
            warnings.warn(bad_ind_msgs)
    # Confirm we actually got some data, and build Dataframe
    if len(data) > 0:
        out = reduce(lambda x, y: x.merge(y, how='outer'), data)
        out = out.drop('iso_code', axis=1)
        out = out.set_index(['country', 'year'])
        out = out._convert(datetime=True, numeric=True)
        return out
    else:
        msg = "No indicators returned data."
        if errors == 'ignore':
            msg += " Set errors='warn' for more information."
        raise ValueError(msg)
def _get_data(indicator="NY.GNS.ICTR.GN.ZS", country='US',
              start=2002, end=2005):
    """Fetch one indicator for the given countries from the World Bank API.
    Returns a tuple ``(frame, message)``: the parsed DataFrame and
    ``"Success"``, or ``None`` and a human-readable error message.
    """
    if isinstance(country, str):
        country = [country]
    countries = ';'.join(country)
    # Build URL for api call
    url = ("http://api.worldbank.org/countries/" + countries + "/indicators/" +
           indicator + "?date=" + str(start) + ":" + str(end) +
           "&per_page=25000&format=json")
    # Download
    with urlopen(url) as response:
        data = response.read()
    # Check to see if there is a possible problem
    possible_message = json.loads(data)[0]
    if 'message' in possible_message.keys():
        msg = possible_message['message'][0]
        try:
            msg = msg['key'].split() + ["\n "] + msg['value'].split()
            wb_err = ' '.join(msg)
        except (KeyError, TypeError):
            # Bug fix: this fallback used to live *outside* the except block,
            # so on the success path ``msg`` had already been rebound to a
            # list and ``msg.keys()`` raised AttributeError. It only makes
            # sense when the lookup above failed and ``msg`` is still a dict.
            wb_err = ""
            if 'key' in msg.keys():
                wb_err = msg['key'] + "\n "
            if 'value' in msg.keys():
                wb_err += msg['value']
        error_msg = "Problem with a World Bank Query \n %s"
        return None, error_msg % wb_err
    if 'total' in possible_message.keys():
        if possible_message['total'] == 0:
            return None, "No results from world bank."
    # Parse JSON file
    data = json.loads(data)[1]
    country = [x['country']['value'] for x in data]
    iso_code = [x['country']['id'] for x in data]
    year = [x['date'] for x in data]
    value = [x['value'] for x in data]
    # Prepare output
    out = pandas.DataFrame([country, iso_code, year, value]).T
    out.columns = ['country', 'iso_code', 'year', indicator]
    return out, "Success"
def get_countries():
    """Query the World Bank API for information about all countries."""
    url = 'http://api.worldbank.org/countries/?per_page=1000&format=json'
    with urlopen(url) as response:
        raw = response.read()
    frame = pandas.DataFrame(json.loads(raw)[1])
    # These fields come back as nested {'id': ..., 'value': ...} dicts;
    # keep only the human-readable value.
    for column in ('adminregion', 'incomeLevel', 'lendingType', 'region'):
        setattr(frame, column,
                [entry['value'] for entry in getattr(frame, column)])
    frame = frame.rename(columns={'id': 'iso3c', 'iso2Code': 'iso2c'})
    return frame
def get_indicators():
    """Download the catalogue of all World Bank data series."""
    url = 'http://api.worldbank.org/indicators?per_page=50000&format=json'
    with urlopen(url) as response:
        raw = response.read()
    frame = pandas.DataFrame(json.loads(raw)[1])
    # Flatten the nested 'source' field to its display value.
    frame.source = [entry['value'] for entry in frame.source]
    frame.sourceOrganization = frame.sourceOrganization.apply(
        lambda text: text.encode('ascii', 'ignore'))
    # Flatten the topics field: each row is a list of dicts that may or may
    # not carry a 'value' entry.
    def _topic_value(topic):
        try:
            return topic['value']
        except:  # keep the original broad catch: entries may not be dicts
            return ''
    frame.topics = frame.topics.apply(
        lambda topics: ' ; '.join(_topic_value(t) for t in topics))
    # Clean output
    frame = frame.sort(columns='id')
    frame.index = pandas.Index(lrange(frame.shape[0]))
    return frame
_cached_series = None


def search(string='gdp.*capi', field='name', case=False):
    """
    Search available data series from the world bank
    Parameters
    ----------
    string: string
        regular expression
    field: string
        id, name, source, sourceNote, sourceOrganization, topics
        See notes below
    case: bool
        case sensitive search?
    Notes
    -----
    The first time this function is run it will download and cache the full
    list of available series. Depending on the speed of your network
    connection, this can take time. Subsequent searches will use the cached
    copy, so they should be much faster.
    id : Data series indicator (for use with the ``indicator`` argument of
    ``WDI()``) e.g. NY.GNS.ICTR.GN.ZS"
    name: Short description of the data series
    source: Data collection project
    sourceOrganization: Data collection organization
    note:
    sourceNote:
    topics:
    """
    # Create cached list of series if it does not exist. ``isinstance``
    # against the public ``pandas.DataFrame`` alias replaces the old exact
    # type check against the private ``pandas.core.frame.DataFrame`` path.
    global _cached_series
    if not isinstance(_cached_series, pandas.DataFrame):
        _cached_series = get_indicators()
    data = _cached_series[field]
    idx = data.str.contains(string, case=case)
    out = _cached_series.ix[idx].dropna()
    return out
| artistic-2.0 |
ssaeger/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 150 | 3651 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
# Shared fixture: three well-separated Gaussian blobs; the tests below
# expect clustering to recover exactly ``n_clusters`` clusters.
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
                  cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
    # estimate_bandwidth on the blob fixture should land in a known range.
    estimated = estimate_bandwidth(X, n_samples=200)
    assert_true(0.9 <= estimated <= 1.5)
def test_mean_shift():
    # Both the estimator API and the functional API must recover the three
    # blobs of the shared fixture.
    bandwidth = 1.2

    ms = MeanShift(bandwidth=bandwidth)
    found_labels = np.unique(ms.fit(X).labels_)
    assert_equal(len(found_labels), n_clusters)

    _, labels = mean_shift(X, bandwidth=bandwidth)
    assert_equal(len(np.unique(labels)), n_clusters)
def test_parallel():
    # Fitting with n_jobs=2 must produce exactly the same model as serial.
    parallel = MeanShift(n_jobs=2)
    parallel.fit(X)

    serial = MeanShift()
    serial.fit(X)

    assert_array_equal(parallel.cluster_centers_, serial.cluster_centers_)
    assert_array_equal(parallel.labels_, serial.labels_)
def test_meanshift_predict():
    # fit_predict and a subsequent predict on the same data must agree.
    ms = MeanShift(bandwidth=1.2)
    labels_from_fit = ms.fit_predict(X)
    labels_from_predict = ms.predict(X)
    assert_array_equal(labels_from_fit, labels_from_predict)
def test_meanshift_all_orphans():
    # Seeds placed far away from all data: fit must fail with a clear error.
    ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
    expected = "No point was within bandwidth=0.1"
    assert_raise_message(ValueError, expected, ms.fit, X)
def test_unfitted():
    # Non-regression: a freshly constructed estimator must not expose
    # fitted attributes.
    model = MeanShift()
    for attribute in ("cluster_centers_", "labels_"):
        assert_false(hasattr(model, attribute))
def test_bin_seeds():
    # Test the bin seeding technique which can be used in the mean shift
    # algorithm
    # Data is just 6 points in the plane (the local X shadows the
    # module-level fixture)
    X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
                  [2., 1.], [2.1, 1.1], [0., 0.]])
    # With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
    # found
    ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
    test_bins = get_bin_seeds(X, 1, 1)
    test_result = set([tuple(p) for p in test_bins])
    assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
    # With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
    # found
    ground_truth = set([(1., 1.), (2., 1.)])
    test_bins = get_bin_seeds(X, 1, 2)
    test_result = set([tuple(p) for p in test_bins])
    assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
    # With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
    # we bail and use the whole data here.
    with warnings.catch_warnings(record=True):
        test_bins = get_bin_seeds(X, 0.01, 1)
    assert_array_equal(test_bins, X)
    # tight clusters around [0, 0] and [1, 1], only get two bins
    X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
                      cluster_std=0.1, random_state=0)
    test_bins = get_bin_seeds(X, 1)
    assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
nvoron23/python-weka-wrapper | python/weka/plot/classifiers.py | 2 | 13248 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# classifiers.py
# Copyright (C) 2014-2015 Fracpete (pythonwekawrapper at gmail dot com)
import javabridge
import logging
import weka.plot as plot
if plot.matplotlib_available:
import matplotlib.pyplot as plt
from weka.core.classes import JavaObject, join_options
from weka.core.dataset import Instances
from weka.classifiers import Classifier, Evaluation, NumericPrediction, NominalPrediction
# logging setup
logger = logging.getLogger(__name__)
def plot_classifier_errors(predictions, absolute=True, max_relative_size=50, absolute_size=50, title=None,
                           outfile=None, wait=True):
    """
    Plots the classifiers for the given list of predictions.
    TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
    :param predictions: the predictions to plot
    :type predictions: list
    :param absolute: whether to use absolute errors as size or relative ones
    :type absolute: bool
    :param max_relative_size: the maximum size in point in case of relative mode
    :type max_relative_size: int
    :param absolute_size: the size in point in case of absolute mode
    :type absolute_size: int
    :param title: an optional title
    :type title: str
    :param outfile: the output file, ignored if None
    :type outfile: str
    :param wait: whether to wait for the user to close the plot
    :type wait: bool
    """
    if not plot.matplotlib_available:
        logger.error("Matplotlib is not installed, plotting unavailable!")
        return
    actual = []
    predicted = []
    error = None
    cls = None
    # Numeric predictions carry an error magnitude; nominal predictions are
    # reduced to correct (0) / incorrect (1).
    for pred in predictions:
        actual.append(pred.actual)
        predicted.append(pred.predicted)
        if isinstance(pred, NumericPrediction):
            if error is None:
                error = []
            error.append(abs(pred.error))
        elif isinstance(pred, NominalPrediction):
            if cls is None:
                cls = []
            cls.append(1 if pred.actual != pred.predicted else 0)
    fig, ax = plt.subplots()
    if error is None and cls is None:
        ax.scatter(actual, predicted, s=absolute_size, alpha=0.5)
    elif cls is not None:
        ax.scatter(actual, predicted, c=cls, s=absolute_size, alpha=0.5)
    elif error is not None:
        if not absolute:
            min_err = min(error)
            max_err = max(error)
            err_range = max_err - min_err
            # Bug fix: the old code computed factor = range / max_relative_size
            # and then did error / factor * max_relative_size, multiplying by
            # max_relative_size twice (and dividing by zero when all errors
            # were equal). Scale so the largest error maps to roughly
            # max_relative_size points.
            if err_range > 0:
                factor = err_range / max_relative_size
                for i in xrange(len(error)):
                    error[i] = error[i] / factor
            else:
                error = [max_relative_size] * len(error)
        ax.scatter(actual, predicted, s=error, alpha=0.5)
    ax.set_xlabel("actual")
    ax.set_ylabel("predicted")
    if title is None:
        title = "Classifier errors"
    ax.set_title(title)
    # diagonal reference line (perfect predictions)
    ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3")
    ax.grid(True)
    fig.canvas.set_window_title(title)
    plt.draw()
    if outfile is not None:
        plt.savefig(outfile)
    if wait:
        plt.show()
def generate_thresholdcurve_data(evaluation, class_index):
    """
    Generates the threshold curve data from the evaluation object's predictions.
    :param evaluation: the evaluation to obtain the predictions from
    :type evaluation: Evaluation
    :param class_index: the 0-based index of the class-label to create the plot for
    :type class_index: int
    :return: the generated threshold curve data
    :rtype: Instances
    """
    # Delegate the computation to weka's ThresholdCurve class through JNI.
    jtc = JavaObject.new_instance("weka.classifiers.evaluation.ThresholdCurve")
    pred = javabridge.call(evaluation.jobject, "predictions", "()Ljava/util/ArrayList;")
    # getCurve returns a weka Instances object; wrap it for Python use.
    result = Instances(
        javabridge.call(jtc, "getCurve", "(Ljava/util/ArrayList;I)Lweka/core/Instances;", pred, class_index))
    return result
def get_thresholdcurve_data(data, xname, yname):
    """
    Extracts two columns from data generated by the
    weka.classifiers.evaluation.ThresholdCurve class.
    :param data: the threshold curve data
    :type data: Instances
    :param xname: the name of the X column
    :type xname: str
    :param yname: the name of the Y column
    :type yname: str
    :return: tuple of x and y arrays
    :rtype: tuple
    """
    x_index = data.attribute_by_name(xname).index
    y_index = data.attribute_by_name(yname).index
    x = []
    y = []
    for row in xrange(data.num_instances):
        inst = data.get_instance(row)
        x.append(inst.get_value(x_index))
        y.append(inst.get_value(y_index))
    return x, y
def get_auc(data):
    """
    Calculates the area under the ROC curve (AUC) from threshold curve data.
    :param data: the threshold curve data
    :type data: Instances
    :return: the area
    :rtype: float
    """
    area = javabridge.static_call(
        "weka/classifiers/evaluation/ThresholdCurve", "getROCArea",
        "(Lweka/core/Instances;)D", data.jobject)
    return area
def get_prc(data):
    """
    Calculates the area under the precision recall curve (PRC) from
    threshold curve data.
    :param data: the threshold curve data
    :type data: Instances
    :return: the area
    :rtype: float
    """
    area = javabridge.static_call(
        "weka/classifiers/evaluation/ThresholdCurve", "getPRCArea",
        "(Lweka/core/Instances;)D", data.jobject)
    return area
def plot_roc(evaluation, class_index=None, title=None, key_loc="lower right", outfile=None, wait=True):
    """
    Plots the ROC (receiver operator characteristics) curve for the given predictions.
    TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
    :param evaluation: the evaluation to obtain the predictions from
    :type evaluation: Evaluation
    :param class_index: the list of 0-based indices of the class-labels to create the plot for
    :type class_index: list
    :param title: an optional title
    :type title: str
    :param key_loc: the position string for the key
    :type key_loc: str
    :param outfile: the output file, ignored if None
    :type outfile: str
    :param wait: whether to wait for the user to close the plot
    :type wait: bool
    """
    if not plot.matplotlib_available:
        logger.error("Matplotlib is not installed, plotting unavailable!")
        return
    if class_index is None:
        class_index = [0]
    ax = None
    for cindex in class_index:
        data = generate_thresholdcurve_data(evaluation, cindex)
        head = evaluation.header
        area = get_auc(data)
        x, y = get_thresholdcurve_data(data, "False Positive Rate", "True Positive Rate")
        if ax is None:
            # first class index: set up the figure/axes shared by all curves;
            # ``fig`` stays in scope for the canvas call on later iterations
            fig, ax = plt.subplots()
            ax.set_xlabel("False Positive Rate")
            ax.set_ylabel("True Positive Rate")
            if title is None:
                title = "ROC"
            ax.set_title(title)
            ax.grid(True)
            fig.canvas.set_window_title(title)
            plt.xlim([-0.05, 1.05])
            plt.ylim([-0.05, 1.05])
        # one curve per class label, annotated with its AUC
        plot_label = head.class_attribute.value(cindex) + " (AUC: %0.4f)" % area
        ax.plot(x, y, label=plot_label)
        # diagonal chance line
        ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3")
    plt.draw()
    plt.legend(loc=key_loc, shadow=True)
    if outfile is not None:
        plt.savefig(outfile)
    if wait:
        plt.show()
def plot_prc(evaluation, class_index=None, title=None, key_loc="lower center", outfile=None, wait=True):
    """
    Plots the PRC (precision recall) curve for the given predictions.
    TODO: click events http://matplotlib.org/examples/event_handling/data_browser.html
    :param evaluation: the evaluation to obtain the predictions from
    :type evaluation: Evaluation
    :param class_index: the list of 0-based indices of the class-labels to create the plot for
    :type class_index: list
    :param title: an optional title
    :type title: str
    :param key_loc: the location string for the key
    :type key_loc: str
    :param outfile: the output file, ignored if None
    :type outfile: str
    :param wait: whether to wait for the user to close the plot
    :type wait: bool
    """
    if not plot.matplotlib_available:
        logger.error("Matplotlib is not installed, plotting unavailable!")
        return
    if class_index is None:
        class_index = [0]
    ax = None
    for cindex in class_index:
        data = generate_thresholdcurve_data(evaluation, cindex)
        head = evaluation.header
        area = get_prc(data)
        x, y = get_thresholdcurve_data(data, "Recall", "Precision")
        if ax is None:
            # first class index: set up the figure/axes shared by all curves;
            # ``fig`` stays in scope for the canvas call on later iterations
            fig, ax = plt.subplots()
            ax.set_xlabel("Recall")
            ax.set_ylabel("Precision")
            if title is None:
                title = "PRC"
            ax.set_title(title)
            fig.canvas.set_window_title(title)
            plt.xlim([-0.05, 1.05])
            plt.ylim([-0.05, 1.05])
            ax.grid(True)
        # one curve per class label, annotated with its PRC area
        plot_label = head.class_attribute.value(cindex) + " (PRC: %0.4f)" % area
        ax.plot(x, y, label=plot_label)
        # diagonal reference line
        ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c="0.3")
    plt.draw()
    plt.legend(loc=key_loc, shadow=True)
    if outfile is not None:
        plt.savefig(outfile)
    if wait:
        plt.show()
def plot_learning_curve(classifiers, train, test=None, increments=100, metric="percent_correct",
                        title="Learning curve", label_template="[#] @ $", key_loc="lower right",
                        outfile=None, wait=True):
    """
    Plots a learning curve: each classifier is trained on increasingly large
    prefixes of ``train`` and evaluated after every increment.
    :param classifiers: list of Classifier template objects
    :type classifiers: list of Classifier
    :param train: dataset to use for building the classifiers; also used for
                  evaluating them if ``test`` is None
    :type train: Instances
    :param test: optional dataset to use for the testing the built classifiers
    :type test: Instances
    :param increments: the increments (>= 1: # of instances, <1: percentage of dataset)
    :type increments: float
    :param metric: the name of the numeric metric to plot (Evaluation.<metric>)
    :type metric: str
    :param title: the title for the plot
    :type title: str
    :param label_template: the template for the label in the plot
                           (#: 1-based index, @: full classname, !: simple classname, $: options)
    :type label_template: str
    :param key_loc: the location string for the key
    :type key_loc: str
    :param outfile: the output file, ignored if None
    :type outfile: str
    :param wait: whether to wait for the user to close the plot
    :type wait: bool
    """
    if not plot.matplotlib_available:
        logger.error("Matplotlib is not installed, plotting unavailable!")
        return
    if not train.has_class():
        logger.error("Training set has no class attribute set!")
        return
    if (test is not None) and (train.equal_headers(test) is not None):
        logger.error("Training and test set are not compatible: " + train.equal_headers(test))
        return
    # increments >= 1 is an absolute instance count; < 1 a fraction of train
    if increments >= 1:
        inc = increments
    else:
        inc = round(train.num_instances * increments)
    steps = []
    cls = []
    # maps each classifier copy to its list of metric values, one per step
    evls = {}
    for classifier in classifiers:
        # work on copies so the caller's template objects stay untouched
        cl = Classifier.make_copy(classifier)
        cls.append(cl)
        evls[cl] = []
    if test is None:
        tst = train
    else:
        tst = test
    for i in xrange(train.num_instances):
        if (i > 0) and (i % inc == 0):
            steps.append(i+1)
        for cl in cls:
            # train
            if cl.is_updateable:
                # incremental classifier: bootstrap on the first instance,
                # then update one instance at a time
                if i == 0:
                    tr = Instances.copy_instances(train, 0, 1)
                    cl.build_classifier(tr)
                else:
                    cl.update_classifier(train.get_instance(i))
            else:
                # batch classifier: rebuild from scratch at every increment
                if (i > 0) and (i % inc == 0):
                    tr = Instances.copy_instances(train, 0, i + 1)
                    cl.build_classifier(tr)
            # evaluate
            if (i > 0) and (i % inc == 0):
                evl = Evaluation(tst)
                evl.test_model(cl, tst)
                evls[cl].append(getattr(evl, metric))
    fig, ax = plt.subplots()
    ax.set_xlabel("# of instances")
    ax.set_ylabel(metric)
    ax.set_title(title)
    fig.canvas.set_window_title(title)
    ax.grid(True)
    i = 0
    for cl in cls:
        evl = evls[cl]
        i += 1
        # expand the label template placeholders for this classifier
        plot_label = label_template.\
            replace("#", str(i)).\
            replace("@", cl.classname).\
            replace("!", cl.classname[cl.classname.rfind(".") + 1:]).\
            replace("$", join_options(cl.config))
        ax.plot(steps, evl, label=plot_label)
    plt.draw()
    plt.legend(loc=key_loc, shadow=True)
    if outfile is not None:
        plt.savefig(outfile)
    if wait:
        plt.show()
| gpl-3.0 |
JohnOrlando/gnuradio-bitshark | gr-utils/src/python/gr_plot_psd.py | 5 | 11977 | #!/usr/bin/env python
#
# Copyright 2007,2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
from scipy import log10
class gr_plot_psd:
    """Interactive viewer for a GNU Radio binary capture file.

    One block of samples at a time is read from the file and shown as an
    I/Q time series plus its power spectral density (PSD); a spectrogram
    panel can be enabled with ``options.enable_spec``.  On-screen "<"/">"
    buttons and the arrow/space keys page through the file.
    """
    def __init__(self, datatype, filename, options):
        # NOTE(review): the file holds raw binary samples, so mode "rb"
        # would be safer on platforms that distinguish text mode -- confirm.
        self.hfile = open(filename, "r")
        self.block_length = options.block          # samples shown per page
        self.start = options.start                 # initial offset, in samples
        self.sample_rate = options.sample_rate
        self.psdfftsize = options.psd_size
        self.specfftsize = options.spec_size
        self.dospec = options.enable_spec # if we want to plot the spectrogram
        self.datatype = getattr(scipy, datatype) #scipy.complex64
        self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
        # Font sizes used throughout the figure.
        self.axis_font_size = 16
        self.label_font_size = 18
        self.title_font_size = 20
        self.text_size = 22
        # Setup PLOT
        self.fig = figure(1, figsize=(16, 12), facecolor='w')
        rcParams['xtick.labelsize'] = self.axis_font_size
        rcParams['ytick.labelsize'] = self.axis_font_size
        # Static annotations: file name, current position and settings.
        self.text_file = figtext(0.10, 0.95, ("File: %s" % filename), weight="heavy", size=self.text_size)
        self.text_file_pos = figtext(0.10, 0.92, "File Position: ", weight="heavy", size=self.text_size)
        self.text_block = figtext(0.35, 0.92, ("Block Size: %d" % self.block_length),
                                  weight="heavy", size=self.text_size)
        self.text_sr = figtext(0.60, 0.915, ("Sample Rate: %.2f" % self.sample_rate),
                               weight="heavy", size=self.text_size)
        self.make_plots()
        # "<" and ">" buttons step backward/forward through the file.
        self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
        self.button_left = Button(self.button_left_axes, "<")
        self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
        self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
        self.button_right = Button(self.button_right_axes, ">")
        self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
        # Remember the current x-limits so zoom() can detect changes.
        self.xlim = self.sp_iq.get_xlim()
        self.manager = get_current_fig_manager()
        connect('draw_event', self.zoom)
        connect('key_press_event', self.click)
        show()

    def get_data(self):
        """Read the next block of samples from the file and compute its PSD."""
        self.position = self.hfile.tell()/self.sizeof_data
        self.text_file_pos.set_text("File Position: %d" % self.position)
        self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
        #print "Read in %d items" % len(self.iq)
        if(len(self.iq) == 0):
            print "End of File"
        else:
            # Time axis continues from the absolute file position.
            tstep = 1.0 / self.sample_rate
            self.time = [tstep*(self.position + i) for i in xrange(len(self.iq))]
            self.iq_psd, self.freq = self.dopsd(self.iq)

    def dopsd(self, iq):
        ''' Need to do this here and plot later so we can do the fftshift '''
        overlap = self.psdfftsize/4
        winfunc = scipy.blackman
        psd,freq = self.sp_psd.psd(iq, self.psdfftsize, self.sample_rate,
                                   window = lambda d: d*winfunc(self.psdfftsize),
                                   noverlap = overlap, visible=False)
        # Convert to dB and shift DC to the center of the spectrum.
        psd = 10.0*log10(abs(fftpack.fftshift(psd)))
        return (psd, freq)

    def make_plots(self):
        """Create the I/Q, PSD and (optional) spectrogram subplots."""
        # if specified on the command-line, set file pointer
        # NOTE(review): whence=1 seeks relative to the current position,
        # which at this point equals the start of the file -- confirm intent.
        self.hfile.seek(self.sizeof_data*self.start, 1)
        # Subplot geometry depends on whether the spectrogram is shown;
        # self.dospec (bool) indexes into the two layout alternatives.
        iqdims = [[0.075, 0.2, 0.4, 0.6], [0.075, 0.55, 0.4, 0.3]]
        psddims = [[0.575, 0.2, 0.4, 0.6], [0.575, 0.55, 0.4, 0.3]]
        specdims = [0.2, 0.125, 0.6, 0.3]
        # Subplot for real and imaginary parts of signal
        self.sp_iq = self.fig.add_subplot(2,2,1, position=iqdims[self.dospec])
        self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
        self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
        self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
        # Subplot for PSD plot
        self.sp_psd = self.fig.add_subplot(2,2,2, position=psddims[self.dospec])
        self.sp_psd.set_title(("PSD"), fontsize=self.title_font_size, fontweight="bold")
        self.sp_psd.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
        self.sp_psd.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
        self.get_data()
        self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
        self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
        self.draw_time() # draw the plot
        self.plot_psd = self.sp_psd.plot([], 'b') # make plot for PSD
        self.draw_psd() # draw the plot
        if self.dospec:
            # Subplot for spectrogram plot
            self.sp_spec = self.fig.add_subplot(2,2,3, position=specdims)
            self.sp_spec.set_title(("Spectrogram"), fontsize=self.title_font_size, fontweight="bold")
            self.sp_spec.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
            self.sp_spec.set_ylabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
            self.draw_spec()
        draw()

    def draw_time(self):
        """Refresh the I/Q time-series lines from the current block."""
        reals = self.iq.real
        imags = self.iq.imag
        self.plot_iq[0].set_data([self.time, reals])
        self.plot_iq[1].set_data([self.time, imags])
        self.sp_iq.set_xlim(min(self.time), max(self.time))
        # 1.5x headroom around the extreme amplitudes.
        self.sp_iq.set_ylim([1.5*min([min(reals), min(imags)]),
                             1.5*max([max(reals), max(imags)])])

    def draw_psd(self):
        """Refresh the PSD line from the precomputed spectrum."""
        self.plot_psd[0].set_data([self.freq, self.iq_psd])
        self.sp_psd.set_ylim([min(self.iq_psd)-10, max(self.iq_psd)+10])

    def draw_spec(self):
        """Redraw the spectrogram panel from the current block."""
        overlap = self.specfftsize/4
        winfunc = scipy.blackman
        self.sp_spec.clear()
        self.sp_spec.specgram(self.iq, self.specfftsize, self.sample_rate,
                              window = lambda d: d*winfunc(self.specfftsize),
                              noverlap = overlap, xextent=[min(self.time), max(self.time)])

    def update_plots(self):
        """Redraw every panel after a new block has been loaded."""
        self.draw_time()
        self.draw_psd()
        if self.dospec:
            self.draw_spec()
        self.xlim = self.sp_iq.get_xlim() # so zoom doesn't get called
        draw()

    def zoom(self, event):
        """On an x-axis zoom of the I/Q panel, recompute the PSD for the
        visible sub-range of samples."""
        newxlim = scipy.array(self.sp_iq.get_xlim())
        curxlim = scipy.array(self.xlim)
        # NOTE(review): comparing the .all() results only distinguishes
        # "all-true" vs "not-all-true" arrays, not element-wise equality
        # of the limits -- presumably (newxlim != curxlim).any() was
        # intended; confirm before relying on this.
        if(newxlim.all() != curxlim.all()):
            self.xlim = newxlim
            # Map the time-axis limits back to sample indices, clamped
            # to the current block.
            xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0] - self.position))))
            xmax = min(int(ceil(self.sample_rate*(self.xlim[1] - self.position))), len(self.iq))
            iq = self.iq[xmin : xmax]
            time = self.time[xmin : xmax]
            iq_psd, freq = self.dopsd(iq)
            self.plot_psd[0].set_data(freq, iq_psd)
            self.sp_psd.axis([min(freq), max(freq),
                              min(iq_psd)-10, max(iq_psd)+10])
            draw()

    def click(self, event):
        """Keyboard handler: space/down/right page forward, up/left back."""
        forward_valid_keys = [" ", "down", "right"]
        backward_valid_keys = ["up", "left"]
        if(find(event.key, forward_valid_keys)):
            self.step_forward()
        elif(find(event.key, backward_valid_keys)):
            self.step_backward()

    def button_left_click(self, event):
        self.step_backward()

    def button_right_click(self, event):
        self.step_forward()

    def step_forward(self):
        """Load and display the next block of samples."""
        self.get_data()
        self.update_plots()

    def step_backward(self):
        """Load and display the previous block of samples."""
        # Step back in file position
        # Seek back over the block just shown plus the one before it,
        # or to the start of the file if there is less than that.
        if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
            self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
        else:
            self.hfile.seek(-self.hfile.tell(),1)
        self.get_data()
        self.update_plots()
def find(item_in, list_search):
    """Return True if *item_in* occurs in *list_search*, else False.

    Used by the key-press handler to test whether the pressed key is in
    a list of recognised keys.
    """
    # A plain membership test has the same semantics as the original
    # index()+ValueError dance (both compare with ==), is simpler, and
    # avoids the misleading "index != None" comparison (an index of 0 is
    # a valid hit).
    return item_in in list_search
def setup_options():
    """Build the command-line parser for the PSD plotting tool.

    Returns an ``optparse.OptionParser`` configured with the data type,
    block size, start offset, sample rate, FFT sizes and spectrogram
    toggle.  The single positional argument is the input file name.
    """
    usage="%prog: [options] input_filename"
    description = "Takes a GNU Radio binary file (with specified data type using --data-type) and displays the I&Q data versus time as well as the power spectral density (PSD) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples. Finally, the size of the FFT to use for the PSD and spectrogram plots can be set independently with --psd-size and --spec-size, respectively. The spectrogram plot does not display by default and is turned on with -S or --enable-spec."

    parser = OptionParser(conflict_handler="resolve", usage=usage, description=description)
    parser.add_option("-d", "--data-type", type="string", default="complex64",
                      help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
    parser.add_option("-B", "--block", type="int", default=8192,
                      help="Specify the block size [default=%default]")
    parser.add_option("-s", "--start", type="int", default=0,
                      help="Specify where to start in the file [default=%default]")
    # Fixed typo in the help text: "sampler rate" -> "sample rate".
    parser.add_option("-R", "--sample-rate", type="float", default=1.0,
                      help="Set the sample rate of the data [default=%default]")
    parser.add_option("", "--psd-size", type="int", default=1024,
                      help="Set the size of the PSD FFT [default=%default]")
    parser.add_option("", "--spec-size", type="int", default=256,
                      help="Set the size of the spectrogram FFT [default=%default]")
    parser.add_option("-S", "--enable-spec", action="store_true", default=False,
                      help="Turn on plotting the spectrogram [default=%default]")
    return parser
def main():
    """Parse the command line and launch the PSD plot GUI."""
    parser = setup_options()
    (options, args) = parser.parse_args()

    # Exactly one positional argument (the capture file) is required.
    if len(args) != 1:
        parser.print_help()
        # Call form instead of the Python-2-only "raise SystemExit, 1"
        # statement; behaves identically and also parses under Python 3.
        raise SystemExit(1)

    filename = args[0]
    dc = gr_plot_psd(options.data_type, filename, options)
# Script entry point: run the viewer and exit quietly on Ctrl-C so no
# traceback is printed when the user interrupts the GUI.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass
| gpl-3.0 |
cosmoharrigan/pylearn2 | pylearn2/optimization/test_batch_gradient_descent.py | 44 | 6402 | from __future__ import print_function
from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
import theano.tensor as T
from pylearn2.utils import sharedX
import numpy as np
from theano.compat.six.moves import xrange
from theano import config
from theano.printing import min_informative_str
def test_batch_gradient_descent():
    """ Verify that batch gradient descent works by checking that
    it minimizes a quadratic function f(x) = x^T A x + b^T x + c
    correctly for several sampled values of A, b, and c.
    The ground truth minimizer is x = np.linalg.solve(A,-b)"""

    n = 3

    # Symbolic inputs of the quadratic objective.
    A = T.matrix(name='A')
    b = T.vector(name='b')
    c = T.scalar(name='c')

    # The parameter vector being optimized.
    x = sharedX(np.zeros((n,)), name='x')

    half = np.cast[config.floatX](0.5)

    obj = half * T.dot(T.dot(x, A), x) + T.dot(b, x) + c

    minimizer = BatchGradientDescent(
        objective=obj,
        params=[x],
        inputs=[A, b, c])

    num_samples = 3

    rng = np.random.RandomState([1, 2, 3])

    for i in xrange(num_samples):
        # Sample a random positive-definite quadratic form A = G^T G.
        # Fix: randn requires integer dimensions; the original passed the
        # float 1.5 * n, which newer NumPy versions reject.
        A = np.cast[config.floatX](rng.randn(int(1.5 * n), n))
        A = np.cast[config.floatX](np.dot(A.T, A))
        # Small ridge keeps A well conditioned / invertible.
        A += np.cast[config.floatX](np.identity(n) * .02)
        b = np.cast[config.floatX](rng.randn(n))
        c = np.cast[config.floatX](rng.randn())
        x.set_value(np.cast[config.floatX](rng.randn(n)))

        # Closed-form minimizer of the quadratic.
        analytical_x = np.linalg.solve(A, -b)

        actual_obj = minimizer.minimize(A, b, c)
        actual_x = x.get_value()

        # Check that the value returned by the minimize method
        # is the objective function value at the parameters
        # chosen by the minimize method
        cur_obj = minimizer.obj(A, b, c)
        assert np.allclose(actual_obj, cur_obj)

        x.set_value(analytical_x)
        analytical_obj = minimizer.obj(A, b, c)

        # make sure the objective function is accurate to first 4 digits
        condition1 = not np.allclose(analytical_obj, actual_obj)
        condition2 = np.abs(analytical_obj - actual_obj) >= 1e-4 * \
            np.abs(analytical_obj)

        if (config.floatX == 'float64' and condition1) \
                or (config.floatX == 'float32' and condition2):
            # Dump diagnostics before failing the test at the bottom of
            # this branch.
            print('objective function value came out wrong on sample ', i)
            print('analytical obj', analytical_obj)
            print('actual obj', actual_obj)

            """
            The following section of code was used to verify that numerical
            error can make the objective function look non-convex

            print('Checking for numerically induced non-convex behavior')
            def f(x):
                return 0.5 * np.dot(x,np.dot(A,x)) + np.dot(b,x) + c

            x.set_value(actual_x)
            minimizer._compute_grad(A,b,c)
            minimizer._normalize_grad()
            d = minimizer.param_to_grad_shared[x].get_value()

            x = actual_x.copy()
            prev = f(x)
            print(prev)
            step_size = 1e-4
            x += step_size * d
            cur = f(x)
            print(cur)
            cur_sgn = np.sign(cur-prev)
            flip_cnt = 0
            for i in xrange(10000):
                x += step_size * d
                prev = cur
                cur = f(x)
                print(cur)
                prev_sgn = cur_sgn
                cur_sgn = np.sign(cur-prev)
                if cur_sgn != prev_sgn:
                    print('flip')
                    flip_cnt += 1
                    if flip_cnt > 1:
                        print("Non-convex!")

                        from matplotlib import pyplot as plt
                        y = []
                        x = actual_x.copy()
                        for j in xrange(10000):
                            y.append(f(x))
                            x += step_size * d

                        plt.plot(y)
                        plt.show()

                        assert False
            print('None found')
            """

            # print 'actual x',actual_x
            # print 'A:'
            # print A
            # print 'b:'
            # print b
            # print 'c:'
            # print c
            x.set_value(actual_x)
            minimizer._compute_grad(A, b, c)
            x_grad = minimizer.param_to_grad_shared[x]
            actual_grad = x_grad.get_value()
            correct_grad = 0.5 * np.dot(A, x.get_value()) + 0.5 * \
                np.dot(A.T, x.get_value()) + b
            if not np.allclose(actual_grad, correct_grad):
                print('gradient was wrong at convergence point')
                print('actual grad: ')
                print(actual_grad)
                print('correct grad: ')
                print(correct_grad)
                # Fix: the original printed only the label and discarded
                # the computed maximum; print the value as well.
                print('max difference: ', np.abs(actual_grad - correct_grad).max())
                assert False

            minimizer._normalize_grad()
            d = minimizer.param_to_grad_shared[x].get_value()
            # Exact line-search step length along direction d for the
            # quadratic objective.
            step_len = (np.dot(b, d) + 0.5 * np.dot(d, np.dot(A, actual_x))
                        + 0.5 * np.dot(actual_x, np.dot(A, d))) \
                / np.dot(d, np.dot(A, d))

            g = np.dot(A, actual_x) + b
            deriv = np.dot(g, d)

            print('directional deriv at actual', deriv)
            print('optimal step_len', step_len)
            optimal_x = actual_x - d * step_len
            g = np.dot(A, optimal_x) + b
            deriv = np.dot(g, d)
            print('directional deriv at optimal: ', deriv)
            x.set_value(optimal_x)
            print('obj at optimal: ', minimizer.obj(A, b, c))

            print('eigenvalue range:')
            val, vec = np.linalg.eig(A)
            print((val.min(), val.max()))

            print('condition number: ', (val.max() / val.min()))
            assert False
# Allow running this test module directly as a script.
if __name__ == '__main__':
    test_batch_gradient_descent()
| bsd-3-clause |
weegreenblobbie/nsound | docs/user_guide/sphinxext/plot_directive.py | 1 | 17849 | """A special directive for including a matplotlib plot.
The source code for the plot may be included in one of two ways:
1. A path to a source file as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as inline content to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
In HTML output, `plot` will include a .png file with a link to a high-res
.png and .pdf. In LaTeX output, it will include a .pdf.
To customize the size of the plot, this directive supports all of the
options of the `image` directive, except for `target` (since plot will
add its own target). These include `alt`, `height`, `width`, `scale`,
`align` and `class`.
Additionally, if the `:include-source:` option is provided, the
literal source will be displayed inline in the text, (as well as a
link to the source in HTML). If this source file is in a non-UTF8 or
non-ASCII encoding, the encoding must be specified using the
`:encoding:` option.
The set of file formats to generate can be specified with the
`plot_formats` configuration variable.
Error handling:
Any errors generated during the running of the code are emitted as warnings
using the Python `warnings` module, using a custom category called
`PlotWarning`. To turn the warnings into fatal errors that stop the
documentation build, after adjusting your `sys.path` in your `conf.py` Sphinx
configuration file, use::
import plot_directive
warnings.simplefilter('error', plot_directive.PlotWarning)
"""
import sys, os, shutil, imp, warnings, cStringIO, re
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.parsers.rst import directives
try:
# docutils 0.4
from docutils.parsers.rst.directives.images import align
except ImportError:
# docutils 0.5
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[a-z]', x)[0])
for x in sphinx_version[:2]])
import matplotlib
matplotlib.use('Agg')
import matplotlib.cbook as cbook
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
from matplotlib.sphinxext import only_directives
class PlotWarning(Warning):
    """Warning category for all warnings generated by this directive.

    By printing our warnings with this category, it becomes possible to turn
    them into errors by using in your conf.py::

        warnings.simplefilter('error', plot_directive.PlotWarning)

    This way, you can ensure that your docs only build if all your examples
    actually run successfully.
    """
    # The subclass has no behaviour of its own; it exists purely as a
    # distinct warning category to filter on.
    pass
# os.path.relpath is new in Python 2.6
if hasattr(os.path, 'relpath'):
    relpath = os.path.relpath
else:
    # This code is snagged from Python 2.6
    def relpath(target, base=os.curdir):
        """
        Return a relative path to the target from either the current dir or an optional base dir.
        Base can be a directory specified either as absolute or relative to current dir.
        """
        if not os.path.exists(target):
            raise OSError, 'Target does not exist: '+target

        if not os.path.isdir(base):
            raise OSError, 'Base is not a directory or does not exist: '+base

        base_list = (os.path.abspath(base)).split(os.sep)
        target_list = (os.path.abspath(target)).split(os.sep)

        # On the windows platform the target may be on a completely
        # different drive from the base.
        if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]:
            raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper()

        # Starting from the filepath root, work out how much of the
        # filepath is shared by base and target.
        for i in range(min(len(base_list), len(target_list))):
            if base_list[i] <> target_list[i]: break
        else:
            # If we broke out of the loop, i is pointing to the first
            # differing path elements. If we didn't break out of the
            # loop, i is pointing to identical path elements.
            # Increment i so that in all cases it points to the first
            # differing path elements.
            # (The for/else clause only runs when the loop finished
            # WITHOUT hitting "break", i.e. one list is a prefix of the
            # other.)
            i+=1

        # Climb up from base with ".." for each unshared component, then
        # descend into the unshared tail of target.
        rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
        if rel_list:
            return os.path.join(*rel_list)
        else:
            return ""
template = """
.. htmlonly::
%(links)s
.. figure:: %(prefix)s%(tmpdir)s/%(outname)s.png
%(options)s
%(caption)s
.. // Nick changed the below format to use image, this will force the image
.. // to appear inline with the text.
.. latexonly::
.. image:: %(prefix)s%(tmpdir)s/%(outname)s.pdf
:scale: 75 %%
:align: center
%(caption)s
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
template_content_indent = ' '
def out_of_date(original, derived):
    """
    Returns True if derivative is out-of-date wrt original,
    both of which are full file paths.
    """
    # A derived file that does not exist yet is always stale.
    if not os.path.exists(derived):
        return True
    # Otherwise it is stale exactly when the original exists and has a
    # newer modification time.
    return (os.path.exists(original) and
            os.stat(derived).st_mtime < os.stat(original).st_mtime)
def run_code(plot_path, function_name, plot_code):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.
    """
    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any. Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    if plot_code is not None:
        # Inline directive content: execute it directly with numpy and
        # pyplot pre-imported for convenience.
        exec_code = 'import numpy as np; import matplotlib.pyplot as plt\n%s'%plot_code
        try:
            exec(exec_code)
        except Exception as e:
            message =(
                "FAILED TO EXECUTE:\n" +
                exec_code +
                "\n\nException: %s" % str(e))
            sys.stderr.write(message)
            raise Exception(message)
    else:
        # Script file: import it as a throwaway module named __plot__,
        # silencing anything it prints to stdout.
        pwd = os.getcwd()
        path, fname = os.path.split(plot_path)
        sys.path.insert(0, os.path.abspath(path))
        stdout = sys.stdout
        sys.stdout = cStringIO.StringIO()
        os.chdir(path)
        fd = None
        try:
            fd = open(fname)
            module = imp.load_module(
                "__plot__", fd, fname, ('py', 'r', imp.PY_SOURCE))
        finally:
            # Always restore sys.path, cwd and stdout, even on failure.
            del sys.path[0]
            os.chdir(pwd)
            sys.stdout = stdout
            if fd is not None:
                fd.close()
        if function_name is not None:
            # Optionally call a named, zero-argument entry point of the
            # freshly imported module.
            getattr(module, function_name)()
def run_savefig(plot_path, basename, tmpdir, destdir, formats):
    """
    Once a plot script has been imported, this function runs savefig
    on all of the figures in all of the desired formats.

    Returns the number of figures saved, or 0 if saving any of them
    failed (the failure is reported as a PlotWarning).
    """
    fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()

    for i, figman in enumerate(fig_managers):
        for j, (format, dpi) in enumerate(formats):
            # A single figure keeps the plain basename; multiple figures
            # get a _00, _01, ... suffix.
            if len(fig_managers) == 1:
                outname = basename
            else:
                outname = "%s_%02d" % (basename, i)
            outname = outname + "." + format
            outpath = os.path.join(tmpdir, outname)
            try:
                figman.canvas.figure.savefig(outpath, dpi=dpi)
            except:
                # Report the failure as a warning (which the user can
                # promote to an error) and bail out.
                s = cbook.exception_to_str("Exception saving plot %s" % plot_path)
                warnings.warn(s, PlotWarning)
                return 0
            # Formats beyond the first are also copied to the link
            # destination directory.
            if j > 0:
                shutil.copyfile(outpath, os.path.join(destdir, outname))

    return len(fig_managers)
def clear_state():
    """Reset matplotlib to a clean state before running a plot script."""
    plt.close('all')
    matplotlib.rcdefaults()
    # Set a default figure size that doesn't overflow typical browser
    # windows. The script is free to override it if necessary.
    matplotlib.rcParams['figure.figsize'] = (5.5, 4.5)
def render_figures(plot_path, function_name, plot_code, tmpdir, destdir,
                   formats):
    """
    Run a pyplot script and save the low and high res PNGs and a PDF
    in outdir.

    Returns the number of figures produced.  If up-to-date output files
    already exist, their count is returned without re-running the
    script.
    """
    plot_path = str(plot_path) # todo, why is unicode breaking this
    basedir, fname = os.path.split(plot_path)
    basename, ext = os.path.splitext(fname)
    all_exists = True

    # Look for single-figure output files first
    for format, dpi in formats:
        outname = os.path.join(tmpdir, '%s.%s' % (basename, format))
        if out_of_date(plot_path, outname):
            all_exists = False
            break

    if all_exists:
        return 1

    # Then look for multi-figure output files, assuming
    # if we have some we have all...
    i = 0
    while True:
        all_exists = True
        for format, dpi in formats:
            outname = os.path.join(
                tmpdir, '%s_%02d.%s' % (basename, i, format))
            if out_of_date(plot_path, outname):
                all_exists = False
                break
        if all_exists:
            i += 1
        else:
            break

    # i now counts how many complete multi-figure sets exist on disk.
    if i != 0:
        return i

    # We didn't find the files, so build them
    clear_state()
    try:
        run_code(plot_path, function_name, plot_code)
    except Exception as e:
        message = (
            "Exception occured while executing plot code:\n" +
            plot_code +
            "\n\n" + str(e))
        raise Exception(message)
    num_figs = run_savefig(plot_path, basename, tmpdir, destdir, formats)

    # Drop the throwaway module so the next script is imported fresh.
    if '__plot__' in sys.modules:
        del sys.modules['__plot__']

    return num_figs
def _plot_directive(plot_path, basedir, function_name, plot_code, caption,
                    options, state_machine):
    """Shared implementation behind the ``plot`` directive.

    Renders the figures for *plot_path*/*plot_code* (if out of date) and
    inserts the generated reStructuredText into the document via
    *state_machine*.  Returns an empty node list, as required of a
    docutils directive function.
    """
    formats = setup.config.plot_formats
    # The config value may arrive as a string repr of the list.
    if type(formats) == str:
        formats = eval(formats)

    fname = os.path.basename(plot_path)
    basename, ext = os.path.splitext(fname)

    # Get the directory of the rst file, and determine the relative
    # path from the resulting html file to the plot_directive links
    # (linkdir). This relative path is used for html links *only*,
    # and not the embedded image. That is given an absolute path to
    # the temporary directory, and then sphinx moves the file to
    # build/html/_images for us later.
    rstdir, rstfile = os.path.split(state_machine.document.attributes['source'])
    outdir = os.path.join('plot_directive', basedir)
    reldir = relpath(setup.confdir, rstdir)
    linkdir = os.path.join(reldir, outdir)

    # tmpdir is where we build all the output files. This way the
    # plots won't have to be redone when generating latex after html.

    # Prior to Sphinx 0.6, absolute image paths were treated as
    # relative to the root of the filesystem. 0.6 and after, they are
    # treated as relative to the root of the documentation tree. We
    # need to support both methods here.
    tmpdir = os.path.join('build', outdir)
    tmpdir = os.path.abspath(tmpdir)
    if sphinx_version < (0, 6):
        prefix = ''
    else:
        prefix = '/'
    if not os.path.exists(tmpdir):
        cbook.mkdirs(tmpdir)

    # destdir is the directory within the output to store files
    # that we'll be linking to -- not the embedded images.
    destdir = os.path.abspath(os.path.join(setup.app.builder.outdir, outdir))
    if not os.path.exists(destdir):
        cbook.mkdirs(destdir)

    # Properly indent the caption
    caption = '\n'.join(template_content_indent + line.strip()
                        for line in caption.split('\n'))

    # Generate the figures, and return the number of them
    num_figs = render_figures(plot_path, function_name, plot_code, tmpdir,
                              destdir, formats)

    # Now start generating the lines of output
    lines = []

    if plot_code is None:
        # File-based plot: make the source script available for linking.
        shutil.copyfile(plot_path, os.path.join(destdir, fname))

    if options.has_key('include-source'):
        if plot_code is None:
            # Literal-include the script file; the include prefix rules
            # changed in Sphinx 1.0 (paths relative to the source root).
            if sphinx_version > (1,):
                include_prefix = '/'
            else:
                include_prefix = setup.app.builder.srcdir
            lines.extend(
                ['.. include:: %s' % os.path.join(include_prefix, plot_path),
                 '   :literal:'])
            if options.has_key('encoding'):
                lines.append('   :encoding: %s' % options['encoding'])
                del options['encoding']
        else:
            # Inline code: emit it as a literal block.
            lines.extend(['::', ''])
            lines.extend(['    %s' % row.rstrip()
                          for row in plot_code.split('\n')])
            lines.append('')
        # 'include-source' is consumed here and must not reach the
        # image options below.
        del options['include-source']
    else:
        lines = []

    if num_figs > 0:
        # Remaining options become figure-directive options.
        options = ['%s:%s: %s' % (template_content_indent, key, val)
                   for key, val in options.items()]
        options = "\n".join(options)

        for i in range(num_figs):
            if num_figs == 1:
                outname = basename
            else:
                outname = "%s_%02d" % (basename, i)

            # Copy the linked-to files to the destination within the build tree,
            # and add a link for them
            links = []
            if plot_code is None:
                links.append('`source code <%(linkdir)s/%(basename)s.py>`__')
            for format, dpi in formats[1:]:
                links.append('`%s <%s/%s.%s>`__' % (format, linkdir, outname, format))
            if len(links):
                links = '[%s]' % (', '.join(links) % locals())
            else:
                links = ''

            # NOTE: the templates are filled from locals(), so all of
            # links/prefix/tmpdir/outname/options/caption must be bound.
            lines.extend((template % locals()).split('\n'))
    else:
        lines.extend((exception_template % locals()).split('\n'))

    if len(lines):
        state_machine.insert_input(
            lines, state_machine.input_lines.source(0))

    #~ print "DEBUG START"
    #~ for l in lines:
    #~     print l
    #~ print "DEBUG STOP"

    return []
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    """
    Handle the arguments to the plot directive. The real work happens
    in _plot_directive.
    """
    # The user may provide a filename *or* Python code content, but not both
    if len(arguments):
        plot_path = directives.uri(arguments[0])
        basedir = relpath(os.path.dirname(plot_path), setup.app.builder.srcdir)

        # If there is content, it will be passed as a caption.
        # Indent to match expansion below. XXX - The number of spaces matches
        # that of the 'options' expansion further down. This should be moved
        # to common code to prevent them from diverging accidentally.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        return _plot_directive(plot_path, basedir, function_name, None, caption,
                               options, state_machine)
    else:
        plot_code = '\n'.join(content)

        # Since we don't have a filename, use a hash based on the content
        # (last 10 hex digits) as a stable pseudo-path for caching.
        plot_path = md5(plot_code).hexdigest()[-10:]

        return _plot_directive(plot_path, 'inline', None, plot_code, '', options,
                               state_machine)
def mark_plot_labels(app, document):
    """
    To make plots referenceable, we need to move the reference from
    the "htmlonly" (or "latexonly") node to the actual figure node
    itself.
    """
    # Only explicitly named targets are candidates for relocation.
    for name, explicit in document.nametypes.iteritems():
        if not explicit:
            continue
        labelid = document.nameids[name]
        if labelid is None:
            continue
        node = document.ids[labelid]
        if node.tagname in ('html_only', 'latex_only'):
            for n in node:
                if n.tagname == 'figure':
                    # Use the figure's caption text as the section name
                    # for the cross-reference, if one exists.
                    sectname = name
                    for c in n:
                        if c.tagname == 'caption':
                            sectname = c.astext()
                            break

                    # Move the label from the wrapper node onto the
                    # figure itself and register it with the Sphinx
                    # environment so :ref: resolves to the figure.
                    node['ids'].remove(labelid)
                    node['names'].remove(name)
                    n['ids'].append(labelid)
                    n['names'].append(name)
                    document.settings.env.labels[name] = \
                        document.settings.env.docname, labelid, sectname
                    break
def setup(app):
    """Sphinx extension entry point: register the ``plot`` directive."""
    # Stash the app/config on the function object so the directive
    # implementation can reach them later.
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir

    # Options accepted by ``plot`` (the image directive's options,
    # minus ``target``, plus include-source/encoding).
    options = {'alt': directives.unchanged,
               'height': directives.length_or_unitless,
               'width': directives.length_or_percentage_or_unitless,
               'scale': directives.nonnegative_int,
               'align': align,
               'class': directives.class_option,
               'include-source': directives.flag,
               'encoding': directives.encoding }

    app.add_directive('plot', plot_directive, True, (0, 2, 0), **options)
    # plot_formats: list of (extension, dpi) pairs to render.
    app.add_config_value(
        'plot_formats',
        [('png', 80), ('hires.png', 200), ('pdf', 50)],
        True)

    app.connect('doctree-read', mark_plot_labels)
| gpl-2.0 |
halmd-org/h5md-tools | h5mdtools/_plot/tcf.py | 1 | 6265 | # -*- coding: utf-8 -*-
#
# tcf - time correlation functions
#
# Copyright © 2013 Felix Höfling
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
from .._common import dset_abbrev
def plot(args):
    """
    Plot time correlation functions

    Reads the selected TCF dataset from each H5MD input file, optionally
    slices it by parameter (e.g. wavenumber), normalises by the t=0
    value, and plots all curves into one set of axes.
    """
    import h5py
    import h5mdtools._plot.label
    import numpy as np
    from matplotlib import pyplot as plot
    from matplotlib import ticker

    ax = plot.axes()

    # translate abbreviations for dataset name
    if not args.type and not args.dataset:
        raise SystemExit('Either of the options --type or --dataset is required.')
    dset = args.type and dset_abbrev[args.type] or args.dataset

    for i, fn in enumerate(args.input):
        with h5py.File(fn, 'r') as f:
            h5 = f['dynamics']
            if args.group:
                h5 = h5[args.group]
            h5 = h5[dset]

            # open HDF5 datasets,
            # convert time dataset to NumPy array
            x = np.asarray(h5['time'])
            # NOTE(review): the "cond and a or b" idiom misbehaves if the
            # first operand is falsy -- presumably why the FIXME is here.
            y = 'value' in h5 and h5['value'] or h5['mean'] # FIXME
            yerr = 'error' in h5 and h5['error'] or None
            parameter = 'wavenumber' in h5 and h5['wavenumber'] or None
#            x, y, yerr = x[0], y[0], yerr[0]

            # apply parameter slice
            rank = len(y.shape) - len(x.shape)    # tensor rank of TCF
            if args.slice:
                if rank != 1:
                    raise SystemExit("Correlation function not eligible for parameter slicing")
                s = slice(*args.slice)
                y = y[..., s]
                if yerr is not None:
                    yerr = yerr[..., s]
                if parameter is not None:
                    parameter = parameter[s]

            # convert to NumPy arrays before closing the HDF5 file
            y = np.asarray(y)
            if yerr is not None:
                yerr = np.asarray(yerr)
            if parameter is not None:
                parameter = np.asarray(parameter)

        # blockwise normalisation
        if args.norm:
            # normalise with data at t=0,
            # distinguish between flat and blocked time data
            norm = y[np.where(x == 0)]
            if len(x.shape) == 1:
                norm = norm.reshape((1,) + y.shape[1:])
            elif len(x.shape) == 2:
                norm = norm.reshape((y.shape[0],) + (1,) + y.shape[2:])
            y = y / norm
            if yerr is not None:
                yerr = yerr / norm
            # NOTE(review): the trailing ".all" is missing its call
            # parentheses, so this assert always passes -- confirm and
            # fix as "(...).all()" if the check is intended.
            assert((y[np.where(x==0)] == 1).all)

        # flatten time coordinate due to block structure
        y = y.reshape((-1,) + y.shape[len(x.shape):])
        if yerr is not None:
            yerr = yerr.reshape((-1,) + yerr.shape[len(x.shape):])
        x = x.flatten()

        # sort data by ascending time
        # (mergesort is stable, preserving block order for equal times)
        idx = x.argsort(kind='mergesort')
        x, y = x[idx], y[idx]
        if yerr is not None:
            yerr = yerr[idx]

        if parameter is None or len(parameter) == 1:
            # Single curve per file: label it with the file name.
            c = args.colors[i % len(args.colors)]    # cycle plot color
            ax.plot(x, y, color=c, label=fn)
            if args.axes == 'loglog' or args.axes == 'ylog':
                ax.plot(x, -y, '--', color=c)    # plot negative values
            if yerr is not None:
                ax.errorbar(x, y, yerr=yerr, color=c, mec=c, mfc=c)
        else:
            # One curve per parameter value; label only for the first
            # file to avoid duplicate legend entries.
            for j,p in enumerate(parameter):
                c = args.colors[j % len(args.colors)]    # cycle plot color
                label = (i == 0) and '{0:3g}'.format(p) or None
                ax.plot(x, y[:, j], color=c, label=label)
                if args.axes == 'loglog' or args.axes == 'ylog':
                    ax.plot(x, -y, '--', color=c)    # plot negative values
                if yerr is not None:
                    ax.errorbar(x, y[:, j], yerr=yerr[:, j], color=c, mec=c, mfc=c)

    if args.legend or not args.small:
        ax.legend(loc=args.legend)

    # set plot limits
    ax.axis('tight')
    if args.xlim:
        plot.setp(ax, xlim=args.xlim)
    if args.ylim:
        plot.setp(ax, ylim=args.ylim)

    # optionally plot with logarithmic scale(s)
    if args.axes == 'xlog':
        ax.set_xscale('log')
    if args.axes == 'ylog':
        ax.set_yscale('log')
    if args.axes == 'loglog':
        ax.set_xscale('log')
        ax.set_yscale('log')

    plot.setp(ax, xlabel=args.xlabel or 'time $t$')
    if args.norm:
        plot.setp(ax, ylabel=args.ylabel or r'$C(t) / C(0)$')
    else:
        plot.setp(ax, ylabel=args.ylabel or r'$C(t)$')

    # Show interactively or write to the requested output file.
    if args.output is None:
        plot.show()
    else:
        plot.savefig(args.output, dpi=args.dpi)
def add_parser(subparsers):
    """Register the 'tcf' sub-command (time correlation functions).

    :param subparsers: the object returned by
        ``argparse.ArgumentParser.add_subparsers()``; a new sub-parser
        named ``tcf`` is added to it.
    """
    parser = subparsers.add_parser('tcf', help='time correlation functions')
    # positional input files and dataset selection
    parser.add_argument('input', metavar='INPUT', nargs='+', help='H5MD input file')
    parser.add_argument('--dataset', help='specify dataset')
    parser.add_argument('--group', help='specify particle group')
    parser.add_argument('--type', choices=['MSD', 'MQD', 'VACF', 'ISF'], help='time correlation function')
    parser.add_argument('--slice', nargs='+', type=int, help='slicing index for the parameter, e.g., wavenumber')
    # plot appearance options
    parser.add_argument('--xlim', metavar='VALUE', type=float, nargs=2, help='limit x-axis to given range')
    parser.add_argument('--ylim', metavar='VALUE', type=float, nargs=2, help='limit y-axis to given range')
    parser.add_argument('--axes', default='xlog', choices=['linear', 'xlog', 'ylog', 'loglog'], help='logarithmic scaling')
    parser.add_argument('--norm', action='store_true', help='normalise correlation function by t=0 value')
    # NOTE(review): the parser is not returned and no set_defaults(func=...)
    # is visible here -- confirm the command is wired up elsewhere (the file
    # may be truncated at this point).
| gpl-3.0 |
henryzord/clustering | src/measures/sswc.py | 1 | 1931 | import pandas as pd
import numpy as np
from scipy.spatial.distance import cdist
__author__ = 'Henry Cagnini'
def get_partition(medoids, dataset):
    """
    Assign every object in the dataset to its closest medoid.

    :type medoids: numpy.ndarray
    :param medoids: Indicator array: zeros denote ordinary objects,
        ones denote the medoids.
    :type dataset: pandas.DataFrame
    :param dataset: Dataset WITHOUT the group/class attribute.
    :rtype: list
    :return: For each object, the positional index (into the dataset)
        of its closest medoid.
    """
    medoid_index = np.flatnonzero(medoids)
    medoids_sample = dataset.loc[medoid_index]
    # distance from every object to every medoid
    m_dist = cdist(dataset, medoids_sample, metric='euclidean')
    # List comprehension instead of map(): returns an actual list on both
    # Python 2 and Python 3 (map() is a lazy iterator on Python 3, which
    # would silently change the return type for callers).
    closest = [medoid_index[np.argmin(row)] for row in m_dist]
    return closest
def sswc(medoids, dataset):
    """
    Calculates the Simplified Silhouette Width Criterion.

    For every object, ``a`` is the distance to its closest medoid and
    ``b`` the distance to the second closest medoid; the criterion is
    the mean of ``(b - a) / max(a, b)`` over all objects.

    :type medoids: numpy.ndarray
    :param medoids: A true array where zeros denote default objects,
        and ones the medoids.
    :type dataset: pandas.DataFrame
    :param dataset: Dataset WITHOUT the group/class attribute.
    :return: The Simplified Silhouette Width Criterion.
    """
    n_objects, n_attributes = dataset.shape

    medoid_index = np.flatnonzero(medoids)  # index of objects that are medoids
    medoids_sample = dataset.loc[medoid_index]  # attribute values of the medoids

    # distance between every object and every medoid
    m_dist = cdist(dataset, medoids_sample, metric='euclidean')

    # Vectorised replacement for the per-row map()/xrange() loops of the
    # original: works on both Python 2 and Python 3 and avoids three
    # Python-level passes over the distance matrix.
    rows = np.arange(n_objects)
    a_raw = np.argmin(m_dist, axis=1)           # column of the closest medoid
    a = m_dist[rows, a_raw].astype(np.float32)  # distance to closest medoid

    # Mask each object's closest medoid so the next minimum per row is the
    # distance to the second closest medoid.
    m_dist[rows, a_raw] = np.inf
    b = np.min(m_dist, axis=1).astype(np.float32)  # distance to second closest medoid

    index = np.sum((b - a) / np.maximum(b, a)) / float(n_objects)
    return np.float32(index)
def main():
    """Demo: score a fixed 3-medoid partition of the iris dataset."""
    dataset_path = '../../datasets/iris.csv'
    # NOTE(review): only four feature names are given; this assumes the CSV
    # holds exactly the four numeric attributes and no class column --
    # verify against the actual file.
    dataset = pd.read_csv(dataset_path, names=['sepal_width', 'sepal_height', 'petal_width', 'petal_height'], header=None)
    # builtin int instead of the np.int alias (deprecated in NumPy 1.20,
    # removed in 1.24); identical dtype on all versions
    medoids = np.zeros(dataset.shape[0], dtype=int)
    medoids[[49, 99, 149]] = 1
    index = sswc(medoids, dataset)
    # print() call form is valid on both Python 2 and Python 3
    print('sswc: %f' % index)


if __name__ == '__main__':
    # Guard the entry point so importing this module no longer runs the
    # demo (the original called main() unconditionally at import time).
    main()
| gpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/api/filled_step.py | 1 | 7707 | """
=========================
Hatch-filled histograms
=========================
This example showcases the hatching capabilities of matplotlib by plotting
various histograms.
"""
import itertools
from collections import OrderedDict
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from cycler import cycler
from six.moves import zip
# nodebox section
if __name__ == '__builtin__':
    # We're running inside NodeBox: instead of showing figures interactively,
    # render each one to a temporary PNG and place it on the NodeBox canvas.
    import os
    import tempfile
    W = 800
    inset = 20
    # size(), imagesize() and image() are NodeBox canvas builtins -- assumed
    # to be injected by the host environment (TODO confirm)
    size(W, 600)
    plt.cla()
    plt.clf()
    plt.close('all')

    def tempimage():
        # Create a closed, non-deleting temp file so savefig can reopen it
        # by name (required on platforms where open temp files are locked).
        fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
        fname = fob.name
        fob.close()
        return fname

    # current insertion point on the canvas; advanced after every figure
    imgx = 20
    imgy = 0

    def pltshow(plt, dpi=150):
        # Save the current figure to a temp PNG, draw it at (imgx, imgy),
        # then advance the vertical cursor and grow the canvas to fit.
        global imgx, imgy
        temppath = tempimage()
        plt.savefig(temppath, dpi=dpi)
        dx,dy = imagesize(temppath)
        w = min(W,dx)
        image(temppath,imgx,imgy,width=w)
        imgy = imgy + dy + 20
        os.remove(temppath)
        # NOTE(review): HEIGHT is not defined anywhere in this file --
        # presumably a NodeBox global; confirm before relying on it.
        size(W, HEIGHT+dy+40)
else:
    def pltshow(mplpyplot):
        # Outside NodeBox, defer to the normal interactive backend.
        mplpyplot.show()
# nodebox section end
def filled_hist(ax, edges, values, bottoms=None, orientation='v',
                **kwargs):
    """
    Draw a histogram as a stepped patch.

    Extra kwargs are passed through to `fill_between`

    Parameters
    ----------
    ax : Axes
        The axes to plot to

    edges : array
        A length n+1 array giving the left edges of each bin and the
        right edge of the last bin.

    values : array
        A length n array of bin counts or values

    bottoms : scalar or array, optional
        A length n array of the bottom of the bars.  If None, zero is used.

    orientation : {'v', 'h'}
       Orientation of the histogram.  'v' (default) has
       the bars increasing in the positive y-direction.

    Returns
    -------
    ret : PolyCollection
        Artist added to the Axes
    """
    # (removed a leftover debug print of *orientation*)
    if orientation not in set('hv'):
        raise ValueError("orientation must be in {{'h', 'v'}} "
                         "not {o}".format(o=orientation))

    # 'post' step style matches histogram semantics: each value extends
    # from its left edge to the next edge
    kwargs.setdefault('step', 'post')
    edges = np.asarray(edges)
    values = np.asarray(values)
    if len(edges) - 1 != len(values):
        raise ValueError('Must provide one more bin edge than value not: '
                         'len(edges): {lb} len(values): {lv}'.format(
                             lb=len(edges), lv=len(values)))

    if bottoms is None:
        bottoms = np.zeros_like(values)
    if np.isscalar(bottoms):
        bottoms = np.ones_like(values) * bottoms

    # duplicate the last value so the final bin is drawn with 'post' steps
    values = np.r_[values, values[-1]]
    bottoms = np.r_[bottoms, bottoms[-1]]
    if orientation == 'h':
        return ax.fill_betweenx(edges, values, bottoms,
                                **kwargs)
    elif orientation == 'v':
        return ax.fill_between(edges, values, bottoms,
                               **kwargs)
    else:
        raise AssertionError("you should never be here")
def stack_hist(ax, stacked_data, sty_cycle, bottoms=None,
               hist_func=None, labels=None,
               plot_func=None, plot_kwargs=None):
    """
    ax : axes.Axes
        The axes to add artists to

    stacked_data : array or Mapping
        A (N, M) shaped array.  The first dimension will be iterated over to
        compute histograms row-wise

    sty_cycle : Cycler or iterable of dict
        Style to apply to each set

    bottoms : array, optional
        The initial positions of the bottoms, defaults to 0

    hist_func : callable, optional
        Must have signature `bin_vals, bin_edges = f(data)`.
        `bin_edges` expected to be one longer than `bin_vals`

    labels : list of str, optional
        The label for each set.

        If not given and stacked data is an array defaults to 'default set {n}'

        If stacked_data is a mapping, and labels is None, default to the keys
        (which may come out in a random order).

        If stacked_data is a mapping and labels is given then only
        the columns listed will be plotted.

    plot_func : callable, optional
        Function to call to draw the histogram must have signature:

          ret = plot_func(ax, edges, top, bottoms=bottoms,
                          label=label, **kwargs)

    plot_kwargs : dict, optional
        Any extra kwargs to pass through to the plotting function.  This
        will be the same for all calls to the plotting function and will
        over-ride the values in cycle.

    Returns
    -------
    arts : dict
        Dictionary of artists keyed on their labels
    """
    # deal with default binning function
    if hist_func is None:
        hist_func = np.histogram

    # deal with default plotting function
    if plot_func is None:
        plot_func = filled_hist

    # deal with default
    if plot_kwargs is None:
        plot_kwargs = {}
    # (removed leftover debug prints of plot_kwargs and sty)
    try:
        l_keys = stacked_data.keys()
        label_data = True
        if labels is None:
            labels = l_keys

    except AttributeError:
        # plain arrays have no .keys(); iterate positionally
        label_data = False
        if labels is None:
            labels = itertools.repeat(None)

    if label_data:
        loop_iter = enumerate((stacked_data[lab], lab, s) for lab, s in
                              zip(labels, sty_cycle))
    else:
        loop_iter = enumerate(zip(stacked_data, labels, sty_cycle))

    arts = {}
    for j, (data, label, sty) in loop_iter:
        if label is None:
            label = 'dflt set {n}'.format(n=j)
        # an explicit 'label' in the style cycle wins over the default
        label = sty.pop('label', label)
        vals, edges = hist_func(data)
        if bottoms is None:
            bottoms = np.zeros_like(vals)
        top = bottoms + vals
        # plot_kwargs override the per-set style values
        sty.update(plot_kwargs)
        ret = plot_func(ax, edges, top, bottoms=bottoms,
                        label=label, **sty)
        # stack: the next set starts where this one ended
        bottoms = top
        arts[label] = ret
    ax.legend(fontsize=10)
    return arts
# set up histogram function to fixed bins
edges = np.linspace(-3, 3, 20, endpoint=True)
hist_func = partial(np.histogram, bins=edges)
# set up style cycles
color_cycle = cycler(facecolor=plt.rcParams['axes.prop_cycle'][:4])
label_cycle = cycler('label', ['set {n}'.format(n=n) for n in range(4)])
hatch_cycle = cycler('hatch', ['/', '*', '+', '|'])
# Fixing random state for reproducibility
np.random.seed(19680801)
stack_data = np.random.randn(4, 12250)
dict_data = OrderedDict(zip((c['label'] for c in label_cycle), stack_data))
###############################################################################
# Work with plain arrays
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5), tight_layout=True)
arts = stack_hist(ax1, stack_data, color_cycle + label_cycle + hatch_cycle,
hist_func=hist_func)
arts = stack_hist(ax2, stack_data, color_cycle,
hist_func=hist_func,
plot_kwargs=dict(edgecolor='w', orientation='h'))
ax1.set_ylabel('counts')
ax1.set_xlabel('x')
ax2.set_xlabel('counts')
ax2.set_ylabel('x')
###############################################################################
# Work with labeled data
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5),
tight_layout=True, sharey=True)
arts = stack_hist(ax1, dict_data, color_cycle + hatch_cycle,
hist_func=hist_func)
arts = stack_hist(ax2, dict_data, color_cycle + hatch_cycle,
hist_func=hist_func, labels=['set 0', 'set 3'])
ax1.xaxis.set_major_locator(mticker.MaxNLocator(5))
ax1.set_xlabel('counts')
ax1.set_ylabel('x')
ax2.set_ylabel('x')
pltshow(plt)
| mit |
WarrenWeckesser/scikits-image | doc/ext/notebook_doc.py | 44 | 3042 | __all__ = ['python_to_notebook', 'Notebook']
import json
import copy
import warnings
# Skeleton notebook in JSON format
skeleton_nb = """{
"metadata": {
"name":""
},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "code",
"collapsed": false,
"input": [
"%matplotlib inline"
],
"language": "python",
"metadata": {},
"outputs": []
}
],
"metadata": {}
}
]
}"""
class Notebook(object):
    """
    Builder for an IPython notebook, assembled cell by cell.
    """

    def __init__(self):
        # template for an executable code cell
        code_cell = {
            'cell_type': 'code',
            'collapsed': False,
            'input': [
                '# Code Goes Here'
            ],
            'language': 'python',
            'metadata': {},
            'outputs': []
        }
        # template for a markdown text cell
        md_cell = {
            'cell_type': 'markdown',
            'metadata': {},
            'source': [
                'Markdown Goes Here'
            ]
        }
        self.cell_code = code_cell
        self.cell_md = md_cell
        self.template = json.loads(skeleton_nb)
        # map the content-bearing key of each cell kind to its template
        self.cell_type = {'input': code_cell, 'source': md_cell}
        self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}

    def add_cell(self, value, cell_type='code'):
        """Add a notebook cell.

        Parameters
        ----------
        value : str
            Cell content.
        cell_type : {'code', 'markdown'}
            Type of content (default is 'code').
        """
        if cell_type not in ('markdown', 'code'):
            warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)
            return
        key = self.valuetype_to_celltype[cell_type]
        cell_list = self.template['worksheets'][0]['cells']
        # copy the template so each cell is an independent dict,
        # then store the content under the kind-specific key
        new_cell = copy.deepcopy(self.cell_type[key])
        new_cell[key] = value
        cell_list.append(new_cell)

    def json(self):
        """Return a JSON representation of the notebook.

        Returns
        -------
        str
            JSON notebook.
        """
        return json.dumps(self.template, indent=2)
def test_notebook_basic():
    # A brand-new notebook must serialize to exactly the JSON skeleton.
    produced = json.loads(Notebook().json())
    expected = json.loads(skeleton_nb)
    assert(produced == expected)
def test_notebook_add():
    # Cells appended via add_cell must appear in order, with the content
    # stored under the kind-specific key ('source'/'input').
    md_text = 'hello world'
    code_text = 'f = lambda x: x * x'

    notebook = Notebook()
    notebook.add_cell(md_text, cell_type='markdown')
    notebook.add_cell(code_text, cell_type='code')

    parsed = json.loads(notebook.json())
    cells = parsed['worksheets'][0]['cells']
    payloads = [cell['input'] if cell['cell_type'] == 'code'
                else cell['source']
                for cell in cells]

    assert payloads[1] == md_text
    assert payloads[2] == code_text
    assert cells[1]['cell_type'] == 'markdown'
    assert cells[2]['cell_type'] == 'code'
if __name__ == "__main__":
    # Run this module's test functions when executed directly.
    # NOTE(review): numpy.testing.run_module_suite is deprecated and removed
    # in recent NumPy releases -- confirm the supported NumPy range.
    import numpy.testing as npt
    npt.run_module_suite()
| bsd-3-clause |
macks22/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities:
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
shyamalschandra/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 86 | 4092 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
k_means_cluster_centers = np.sort(k_means.cluster_centers_, axis=0)
mbk_means_cluster_centers = np.sort(mbk.cluster_centers_, axis=0)
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
yafeunteun/wikipedia-spam-classifier | revscoring/revscoring/scorer_models/test_statistics/recall_at_fpr.py | 1 | 3635 | import io
from collections import defaultdict
from sklearn.metrics import recall_score
from tabulate import tabulate
from . import util
from .test_statistic import ClassifierStatistic, TestStatistic
class recall_at_fpr(ClassifierStatistic):
    """
    Constructs a statistics generator that measures the maximum recall
    that can be achieved at maximum false-positive rate.  As a classifier
    gets better, the attainable recall at low false-positive rates should
    increase.

    When applied to a test set, the `score()` method will return a dictionary
    with three fields:

     * threshold: The probability threshold where recall was maximized
     * recall: The recall at `threshold`
     * fpr: The false-positive rate at `threshold`

    :Parameters:
        max_fpr : `float`
            Maximum false-positive rate that will be tolerated
    """
    def __init__(self, max_fpr):
        self.max_fpr = max_fpr
        super().__init__(max_fpr=max_fpr)

    def _single_class_stat(self, scores, labels, comparison_label):
        """Best (threshold, recall, fpr) for one label within the FPR budget."""
        y_proba = [s['probability'][comparison_label] for s in scores]

        y_true = [label == comparison_label for label in labels]

        # Treat every distinct predicted probability as a candidate
        # decision threshold and measure recall/FPR at each.
        probas = set(y_proba)
        proba_recall_fprs = [
            (proba, recall_score(y_true, [p >= proba for p in y_proba]),
             util.fpr_score(y_true, [p >= proba for p in y_proba]))
            for proba in probas
        ]

        # Keep only thresholds that respect the false-positive budget.
        filtered = [(proba, recall, fpr)
                    for proba, recall, fpr in proba_recall_fprs
                    if fpr <= self.max_fpr]

        if len(filtered) == 0:
            # No threshold satisfies the constraint.
            return {
                'threshold': None,
                'recall': None,
                'fpr': None
            }
        else:
            # Pick the threshold that maximizes recall.
            filtered.sort(key=lambda v: v[1], reverse=True)
            return dict(zip(['threshold', 'recall', 'fpr'], filtered[0]))

    def merge(self, stats):
        """Average per-label statistics across several score() results."""
        label_vals = defaultdict(lambda: defaultdict(list))
        for stat in stats:
            for label, label_stat in stat.items():
                label_vals[label]['threshold'].append(label_stat['threshold'])
                label_vals[label]['recall'].append(label_stat['recall'])
                label_vals[label]['fpr'].append(label_stat['fpr'])

        merged_stats = {}
        for label, metric_vals in label_vals.items():
            merged_stats[label] = \
                {name: util.mean_or_none(vals)
                 for name, vals in metric_vals.items()}

        return merged_stats

    def format(self, stats, format="str"):
        """Format statistics as text ("str") or rounded data ("json")."""
        if format == "str":
            return self.format_str(stats)
        elif format == "json":
            return util.round_floats(stats, 3)
        else:
            # BUG FIX: instances do not inherit the class's __name__
            # attribute (it lives on the metaclass), so `self.__name__`
            # raised AttributeError instead of the intended TypeError.
            raise TypeError("Format '{0}' not available for {1}."
                            .format(format, type(self).__name__))

    def format_str(self, stat):
        """Render the per-label statistics as an indented text table."""
        formatted = io.StringIO()
        formatted.write("Recall @ {0} false-positive rate:\n"
                        .format(self.max_fpr))
        table_data = [(repr(label),
                       util.round_or_none(stat[label]['threshold'], 3),
                       util.round_or_none(stat[label]['recall'], 3),
                       util.round_or_none(stat[label]['fpr'], 3))
                      for label in sorted(stat.keys())]
        table = tabulate(
            table_data, headers=["label", "threshold", "recall", "fpr"])
        formatted.write("".join(["\t" + line + "\n" for line in
                                 table.split("\n")]))
        return formatted.getvalue()
TestStatistic.register("recall_at_fpr", recall_at_fpr)
| mit |
roryyorke/python-control | examples/cruise-control.py | 1 | 17056 | # cruise-control.py - Cruise control example from FBS
# RMM, 16 May 2019
#
# The cruise control system of a car is a common feedback system encountered
# in everyday life. The system attempts to maintain a constant velocity in the
# presence of disturbances primarily caused by changes in the slope of a
# road. The controller compensates for these unknowns by measuring the speed
# of the car and adjusting the throttle appropriately.
#
# This file explore the dynamics and control of the cruise control system,
# following the material presenting in Feedback Systems by Astrom and Murray.
# A full nonlinear model of the vehicle dynamics is used, with both PI and
# state space control laws. Different methods of constructing control systems
# are show, all using the InputOutputSystem class (and subclasses).
import numpy as np
import matplotlib.pyplot as plt
from math import pi
import control as ct
#
# Section 4.1: Cruise control modeling and control
#
# Vehicle model: vehicle()
#
# To develop a mathematical model we start with a force balance for
# the car body. Let v be the speed of the car, m the total mass
# (including passengers), F the force generated by the contact of the
# wheels with the road, and Fd the disturbance force due to gravity,
# friction, and aerodynamic drag.
def vehicle_update(t, x, u, params={}):
    """Vehicle dynamics for cruise control system.

    Parameters
    ----------
    x : array
         System state: car velocity in m/s

    u : array
         System input: [throttle, gear, road_slope], where throttle is
         a float between 0 and 1, gear is an integer between 1 and 5,
         and road_slope is in rad.

    Returns
    -------
    float
        Vehicle acceleration

    """
    from math import copysign, sin

    # Physical constants and vehicle parameters (overridable via params)
    m = params.get('m', 1600.)           # vehicle mass
    g = params.get('g', 9.8)             # gravitational constant
    Cr = params.get('Cr', 0.01)          # rolling friction coefficient
    Cd = params.get('Cd', 0.32)          # aerodynamic drag coefficient
    rho = params.get('rho', 1.3)         # air density
    A = params.get('A', 2.4)             # frontal area of the car
    alpha = params.get(
        'alpha', [40, 25, 16, 12, 10])   # gear ratio / wheel radius

    # Unpack the state and the three inputs
    v = x[0]                             # vehicle velocity
    throttle = np.clip(u[0], 0, 1)       # throttle, saturated to [0, 1]
    gear = u[1]                          # selected gear
    theta = u[2]                         # road slope

    # Driving force: engine torque at the current engine speed, scaled by
    # the throttle and transmitted through the gear ratio.
    omega = alpha[int(gear)-1] * v       # engine angular speed
    F = alpha[int(gear)-1] * motor_torque(omega, params) * throttle

    # Disturbance forces opposing the motion:
    # gravity component along the slope
    Fg = m * g * sin(theta)
    # rolling friction, acting against the direction of travel
    Fr = m * g * Cr * copysign(1, v)
    # aerodynamic drag, quadratic in speed
    Fa = 1/2 * rho * Cd * A * abs(v) * v

    # Newton's second law: net force over mass gives the acceleration.
    Fd = Fg + Fr + Fa
    dv = (F - Fd) / m

    return dv
# Engine model: motor_torque
#
# The force F is generated by the engine, whose torque is proportional to
# the rate of fuel injection, which is itself proportional to a control
# signal 0 <= u <= 1 that controls the throttle position. The torque also
# depends on engine speed omega.
def motor_torque(omega, params={}):
    """Engine torque as a function of engine angular speed *omega*.

    The torque curve is an inverted parabola that peaks at ``Tm`` when
    ``omega == omega_m`` and is clipped below at zero; parameters may be
    overridden through *params*.
    """
    peak_torque = params.get('Tm', 190.)      # engine torque constant
    peak_speed = params.get('omega_m', 420.)  # peak engine angular speed
    rolloff = params.get('beta', 0.4)         # peak engine rolloff

    deviation = omega / peak_speed - 1
    torque = peak_torque * (1 - rolloff * deviation ** 2)
    # Negative torque is not physical: clip at zero, no upper bound.
    return np.clip(torque, 0, None)
# Define the input/output system for the vehicle
vehicle = ct.NonlinearIOSystem(
vehicle_update, None, name='vehicle',
inputs = ('u', 'gear', 'theta'), outputs = ('v'), states=('v'))
# Figure 1.11: A feedback system for controlling the speed of a vehicle. In
# this example, the speed of the vehicle is measured and compared to the
# desired speed. The controller is a PI controller represented as a transfer
# function. In the textbook, the simulations are done for LTI systems, but
# here we simulate the full nonlinear system.
# Construct a PI controller with rolloff, as a transfer function
Kp = 0.5 # proportional gain
Ki = 0.1 # integral gain
control_tf = ct.tf2io(
ct.TransferFunction([Kp, Ki], [1, 0.01*Ki/Kp]),
name='control', inputs='u', outputs='y')
# Construct the closed loop control system
# Inputs: vref, gear, theta
# Outputs: v (vehicle velocity)
cruise_tf = ct.InterconnectedSystem(
(control_tf, vehicle), name='cruise',
connections = (
('control.u', '-vehicle.v'),
('vehicle.u', 'control.y')),
inplist = ('control.u', 'vehicle.gear', 'vehicle.theta'),
inputs = ('vref', 'gear', 'theta'),
outlist = ('vehicle.v', 'vehicle.u'),
outputs = ('v', 'u'))
# Define the time and input vectors
T = np.linspace(0, 25, 101)
vref = 20 * np.ones(T.shape)
gear = 4 * np.ones(T.shape)
theta0 = np.zeros(T.shape)
# Now simulate the effect of a hill at t = 5 seconds
plt.figure()
plt.suptitle('Response to change in road slope')
vel_axes = plt.subplot(2, 1, 1)
inp_axes = plt.subplot(2, 1, 2)
theta_hill = np.array([
0 if t <= 5 else
4./180. * pi * (t-5) if t <= 6 else
4./180. * pi for t in T])
for m in (1200, 1600, 2000):
# Compute the equilibrium state for the system
X0, U0 = ct.find_eqpt(
cruise_tf, [0, vref[0]], [vref[0], gear[0], theta0[0]],
iu=[1, 2], y0=[vref[0], 0], iy=[0], params={'m':m})
t, y = ct.input_output_response(
cruise_tf, T, [vref, gear, theta_hill], X0, params={'m':m})
# Plot the velocity
plt.sca(vel_axes)
plt.plot(t, y[0])
# Plot the input
plt.sca(inp_axes)
plt.plot(t, y[1])
# Add labels to the plots
plt.sca(vel_axes)
plt.ylabel('Speed [m/s]')
plt.legend(['m = 1000 kg', 'm = 2000 kg', 'm = 3000 kg'], frameon=False)
plt.sca(inp_axes)
plt.ylabel('Throttle')
plt.xlabel('Time [s]')
# Figure 4.2: Torque curves for a typical car engine. The graph on the
# left shows the torque generated by the engine as a function of the
# angular velocity of the engine, while the curve on the right shows
# torque as a function of car speed for different gears.
plt.figure()
plt.suptitle('Torque curves for typical car engine')
# Figure 4.2a - single torque curve as function of omega
omega_range = np.linspace(0, 700, 701)
plt.subplot(2, 2, 1)
plt.plot(omega_range, [motor_torque(w) for w in omega_range])
plt.xlabel('Angular velocity $\omega$ [rad/s]')
plt.ylabel('Torque $T$ [Nm]')
plt.grid(True, linestyle='dotted')
# Figure 4.2b - torque curves in different gears, as function of velocity
plt.subplot(2, 2, 2)
v_range = np.linspace(0, 70, 71)
alpha = [40, 25, 16, 12, 10]
for gear in range(5):
omega_range = alpha[gear] * v_range
plt.plot(v_range, [motor_torque(w) for w in omega_range],
color='blue', linestyle='solid')
# Set up the axes and style
plt.axis([0, 70, 100, 200])
plt.grid(True, linestyle='dotted')
# Add labels
plt.text(11.5, 120, '$n$=1')
plt.text(24, 120, '$n$=2')
plt.text(42.5, 120, '$n$=3')
plt.text(58.5, 120, '$n$=4')
plt.text(58.5, 185, '$n$=5')
plt.xlabel('Velocity $v$ [m/s]')
plt.ylabel('Torque $T$ [Nm]')
plt.show(block=False)
# Figure 4.3: Car with cruise control encountering a sloping road
# PI controller model: control_pi()
#
# We add to this model a feedback controller that attempts to regulate the
# speed of the car in the presence of disturbances. We shall use a
# proportional-integral controller
def pi_update(t, x, u, params={}):
    """State update for the PI controller: error integrator with anti-windup.

    The controller state is the integrated velocity error; when the
    commanded throttle saturates outside [0, 1], the anti-windup term
    bleeds off the integrator so it does not keep accumulating.
    """
    # Controller gains
    ki = params.get('ki', 0.1)          # integral gain
    kaw = params.get('kaw', 2)          # anti-windup gain

    # Inputs and state (named for readability)
    v, vref = u[0], u[1]                # current and reference velocity
    z = x[0]                            # integrated error

    # Nominal (unsaturated) controller output, needed for anti-windup
    u_a = pi_output(t, x, u, params)

    # Anti-windup compensation: proportional to the amount by which the
    # output exceeds its [0, 1] range, scaled by 1/ki to account for the
    # integrator structure.
    if ki != 0:
        u_aw = kaw / ki * (np.clip(u_a, 0, 1) - u_a)
    else:
        u_aw = 0

    # Integrator input: tracking error plus anti-windup correction
    return (vref - v) + u_aw
def pi_output(t, x, u, params={}):
    """Output of the PI controller: kp * error + ki * integrated error."""
    # Controller gains
    kp = params.get('kp', 0.5)          # proportional gain
    ki = params.get('ki', 0.1)          # integral gain

    # Inputs and state (named for readability)
    v, vref = u[0], u[1]                # current and reference velocity
    z = x[0]                            # integrated error

    # Classic PI control law
    error = vref - v
    return kp * error + ki * z
control_pi = ct.NonlinearIOSystem(
pi_update, pi_output, name='control',
inputs = ['v', 'vref'], outputs = ['u'], states = ['z'],
params = {'kp':0.5, 'ki':0.1})
# Create the closed loop system
cruise_pi = ct.InterconnectedSystem(
(vehicle, control_pi), name='cruise',
connections=(
('vehicle.u', 'control.u'),
('control.v', 'vehicle.v')),
inplist=('control.vref', 'vehicle.gear', 'vehicle.theta'),
outlist=('control.u', 'vehicle.v'), outputs=['u', 'v'])
# Figure 4.3b shows the response of the closed loop system. The figure shows
# that even if the hill is so steep that the throttle changes from 0.17 to
# almost full throttle, the largest speed error is less than 1 m/s, and the
# desired velocity is recovered after 20 s.
# Define a function for creating a "standard" cruise control plot
def cruise_plot(sys, t, y, t_hill=5, vref=20, antiwindup=False,
                linetype='b-', subplots=[None, None]):
    """Create the "standard" two-panel cruise control plot.

    Top panel: velocity vs. time with the reference velocity and the hill
    onset marked.  Bottom panel: commanded throttle, plus the clipped
    (applied) throttle when *antiwindup* is set.

    Parameters
    ----------
    sys : system object with a find_output() method
        Closed loop system; used only to look up the 'v' and 'u' outputs.
    t, y : array
        Simulation time vector and output trajectories.
    t_hill : float, optional
        Time at which the hill starts (marked with a dashed line).
    vref : float, optional
        Reference velocity, used for the axis limits and reference line.
    antiwindup : bool, optional
        When True, also plot the throttle clipped to [0, 1].
    linetype : str, optional
        Matplotlib format string for the main traces.
    subplots : list, optional
        Pair of existing axes to draw into; a new subplot is created for
        each None entry.  NOTE(review): mutable default argument -- safe
        here only because it is copied via list() below and never mutated.

    Returns
    -------
    list
        The two axes used, so later calls can overlay onto them.
    """
    # Figure out the plot bounds and indices
    v_min = vref-1.2; v_max = vref+0.5; v_ind = sys.find_output('v')
    u_min = 0; u_max = 2 if antiwindup else 1; u_ind = sys.find_output('u')

    # Make sure the upper and lower bounds on v are OK
    while max(y[v_ind]) > v_max: v_max += 1
    while min(y[v_ind]) < v_min: v_min -= 1

    # Create arrays for return values
    subplot_axes = list(subplots)

    # Velocity profile
    if subplot_axes[0] is None:
        subplot_axes[0] = plt.subplot(2, 1, 1)
    else:
        plt.sca(subplots[0])
    plt.plot(t, y[v_ind], linetype)
    plt.plot(t, vref*np.ones(t.shape), 'k-')
    # dashed vertical marker at the hill onset
    plt.plot([t_hill, t_hill], [v_min, v_max], 'k--')
    plt.axis([0, t[-1], v_min, v_max])
    plt.xlabel('Time $t$ [s]')
    plt.ylabel('Velocity $v$ [m/s]')

    # Commanded input profile
    if subplot_axes[1] is None:
        subplot_axes[1] = plt.subplot(2, 1, 2)
    else:
        plt.sca(subplots[1])
    plt.plot(t, y[u_ind], 'r--' if antiwindup else linetype)
    plt.plot([t_hill, t_hill], [u_min, u_max], 'k--')
    plt.axis([0, t[-1], u_min, u_max])
    plt.xlabel('Time $t$ [s]')
    plt.ylabel('Throttle $u$')

    # Applied input profile
    if antiwindup:
        # TODO: plot the actual signal from the process?
        plt.plot(t, np.clip(y[u_ind], 0, 1), linetype)
        plt.legend(['Commanded', 'Applied'], frameon=False)

    return subplot_axes
# Define the time and input vectors
T = np.linspace(0, 30, 101)
vref = 20 * np.ones(T.shape)
gear = 4 * np.ones(T.shape)
theta0 = np.zeros(T.shape)
# Compute the equilibrium throttle setting for the desired speed (solve for x
# and u given the gear, slope, and desired output velocity)
X0, U0, Y0 = ct.find_eqpt(
cruise_pi, [vref[0], 0], [vref[0], gear[0], theta0[0]],
y0=[0, vref[0]], iu=[1, 2], iy=[1], return_y=True)
# Now simulate the effect of a hill at t = 5 seconds
plt.figure()
plt.suptitle('Car with cruise control encountering sloping road')
theta_hill = [
0 if t <= 5 else
4./180. * pi * (t-5) if t <= 6 else
4./180. * pi for t in T]
t, y = ct.input_output_response(cruise_pi, T, [vref, gear, theta_hill], X0)
cruise_plot(cruise_pi, t, y)
#
# Example 7.8: State space feedback with integral action
#
# State space controller model: control_sf_ia()
#
# Construct a state space controller with integral action, linearized around
# an equilibrium point. The controller is constructed around the equilibrium
# point (x_d, u_d) and includes both feedforward and feedback compensation.
#
# Controller inputs: (x, y, r) system states, system output, reference
# Controller state: z integrated error (y - r)
# Controller output: u state feedback control
#
# Note: to make the structure of the controller more clear, we implement this
# as a "nonlinear" input/output module, even though the actual input/output
# system is linear. This also allows the use of parameters to set the
# operating point and gains for the controller.
def sf_update(t, z, u, params={}):
    """Dynamics of the controller's integral-action state.

    The state z integrates the tracking error, i.e. dz/dt = y - r, where
    y is the measured system output (u[1]) and r the reference (u[2]).
    """
    measured, reference = u[1], u[2]
    return measured - reference
def sf_output(t, z, u, params={}):
    """Controller output: state feedback + integral action + feedforward.

    Computes u = ud - K (x - xd) - ki z + kf (r - yd), where (xd, ud, yd)
    is the operating point and K, ki, kf are the feedback, integral, and
    feedforward gains.  All parameters default to 0 when not supplied.
    """
    # Pull the gains and operating point out of the parameter dict.
    K, ki, kf, xd, yd, ud = (
        params.get(name, 0) for name in ('K', 'ki', 'kf', 'xd', 'yd', 'ud'))

    # Controller inputs: system state, measured output, reference.
    x, y, r = u[0], u[1], u[2]

    return ud - K * (x - xd) - ki * z + kf * (r - yd)
# Create the input/output system for the controller
# Inputs: system state x, measured output y, reference r; single state z
# (integrated error); single output u (throttle command).
control_sf = ct.NonlinearIOSystem(
    sf_update, sf_output, name='control',
    inputs=('x', 'y', 'r'),
    outputs=('u'),
    states=('z'))

# Create the closed loop system for the state space controller
# Controller output drives the vehicle throttle; the vehicle velocity is
# fed back as both the "state" x and the measured output y.
cruise_sf = ct.InterconnectedSystem(
    (vehicle, control_sf), name='cruise',
    connections=(
        ('vehicle.u', 'control.u'),
        ('control.x', 'vehicle.v'),
        ('control.y', 'vehicle.v')),
    inplist=('control.r', 'vehicle.gear', 'vehicle.theta'),
    outlist=('control.u', 'vehicle.v'), outputs=['u', 'v'])

# Compute the linearization of the dynamics around the equilibrium point
# Y0 represents the steady state with PI control => we can use it to
# identify the steady state velocity and required throttle setting.
xd = Y0[1]    # desired (steady-state) velocity
ud = Y0[0]    # throttle setting that holds that velocity
yd = Y0[1]    # measured output at the operating point (same as xd here)

# Compute the linearized system at the eq pt
cruise_linearized = ct.linearize(vehicle, xd, [ud, gear[0], 0])

# Construct the gain matrices for the system
# B[0, 0] extracts the scalar throttle-input gain (1-state, multi-input model).
A, B, C = cruise_linearized.A, cruise_linearized.B[0, 0], cruise_linearized.C
K = 0.5
# Feedforward gain chosen for unit steady-state gain from r to y
# (scalar computation; relies on the linearization being 1x1 here).
kf = -1 / (C * np.linalg.inv(A - B * K) * B)
# Response of the system with no integral feedback term, then with integral
# action, to a 4 degree hill starting at t = 8 seconds.
plt.figure()
plt.suptitle('Cruise control with proportional and PI control')
# Road slope: flat until t=8, linear ramp to 4 degrees by t=9 (radians).
theta_hill = [
    0 if t <= 8 else
    4./180. * pi * (t-8) if t <= 9 else
    4./180. * pi for t in T]

# Response with no integral feedback term (ki = 0).
# Fix: the original params dict listed the 'kf' key twice; Python silently
# keeps only the last occurrence, so the duplicate has been removed.
t, y = ct.input_output_response(
    cruise_sf, T, [vref, gear, theta_hill], [X0[0], 0],
    params={'K': K, 'kf': kf, 'ki': 0.0, 'xd': xd, 'ud': ud, 'yd': yd})
subplots = cruise_plot(cruise_sf, t, y, t_hill=8, linetype='b--')

# Response of the system with state feedback + integral action (ki = 0.1).
t, y = ct.input_output_response(
    cruise_sf, T, [vref, gear, theta_hill], [X0[0], 0],
    params={'K': K, 'kf': kf, 'ki': 0.1, 'xd': xd, 'ud': ud, 'yd': yd})
cruise_plot(cruise_sf, t, y, t_hill=8, linetype='b-', subplots=subplots)

# Add a legend
plt.legend(['Proportional', 'PI control'], frameon=False)
# Example 11.5: simulate the effect of a (steeper) hill at t = 5 seconds
#
# The windup effect occurs when a car encounters a hill that is so steep (6
# deg) that the throttle saturates when the cruise controller attempts to
# maintain speed.
plt.figure()
plt.suptitle('Cruise control with integrator windup')
# Longer horizon (70 s) to show the slow recovery caused by windup.
T = np.linspace(0, 70, 101)
vref = 20 * np.ones(T.shape)
# Road slope ramps from 0 to 6 degrees between t=5 and t=6 (radians).
theta_hill = [
    0 if t <= 5 else
    6./180. * pi * (t-5) if t <= 6 else
    6./180. * pi for t in T]
# kaw=0 disables the anti-windup feedback in the PI controller.
t, y = ct.input_output_response(
    cruise_pi, T, [vref, gear, theta_hill], X0,
    params={'kaw':0})
cruise_plot(cruise_pi, t, y, antiwindup=True)

# Example 11.6: add anti-windup compensation
#
# Anti-windup can be applied to the system to improve the response. Because of
# the feedback from the actuator model, the output of the integrator is
# quickly reset to a value such that the controller output is at the
# saturation limit.
plt.figure()
plt.suptitle('Cruise control with integrator anti-windup protection')
# kaw=2 enables the anti-windup feedback gain.
t, y = ct.input_output_response(
    cruise_pi, T, [vref, gear, theta_hill], X0,
    params={'kaw':2.})
cruise_plot(cruise_pi, t, y, antiwindup=True)
# If running as a standalone program, show plots and wait before closing;
# when imported or when running under the example test suite, don't block.
import os

if __name__ != '__main__' or 'PYCONTROL_TEST_EXAMPLES' in os.environ:
    plt.show(block=False)
else:
    plt.show()
| bsd-3-clause |
kyleabeauchamp/testrepo | src/make_graph.py | 4 | 1173 | import numpy as np
import matplotlib.pyplot as plt
import networkx as nx

# Build (prerequisite, dependent) pairs for the package dependency graph.
# NOTE(review): numpy is assumed to be imported as np at the top of this
# script -- confirm against the full file.
pairs = [("openmm", "yank"), ("cmake", "openmm"), ("cuda", "openmm"), ("fftw3f", "openmm"), ("swig", "openmm"), ("mpi4py", "yank"), ("netcdf4", "yank"),
         ("numpy", "mdtraj"), ("scipy", "mdtraj"),  # ("mdtraj", "mixtape"), ("sklearn", "mixtape"),
         ("sphinx-bibtex", "openmm"), ("mdtraj", "yank"), ("ambermini", "yank")]

# Flip each pair so edges point from dependent to prerequisite.
pairs = [(b, a) for (a, b) in pairs]

# np.unique flattens the tuples, giving the sorted set of node names.
all_nodes = list(np.unique(pairs))

# One entry per figure: the set of "hard" nodes to highlight in red.
hard_nodes_list = [
    all_nodes,
    ["cmake", "cuda", "sphinx-bibtex", "ambermini", "openmm", "mdtraj"],
    []
]
graph = nx.DiGraph(pairs)
positions = nx.spring_layout(graph)

# Render one figure per highlight set: "hard" nodes in red, the rest in blue.
for k, hard_nodes in enumerate(hard_nodes_list):
    plt.figure()
    easy_nodes = list(np.setdiff1d(all_nodes, hard_nodes))
    # Draw the graph invisibly first so axes/limits are set up consistently.
    nx.draw(graph, pos=positions, alpha=0.0)
    nx.draw_networkx_nodes(graph, positions, nodelist=easy_nodes, node_color="b", alpha=0.5)
    nx.draw_networkx_nodes(graph, positions, nodelist=hard_nodes, node_color="r", alpha=0.5)
    nx.draw_networkx_edges(graph, positions, alpha=0.5)
    nx.draw_networkx_labels(graph, positions, font_size=18)
    plt.savefig("./figures/dependencies%d.png" % k)
| gpl-2.0 |
pastas/pasta | tests/test_gxg.py | 1 | 4842 | # -*- coding: utf-8 -*-
"""
Author: T. van Steijn, R.A. Collenteur, 2017
"""
import numpy as np
import pandas as pd
import pastas as ps
class TestGXG(object):
    """Tests for the GHG/GLG/GVG groundwater statistics in pastas.

    NOTE: two near-duplicate, commented-out versions of a
    ``test_gxg_series`` test (reading ``data/hseries_gxg.csv`` and
    comparing against Menyanthes values) were removed as dead code;
    recover them from version control if ever needed.
    """

    def test_ghg(self):
        """GHG of a short series with explicit minimum requirements."""
        idx = pd.to_datetime(['20160114', '20160115', '20160128', '20160214'])
        s = pd.Series([10., 3., 30., 20.], index=idx)
        v = ps.stats.ghg(s, min_n_meas=1, min_n_years=1)
        assert v == 30.0

    def test_ghg_ffill(self):
        """Forward filling propagates earlier observations."""
        idx = pd.to_datetime(['20160101', '20160115', '20160130'])
        s = pd.Series([0., 0., 10.], index=idx)
        v = ps.stats.ghg(s, fill_method='ffill', limit=15, min_n_meas=1,
                         min_n_years=1)
        assert v == 0.

    def test_ghg_bfill(self):
        """Backward filling propagates later observations."""
        idx = pd.to_datetime(['20160101', '20160115', '20160130'])
        s = pd.Series([0., 0., 10.], index=idx)
        v = ps.stats.ghg(s, fill_method='bfill', limit=15, min_n_meas=1,
                         min_n_years=1)
        # TODO is this correct?
        assert v == 10.

    def test_ghg_linear(self):
        """Linear interpolation between sparse observations."""
        idx = pd.to_datetime(['20160101', '20160110', '20160120', '20160130'])
        s = pd.Series([0., 0., 10., 10.], index=idx)
        v = ps.stats.ghg(s, fill_method='linear', min_n_meas=1,
                         min_n_years=1, limit=8)
        # TODO is this correct?
        assert v == 10.

    def test_ghg_len_yearly(self):
        """Yearly output over 2000-2055 yields 55 non-NaN values."""
        idx = pd.date_range('20000101', '20550101', freq='d')
        s = pd.Series(np.ones(len(idx)), index=idx)
        v = ps.stats.ghg(s, output='yearly')
        assert v.notna().sum() == 55

    def test_glg(self):
        """GLG of a deterministic month+day series."""
        idx = pd.date_range('20000101', '20550101', freq='d')
        s = pd.Series([x.month + x.day for x in idx], index=idx, )
        v = ps.stats.glg(s, year_offset='a')
        assert v == 16.

    def test_glg_fill_limit(self):
        """A finite interpolation limit restricts the filled years."""
        idx = pd.to_datetime(['20170115', '20170130', '20200101'])
        s = pd.Series(np.ones(len(idx)), index=idx)
        v = ps.stats.glg(s, fill_method='linear', limit=15,
                         output='yearly', year_offset='a', min_n_meas=1)
        assert v.notna().sum() == 2

    def test_glg_fill_limit_null(self):
        """Without a limit, interpolation spans all gap years."""
        idx = pd.to_datetime(['20170101', '20170131', '20200101'])
        s = pd.Series(np.ones(len(idx)), index=idx)
        v = ps.stats.glg(s, fill_method='linear', limit=None,
                         output='yearly', year_offset='a', min_n_meas=1)
        assert v.notna().sum() == 3

    def test_gvg(self):
        """GVG mean with linear interpolation over spring dates."""
        idx = pd.to_datetime(['20170314', '20170328', '20170414', '20170428'])
        s = pd.Series([1., 2., 3., 4], index=idx)
        v = ps.stats.gvg(s, fill_method='linear', output='mean', min_n_meas=1,
                         min_n_years=1)
        assert v == 2.

    def test_gvg_nan(self):
        """Without filling, missing spring observations give NaN."""
        idx = pd.to_datetime(['20170228', '20170428', '20170429'])
        s = pd.Series([1., 2., 3.], index=idx)
        v = ps.stats.gvg(s, fill_method=None, output='mean', min_n_meas=1,
                         min_n_years=1)
        assert np.isnan(v)
| mit |
nmayorov/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
    """Generate a synthetic low-rank dataset Y = U V + noise.

    V rows are localized square "atoms" on an image grid; U holds random
    loadings.  NOTE(review): only three centers/sizes are defined below,
    so n_components > 3 would raise an IndexError -- confirm callers never
    exceed 3 components.

    The order of the rng.randn calls below defines the data; do not
    reorder them or results change for a fixed seed.
    """
    n_features = image_size[0] * image_size[1]

    rng = check_random_state(random_state)
    U = rng.randn(n_samples, n_components)
    V = rng.randn(n_components, n_features)

    centers = [(3, 3), (6, 7), (8, 1)]
    sz = [1, 2, 1]
    for k in range(n_components):
        img = np.zeros(image_size)
        xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
        ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
        img[xmin:xmax][:, ymin:ymax] = 1.0
        V[k, :] = img.ravel()

    # Y is defined by : Y = UV + noise
    Y = np.dot(U, V)
    Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1])  # Add noise

    return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
    """Components and transformed data must have the expected shapes."""
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)

    # Undercomplete decomposition (fewer components than features).
    model = SparsePCA(n_components=8, random_state=rng)
    transformed = model.fit_transform(X)
    assert_equal(model.components_.shape, (8, 10))
    assert_equal(transformed.shape, (12, 8))

    # test overcomplete decomposition
    model = SparsePCA(n_components=13, random_state=rng)
    transformed = model.fit_transform(X)
    assert_equal(model.components_.shape, (13, 10))
    assert_equal(transformed.shape, (12, 13))
def test_fit_transform():
    """LARS and coordinate-descent solvers should agree on the components."""
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array

    model_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                           random_state=0)
    model_lars.fit(Y)

    # Test that CD gives similar results
    model_cd = SparsePCA(n_components=3, method='cd', random_state=0,
                         alpha=alpha)
    model_cd.fit(Y)
    assert_array_almost_equal(model_cd.components_, model_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
    """Fitting with n_jobs=2 must reproduce the single-job transform."""
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array

    serial = SparsePCA(n_components=3, method='lars', alpha=alpha,
                       random_state=0)
    serial.fit(Y)
    U_serial = serial.transform(Y)

    # Test multiple CPUs
    parallel = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
                         random_state=0).fit(Y)
    U_parallel = parallel.transform(Y)
    assert_true(not np.all(serial.components_ == 0))
    assert_array_almost_equal(U_serial, U_parallel)
def test_transform_nan():
    """A feature that is 0 in all samples must not yield NaNs.

    Regression test: SparsePCA should handle an all-zero column gracefully.
    """
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    Y[:, 0] = 0
    model = SparsePCA(n_components=8)
    assert_false(np.any(np.isnan(model.fit_transform(Y))))
def test_fit_transform_tall():
    """LARS and CD should agree on tall (n_samples > n_features) data."""
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng)  # tall array

    model_lars = SparsePCA(n_components=3, method='lars', random_state=rng)
    U_lars = model_lars.fit_transform(Y)

    model_cd = SparsePCA(n_components=3, method='cd', random_state=rng)
    U_cd = model_cd.fit(Y).transform(Y)
    assert_array_almost_equal(U_lars, U_cd)
def test_initialization():
    """With max_iter=0 the fitted components must equal V_init unchanged."""
    rng = np.random.RandomState(0)
    U_init = rng.randn(5, 3)
    V_init = rng.randn(3, 4)

    # Zero iterations: the solver must leave the initial dictionary intact.
    model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
                      random_state=rng)
    model.fit(rng.randn(5, 4))
    assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
    """Mini-batch variant must produce the same shapes as SparsePCA."""
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)

    # Undercomplete decomposition.
    model = MiniBatchSparsePCA(n_components=8, random_state=rng)
    transformed = model.fit_transform(X)
    assert_equal(model.components_.shape, (8, 10))
    assert_equal(transformed.shape, (12, 8))

    # test overcomplete decomposition
    model = MiniBatchSparsePCA(n_components=13, random_state=rng)
    transformed = model.fit_transform(X)
    assert_equal(model.components_.shape, (13, 10))
    assert_equal(transformed.shape, (12, 13))
def test_mini_batch_fit_transform():
    # This test is disabled: the unconditional raise below skips it, and
    # everything after the raise is intentionally unreachable (kept for
    # when the skip is lifted).
    raise SkipTest("skipping mini_batch_fit_transform.")
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
                                   alpha=alpha).fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    if sys.platform == 'win32':  # fake parallelism for win32
        # Temporarily disable joblib's multiprocessing backend; the
        # try/finally guarantees it is restored even if the fit fails.
        import sklearn.externals.joblib.parallel as joblib_par
        _mp = joblib_par.multiprocessing
        joblib_par.multiprocessing = None
        try:
            U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
                                    random_state=0).fit(Y).transform(Y)
        finally:
            joblib_par.multiprocessing = _mp
    else:  # we can efficiently use parallelism
        U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
                                random_state=0).fit(Y).transform(Y)
    assert_true(not np.all(spca_lars.components_ == 0))
    assert_array_almost_equal(U1, U2)
    # Test that CD gives similar results
    spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
                                    random_state=0).fit(Y)
    assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
cjayb/mne-python | mne/epochs.py | 1 | 133716 | # -*- coding: utf-8 -*-
"""Tools for working with epoched data."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Denis Engemann <denis.engemann@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
from collections import Counter
from copy import deepcopy
import json
import operator
import os.path as op
import warnings
import numpy as np
from .io.write import (start_file, start_block, end_file, end_block,
write_int, write_float, write_float_matrix,
write_double_matrix, write_complex_float_matrix,
write_complex_double_matrix, write_id, write_string,
_get_split_size, _NEXT_FILE_BUFFER)
from .io.meas_info import read_meas_info, write_meas_info, _merge_info
from .io.open import fiff_open, _get_next_fname
from .io.tree import dir_tree_find
from .io.tag import read_tag, read_tag_info
from .io.constants import FIFF
from .io.fiff.raw import _get_fname_rep
from .io.pick import (pick_types, channel_indices_by_type, channel_type,
pick_channels, pick_info, _pick_data_channels,
_pick_aux_channels, _DATA_CH_TYPES_SPLIT,
_picks_to_idx)
from .io.proj import setup_proj, ProjMixin, _proj_equal
from .io.base import BaseRaw, TimeMixin
from .bem import _check_origin
from .evoked import EvokedArray, _check_decim
from .baseline import rescale, _log_rescale
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from .filter import detrend, FilterMixin
from .event import _read_events_fif, make_fixed_length_events
from .fixes import _get_args, rng_uniform
from .viz import (plot_epochs, plot_epochs_psd, plot_epochs_psd_topomap,
plot_epochs_image, plot_topo_image_epochs, plot_drop_log)
from .utils import (_check_fname, check_fname, logger, verbose,
_time_mask, check_random_state, warn, _pl,
sizeof_fmt, SizeMixin, copy_function_doc_to_method_doc,
_check_pandas_installed, _check_preload, GetEpochsMixin,
_prepare_read_metadata, _prepare_write_metadata,
_check_event_id, _gen_events, _check_option,
_check_combine, ShiftTimeMixin, _build_data_frame,
_check_pandas_index_arguments, _convert_times,
_scale_dataframe_data, _check_time_format, object_size,
_on_missing)
from .utils.docs import fill_doc
def _pack_reject_params(epochs):
reject_params = dict()
for key in ('reject', 'flat', 'reject_tmin', 'reject_tmax'):
val = getattr(epochs, key, None)
if val is not None:
reject_params[key] = val
return reject_params
def _save_split(epochs, fname, part_idx, n_parts, fmt):
    """Split epochs.

    Anything new added to this function also needs to be added to
    BaseEpochs.save to account for new file sizes.
    """
    # insert index in filename (part 0 keeps the original name)
    path, base = op.split(fname)
    idx = base.find('.')
    if part_idx > 0:
        fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
                                            base[idx + 1:]))

    # Name of the next split part, if any, so a reader can chain the files.
    next_fname = None
    if part_idx < n_parts - 1:
        next_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx + 1,
                                                 base[idx + 1:]))
        next_idx = part_idx + 1

    fid = start_file(fname)

    info = epochs.info
    meas_id = info['meas_id']

    start_block(fid, FIFF.FIFFB_MEAS)
    write_id(fid, FIFF.FIFF_BLOCK_ID)
    if info['meas_id'] is not None:
        write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])

    # Write measurement info
    write_meas_info(fid, info)

    # One or more evoked data sets
    start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
    start_block(fid, FIFF.FIFFB_MNE_EPOCHS)

    # write events out after getting data to ensure bad events are dropped
    data = epochs.get_data()

    _check_option('fmt', fmt, ['single', 'double'])

    # Pick the matrix writer matching the data's complexity and precision.
    if np.iscomplexobj(data):
        if fmt == 'single':
            write_function = write_complex_float_matrix
        elif fmt == 'double':
            write_function = write_complex_double_matrix
    else:
        if fmt == 'single':
            write_function = write_float_matrix
        elif fmt == 'double':
            write_function = write_double_matrix

    start_block(fid, FIFF.FIFFB_MNE_EVENTS)
    write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, epochs.events.T)
    write_string(fid, FIFF.FIFF_DESCRIPTION, _event_id_string(epochs.event_id))
    end_block(fid, FIFF.FIFFB_MNE_EVENTS)

    # Metadata
    if epochs.metadata is not None:
        start_block(fid, FIFF.FIFFB_MNE_METADATA)
        metadata = _prepare_write_metadata(epochs.metadata)
        write_string(fid, FIFF.FIFF_DESCRIPTION, metadata)
        end_block(fid, FIFF.FIFFB_MNE_METADATA)

    # First and last sample
    first = int(round(epochs.tmin * info['sfreq']))  # round just to be safe
    last = first + len(epochs.times) - 1
    write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
    write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)

    # save baseline
    if epochs.baseline is not None:
        bmin, bmax = epochs.baseline
        bmin = epochs.times[0] if bmin is None else bmin
        bmax = epochs.times[-1] if bmax is None else bmax
        write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
        write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)

    # The epochs itself
    # Undo per-channel calibration so raw integer-ish values go on disk.
    decal = np.empty(info['nchan'])
    for k in range(info['nchan']):
        decal[k] = 1.0 / (info['chs'][k]['cal'] *
                          info['chs'][k].get('scale', 1.0))

    data *= decal[np.newaxis, :, np.newaxis]

    write_function(fid, FIFF.FIFF_EPOCH, data)

    # undo modifications to data (data may be a view into epochs._data)
    data /= decal[np.newaxis, :, np.newaxis]
    write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG,
                 json.dumps(epochs.drop_log))

    reject_params = _pack_reject_params(epochs)
    if reject_params:
        write_string(fid, FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT,
                     json.dumps(reject_params))

    write_int(fid, FIFF.FIFF_MNE_EPOCHS_SELECTION,
              epochs.selection)

    # And now write the next file info in case epochs are split on disk
    if next_fname is not None and n_parts > 1:
        start_block(fid, FIFF.FIFFB_REF)
        write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
        write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
        if meas_id is not None:
            write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
        write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
        end_block(fid, FIFF.FIFFB_REF)

    end_block(fid, FIFF.FIFFB_MNE_EPOCHS)
    end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
    end_block(fid, FIFF.FIFFB_MEAS)
    end_file(fid)
def _event_id_string(event_id):
return ';'.join([k + ':' + str(v) for k, v in event_id.items()])
def _merge_events(events, event_id, selection):
"""Merge repeated events."""
event_id = event_id.copy()
new_events = events.copy()
event_idxs_to_delete = list()
unique_events, counts = np.unique(events[:, 0], return_counts=True)
for ev in unique_events[counts > 1]:
# indices at which the non-unique events happened
idxs = (events[:, 0] == ev).nonzero()[0]
# Figure out new value for events[:, 1]. Set to 0, if mixed vals exist
unique_priors = np.unique(events[idxs, 1])
new_prior = unique_priors[0] if len(unique_priors) == 1 else 0
# If duplicate time samples have same event val, "merge" == "drop"
# and no new event_id key will be created
ev_vals = events[idxs, 2]
if len(np.unique(ev_vals)) <= 1:
new_event_val = ev_vals[0]
# Else, make a new event_id for the merged event
else:
# Find all event_id keys involved in duplicated events. These
# keys will be merged to become a new entry in "event_id"
event_id_keys = list(event_id.keys())
event_id_vals = list(event_id.values())
new_key_comps = [event_id_keys[event_id_vals.index(value)]
for value in ev_vals]
# Check if we already have an entry for merged keys of duplicate
# events ... if yes, reuse it
for key in event_id:
if set(key.split('/')) == set(new_key_comps):
new_event_val = event_id[key]
break
# Else, find an unused value for the new key and make an entry into
# the event_id dict
else:
ev_vals = np.concatenate((list(event_id.values()),
events[:, 1:].flatten()),
axis=0)
new_event_val = np.setdiff1d(np.arange(1, 9999999),
ev_vals).min()
new_event_id_key = '/'.join(sorted(new_key_comps))
event_id[new_event_id_key] = int(new_event_val)
# Replace duplicate event times with merged event and remember which
# duplicate indices to delete later
new_events[idxs[0], 1] = new_prior
new_events[idxs[0], 2] = new_event_val
event_idxs_to_delete.extend(idxs[1:])
# Delete duplicate event idxs
new_events = np.delete(new_events, event_idxs_to_delete, 0)
new_selection = np.delete(selection, event_idxs_to_delete, 0)
return new_events, event_id, new_selection
def _handle_event_repeated(events, event_id, event_repeated, selection,
                           drop_log):
    """Handle events that share the same time sample.

    Parameters
    ----------
    events : array, shape (n_events, 3)
        Events as (sample, prior, value) rows.
    event_id : dict
        Mapping of event names to integer codes.
    event_repeated : 'error' | 'drop' | 'merge'
        Strategy for duplicate time samples: raise, keep only the first
        occurrence, or merge into a new combined event value.
    selection : array-like
        Indices of the originally selected events.
    drop_log : tuple of tuple of str
        Per-event log of reasons events were dropped.

    Returns
    -------
    events, event_id, selection, drop_log
        The de-duplicated versions of the inputs.

    Notes
    -----
    Note that drop_log will be modified inplace
    """
    assert len(events) == len(selection)
    selection = np.asarray(selection)

    unique_events, u_ev_idxs = np.unique(events[:, 0], return_index=True)

    # Return early if no duplicates
    if len(unique_events) == len(events):
        return events, event_id, selection, drop_log

    # Else, we have duplicates. Triage ...
    _check_option('event_repeated', event_repeated, ['error', 'drop', 'merge'])
    drop_log = list(drop_log)
    if event_repeated == 'error':
        # Fix: removed a stray trailing double-quote that was embedded in
        # this error message.
        raise RuntimeError('Event time samples were not unique. Consider '
                           'setting the `event_repeated` parameter.')

    elif event_repeated == 'drop':
        logger.info('Multiple event values for single event times found. '
                    'Keeping the first occurrence and dropping all others.')
        new_events = events[u_ev_idxs]
        new_selection = selection[u_ev_idxs]
        drop_ev_idxs = np.setdiff1d(selection, new_selection)
        for idx in drop_ev_idxs:
            drop_log[idx] = drop_log[idx] + ('DROP DUPLICATE',)
        selection = new_selection

    elif event_repeated == 'merge':
        logger.info('Multiple event values for single event times found. '
                    'Creating new event value to reflect simultaneous events.')
        new_events, event_id, new_selection = \
            _merge_events(events, event_id, selection)
        drop_ev_idxs = np.setdiff1d(selection, new_selection)
        for idx in drop_ev_idxs:
            drop_log[idx] = drop_log[idx] + ('MERGE DUPLICATE',)
        selection = new_selection
    drop_log = tuple(drop_log)

    # Remove obsolete kv-pairs from event_id after handling
    keys = new_events[:, 1:].flatten()
    event_id = {k: v for k, v in event_id.items() if v in keys}

    return new_events, event_id, selection, drop_log
@fill_doc
class BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin, ShiftTimeMixin,
SetChannelsMixin, InterpolationMixin, FilterMixin,
TimeMixin, SizeMixin, GetEpochsMixin):
"""Abstract base class for Epochs-type classes.
This class provides basic functionality and should never be instantiated
directly. See Epochs below for an explanation of the parameters.
Parameters
----------
info : dict
A copy of the info dict from the raw object.
data : ndarray | None
If ``None``, data will be read from the Raw object. If ndarray, must be
of shape (n_epochs, n_channels, n_times).
events : array of int, shape (n_events, 3)
See `Epochs` docstring.
event_id : int | list of int | dict | None
See `Epochs` docstring.
tmin : float
See `Epochs` docstring.
tmax : float
See `Epochs` docstring.
baseline : None | tuple of length 2
See `Epochs` docstring.
raw : Raw object
An instance of Raw.
%(picks_header)s
See `Epochs` docstring.
reject : dict | None
See `Epochs` docstring.
flat : dict | None
See `Epochs` docstring.
decim : int
See `Epochs` docstring.
reject_tmin : scalar | None
See `Epochs` docstring.
reject_tmax : scalar | None
See `Epochs` docstring.
detrend : int | None
See `Epochs` docstring.
proj : bool | 'delayed'
See `Epochs` docstring.
on_missing : str
See `Epochs` docstring.
preload_at_end : bool
Load all epochs from disk when creating the object
or wait before accessing each epoch (more memory
efficient but can be slower).
selection : iterable | None
Iterable of indices of selected epochs. If ``None``, will be
automatically generated, corresponding to all non-zero events.
drop_log : tuple | None
Tuple of tuple of strings indicating which epochs have been marked to
be ignored.
filename : str | None
The filename (if the epochs are read from disk).
metadata : instance of pandas.DataFrame | None
See :class:`mne.Epochs` docstring.
.. versionadded:: 0.16
event_repeated : str
See :class:`mne.Epochs` docstring.
.. versionadded:: 0.19
%(verbose)s
Notes
-----
The ``BaseEpochs`` class is public to allow for stable type-checking in
user code (i.e., ``isinstance(my_epochs, BaseEpochs)``) but should not be
used as a constructor for Epochs objects (use instead :class:`mne.Epochs`).
"""
@verbose
def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5,
             baseline=(None, 0), raw=None, picks=None, reject=None,
             flat=None, decim=1, reject_tmin=None, reject_tmax=None,
             detrend=None, proj=True, on_missing='raise',
             preload_at_end=False, selection=None, drop_log=None,
             filename=None, metadata=None, event_repeated='error',
             verbose=None):  # noqa: D102
    self.verbose = verbose

    # --- Validate and normalize the events array and event_id mapping ---
    if events is not None:  # RtEpochs can have events=None
        events_type = type(events)
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('ignore')  # deprecation for object array
            events = np.asarray(events)
        if not np.issubdtype(events.dtype, np.integer):
            raise TypeError('events should be a NumPy array of integers, '
                            'got {}'.format(events_type))
        event_id = _check_event_id(event_id, events)
    self.event_id = event_id
    del event_id

    if events is not None:  # RtEpochs can have events=None
        if events.ndim != 2 or events.shape[1] != 3:
            raise ValueError('events must be of shape (N, 3), got %s'
                             % (events.shape,))
        # Warn/raise (per on_missing) for event_id entries with no events.
        for key, val in self.event_id.items():
            if val not in events[:, 2]:
                msg = ('No matching events found for %s '
                       '(event id %i)' % (key, val))
                _on_missing(on_missing, msg)

        # ensure metadata matches original events size
        self.selection = np.arange(len(events))
        self.events = events
        self.metadata = metadata
        del events

        # Keep only events whose value appears in event_id.
        values = list(self.event_id.values())
        selected = np.where(np.in1d(self.events[:, 2], values))[0]
        if selection is None:
            selection = selected
        else:
            selection = np.array(selection, int)
            if selection.shape != (len(selected),):
                raise ValueError('selection must be shape %s got shape %s'
                                 % (selected.shape, selection.shape))
        self.selection = selection
        # Build the drop log: one tuple per original event, 'IGNORED' for
        # events whose value was not requested.
        if drop_log is None:
            self.drop_log = tuple(
                () if k in self.selection else ('IGNORED',)
                for k in range(max(len(self.events),
                                   max(self.selection) + 1)))
        else:
            self.drop_log = drop_log

        self.events = self.events[selected]

        # Resolve duplicate time samples per event_repeated policy.
        self.events, self.event_id, self.selection, self.drop_log = \
            _handle_event_repeated(
                self.events, self.event_id, event_repeated,
                self.selection, self.drop_log)

        # then subselect
        sub = np.where(np.in1d(selection, self.selection))[0]
        if isinstance(metadata, list):
            metadata = [metadata[s] for s in sub]
        elif metadata is not None:
            # assumes pandas-like metadata with positional .iloc indexing
            metadata = metadata.iloc[sub]
        self.metadata = metadata
        del metadata

        n_events = len(self.events)
        if n_events > 1:
            if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0:
                warn('The events passed to the Epochs constructor are not '
                     'chronologically ordered.', RuntimeWarning)

        if n_events > 0:
            logger.info('%d matching events found' % n_events)
        else:
            raise ValueError('No desired events found.')
    else:
        self.drop_log = list()
        self.selection = np.array([], int)
        self.metadata = metadata
        # do not set self.events here, let subclass do it

    # --- check reject_tmin and reject_tmax ---
    if (reject_tmin is not None) and (reject_tmin < tmin):
        raise ValueError("reject_tmin needs to be None or >= tmin")
    if (reject_tmax is not None) and (reject_tmax > tmax):
        raise ValueError("reject_tmax needs to be None or <= tmax")
    if (reject_tmin is not None) and (reject_tmax is not None):
        if reject_tmin >= reject_tmax:
            raise ValueError('reject_tmin needs to be < reject_tmax')

    # isinstance check guards against True/False being accepted as 1/0
    if (detrend not in [None, 0, 1]) or isinstance(detrend, bool):
        raise ValueError('detrend must be None, 0, or 1')

    # check that baseline is in available data
    if tmin > tmax:
        raise ValueError('tmin has to be less than or equal to tmax')
    _check_baseline(baseline, tmin, tmax, info['sfreq'])
    logger.info(_log_rescale(baseline))
    self.baseline = baseline
    self.reject_tmin = reject_tmin
    self.reject_tmax = reject_tmax
    self.detrend = detrend

    # --- Channel selection ---
    self._raw = raw
    info._check_consistency()
    self.picks = _picks_to_idx(info, picks, none='all', exclude=(),
                               allow_empty=False)
    self.info = pick_info(info, self.picks)
    del info
    self._current = 0

    # --- Preloaded data (if provided) must match the epoch length ---
    if data is None:
        self.preload = False
        self._data = None
    else:
        assert decim == 1
        if data.ndim != 3 or data.shape[2] != \
                round((tmax - tmin) * self.info['sfreq']) + 1:
            raise RuntimeError('bad data shape')
        self.preload = True
        self._data = data
    self._offset = None

    # Handle times
    sfreq = float(self.info['sfreq'])
    start_idx = int(round(tmin * sfreq))
    self._raw_times = np.arange(start_idx,
                                int(round(tmax * sfreq)) + 1) / sfreq
    self._set_times(self._raw_times)

    # this method sets the self.times property
    self._decim = 1
    self.decimate(decim)

    # setup epoch rejection
    self.reject = None
    self.flat = None
    self._reject_setup(reject, flat)

    # do the rest
    valid_proj = [True, 'delayed', False]
    if proj not in valid_proj:
        raise ValueError('"proj" must be one of %s, not %s'
                         % (valid_proj, proj))
    if proj == 'delayed':
        self._do_delayed_proj = True
        logger.info('Entering delayed SSP mode.')
    else:
        self._do_delayed_proj = False
    activate = False if self._do_delayed_proj else proj
    self._projector, self.info = setup_proj(self.info, False,
                                            activate=activate)
    if preload_at_end:
        assert self._data is None
        assert self.preload is False
        self.load_data()  # this will do the projection
    elif proj is True and self._projector is not None and data is not None:
        # let's make sure we project if data was provided and proj
        # requested
        # we could do this with np.einsum, but iteration should be
        # more memory safe in most instances
        for ii, epoch in enumerate(self._data):
            self._data[ii] = np.dot(self._projector, epoch)
    self._filename = str(filename) if filename is not None else filename
    self._check_consistency()
def _check_consistency(self):
    """Check invariants of epochs object."""
    # Each kept event has a selection entry and an empty drop-log tuple;
    # dropped events carry a non-empty tuple of reason strings.
    assert len(self.selection) == len(self.events)
    assert len(self.selection) == sum(
        (len(dl) == 0 for dl in self.drop_log))
    assert len(self.drop_log) >= len(self.events)
    # The times array must have been frozen via _set_times.
    assert hasattr(self, '_times_readonly')
    assert not self.times.flags['WRITEABLE']
    # drop_log must be an immutable tuple of tuples of strings.
    assert isinstance(self.drop_log, tuple)
    assert all(isinstance(log, tuple) for log in self.drop_log)
    assert all(isinstance(s, str) for log in self.drop_log for s in log)
def load_data(self):
    """Load the data if not already preloaded.

    Returns
    -------
    epochs : instance of Epochs
        The epochs object.

    Notes
    -----
    This function operates in-place.

    .. versionadded:: 0.10.0
    """
    if self.preload:
        return self
    self._data = self._get_data()
    self.preload = True
    # Decimation was already applied while reading, so reset its state.
    self._decim_slice = slice(None, None, None)
    self._decim = 1
    self._raw_times = self.times
    assert self._data.shape[-1] == len(self.times)
    self._raw = None  # shouldn't need it anymore
    return self
    @verbose
    def decimate(self, decim, offset=0, verbose=None):
        """Decimate the epochs.

        Parameters
        ----------
        %(decim)s
        %(decim_offset)s
        %(verbose_meth)s

        Returns
        -------
        epochs : instance of Epochs
            The decimated Epochs object.

        See Also
        --------
        mne.Evoked.decimate
        mne.Epochs.resample
        mne.io.Raw.resample

        Notes
        -----
        %(decim_notes)s

        If ``decim`` is 1, this method does not copy the underlying data.

        .. versionadded:: 0.10.0

        References
        ----------
        .. footbibliography::
        """
        # validate arguments and compute the resulting sampling frequency
        decim, offset, new_sfreq = _check_decim(self.info, decim, offset)
        # sample index of time zero in the original (undecimated) data;
        # used to keep decimation phase-consistent across repeated calls
        start_idx = int(round(-self._raw_times[0] * (self.info['sfreq'] *
                                                     self._decim)))
        self._decim *= decim
        i_start = start_idx % self._decim + offset
        decim_slice = slice(i_start, None, self._decim)
        self.info['sfreq'] = new_sfreq
        if self.preload:
            if decim != 1:
                # data are in memory: subsample immediately
                self._data = self._data[:, :, decim_slice].copy()
                self._raw_times = self._raw_times[decim_slice].copy()
            else:
                self._data = np.ascontiguousarray(self._data)
            # preloaded data are already decimated, so reset the lazy slice
            self._decim_slice = slice(None)
            self._decim = 1
        else:
            # defer the decimation until the data are actually read
            self._decim_slice = decim_slice
        self._set_times(self._raw_times[self._decim_slice])
        return self
    @verbose
    def apply_baseline(self, baseline=(None, 0), verbose=None):
        """Baseline correct epochs.

        Parameters
        ----------
        %(baseline)s
            Defaults to ``(None, 0)``, i.e. beginning of the data until
            time point zero.
        %(verbose_meth)s

        Returns
        -------
        epochs : instance of Epochs
            The baseline-corrected Epochs object.

        Notes
        -----
        Baseline correction can be done multiple times.

        .. versionadded:: 0.10.0
        """
        # validate the baseline interval against the epoch time window
        _check_baseline(baseline, self.tmin, self.tmax, self.info['sfreq'])
        if self.preload:
            # correct data and auxiliary channels already held in memory
            picks = _pick_data_channels(self.info, exclude=[],
                                        with_ref_meg=True)
            picks_aux = _pick_aux_channels(self.info, exclude=[])
            picks = np.sort(np.concatenate((picks, picks_aux)))
            rescale(self._data, self.times, baseline, copy=False, picks=picks)
        else:  # logging happens in "rescale" in "if" branch
            logger.info(_log_rescale(baseline))
        # remember the interval so lazily-loaded epochs get corrected too
        self.baseline = baseline
        return self
    def _reject_setup(self, reject, flat):
        """Set self._reject_time and self._channel_type_idx.

        Validates the ``reject`` (peak-to-peak maxima) and ``flat``
        (peak-to-peak minima) dictionaries and requires that repeated
        calls only tighten the thresholds, never relax them.
        """
        idx = channel_indices_by_type(self.info)
        # work on copies so the caller's dicts are never mutated
        reject = deepcopy(reject) if reject is not None else dict()
        flat = deepcopy(flat) if flat is not None else dict()
        for rej, kind in zip((reject, flat), ('reject', 'flat')):
            if not isinstance(rej, dict):
                raise TypeError('reject and flat must be dict or None, not %s'
                                % type(rej))
            bads = set(rej.keys()) - set(idx.keys())
            if len(bads) > 0:
                raise KeyError('Unknown channel types found in %s: %s'
                               % (kind, bads))
        for key in idx.keys():
            # don't throw an error if rejection/flat would do nothing
            if len(idx[key]) == 0 and (np.isfinite(reject.get(key, np.inf)) or
                                       flat.get(key, -1) >= 0):
                # This is where we could eventually add e.g.
                # self.allow_missing_reject_keys check to allow users to
                # provide keys that don't exist in data
                raise ValueError("No %s channel found. Cannot reject based on "
                                 "%s." % (key.upper(), key.upper()))
        # check for invalid values
        for rej, kind in zip((reject, flat), ('Rejection', 'Flat')):
            for key, val in rej.items():
                if val is None or val < 0:
                    raise ValueError('%s value must be a number >= 0, not "%s"'
                                     % (kind, val))
        # now check to see if our rejection and flat are getting more
        # restrictive
        old_reject = self.reject if self.reject is not None else dict()
        old_flat = self.flat if self.flat is not None else dict()
        bad_msg = ('{kind}["{key}"] == {new} {op} {old} (old value), new '
                   '{kind} values must be at least as stringent as '
                   'previous ones')
        for key in set(reject.keys()).union(old_reject.keys()):
            old = old_reject.get(key, np.inf)
            new = reject.get(key, np.inf)
            if new > old:
                raise ValueError(bad_msg.format(kind='reject', key=key,
                                                new=new, old=old, op='>'))
        for key in set(flat.keys()).union(old_flat.keys()):
            old = old_flat.get(key, -np.inf)
            new = flat.get(key, -np.inf)
            if new < old:
                raise ValueError(bad_msg.format(kind='flat', key=key,
                                                new=new, old=old, op='<'))
        # after validation, set parameters
        self._bad_dropped = False
        self._channel_type_idx = idx
        self.reject = reject if len(reject) > 0 else None
        self.flat = flat if len(flat) > 0 else None
        if (self.reject_tmin is None) and (self.reject_tmax is None):
            self._reject_time = None
        else:
            # convert the rejection time window into a sample slice
            if self.reject_tmin is None:
                reject_imin = None
            else:
                idxs = np.nonzero(self.times >= self.reject_tmin)[0]
                reject_imin = idxs[0]
            if self.reject_tmax is None:
                reject_imax = None
            else:
                idxs = np.nonzero(self.times <= self.reject_tmax)[0]
                reject_imax = idxs[-1]
            self._reject_time = slice(reject_imin, reject_imax)
@verbose
def _is_good_epoch(self, data, verbose=None):
"""Determine if epoch is good."""
if isinstance(data, str):
return False, (data,)
if data is None:
return False, ('NO_DATA',)
n_times = len(self.times)
if data.shape[1] < n_times:
# epoch is too short ie at the end of the data
return False, ('TOO_SHORT',)
if self.reject is None and self.flat is None:
return True, None
else:
if self._reject_time is not None:
data = data[:, self._reject_time]
return _is_good(data, self.ch_names, self._channel_type_idx,
self.reject, self.flat, full_report=True,
ignore_chs=self.info['bads'])
    @verbose
    def _detrend_offset_decim(self, epoch, verbose=None):
        """Aux Function: detrend, baseline correct, offset, decim.

        Note: operates inplace
        """
        # strings/None are drop reasons, not data; pass them through
        if (epoch is None) or isinstance(epoch, str):
            return epoch
        # Detrend
        if self.detrend is not None:
            picks = _pick_data_channels(self.info, exclude=[])
            epoch[picks] = detrend(epoch[picks], self.detrend, axis=1)
        # Baseline correct
        picks = pick_types(self.info, meg=True, eeg=True, stim=False,
                           ref_meg=True, eog=True, ecg=True, seeg=True,
                           emg=True, bio=True, ecog=True, fnirs=True,
                           exclude=[])
        epoch[picks] = rescale(epoch[picks], self._raw_times, self.baseline,
                               copy=False, verbose=False)
        # Decimate if necessary (i.e., epoch not preloaded)
        epoch = epoch[:, self._decim_slice]
        # handle offset (e.g., accumulated evoked subtraction)
        if self._offset is not None:
            epoch += self._offset
        return epoch
def iter_evoked(self, copy=False):
"""Iterate over epochs as a sequence of Evoked objects.
The Evoked objects yielded will each contain a single epoch (i.e., no
averaging is performed).
This method resets the object iteration state to the first epoch.
Parameters
----------
copy : bool
If False copies of data and measurement info will be omitted
to save time.
"""
self._current = 0
while True:
try:
out = self.__next__(True)
except StopIteration:
break
data, event_id = out
tmin = self.times[0]
info = self.info
if copy:
info = deepcopy(self.info)
data = data.copy()
yield EvokedArray(data, info, tmin, comment=str(event_id))
    def subtract_evoked(self, evoked=None):
        """Subtract an evoked response from each epoch.

        Can be used to exclude the evoked response when analyzing induced
        activity, see e.g. [1]_.

        Parameters
        ----------
        evoked : instance of Evoked | None
            The evoked response to subtract. If None, the evoked response
            is computed from Epochs itself.

        Returns
        -------
        self : instance of Epochs
            The modified instance (instance is also modified inplace).

        References
        ----------
        .. [1] David et al. "Mechanisms of evoked and induced responses in
           MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006.
        """
        logger.info('Subtracting Evoked from Epochs')
        if evoked is None:
            picks = _pick_data_channels(self.info, exclude=[])
            evoked = self.average(picks)
        # find the indices of the channels to use
        picks = pick_channels(evoked.ch_names, include=self.ch_names)
        # make sure the omitted channels are not data channels
        if len(picks) < len(self.ch_names):
            sel_ch = [evoked.ch_names[ii] for ii in picks]
            diff_ch = list(set(self.ch_names).difference(sel_ch))
            diff_idx = [self.ch_names.index(ch) for ch in diff_ch]
            diff_types = [channel_type(self.info, idx) for idx in diff_idx]
            bad_idx = [diff_types.index(t) for t in diff_types if t in
                       _DATA_CH_TYPES_SPLIT]
            if len(bad_idx) > 0:
                bad_str = ', '.join([diff_ch[ii] for ii in bad_idx])
                raise ValueError('The following data channels are missing '
                                 'in the evoked response: %s' % bad_str)
            logger.info('    The following channels are not included in the '
                        'subtraction: %s' % ', '.join(diff_ch))
        # make sure the times match
        if (len(self.times) != len(evoked.times) or
                np.max(np.abs(self.times - evoked.times)) >= 1e-7):
            raise ValueError('Epochs and Evoked object do not contain '
                             'the same time points.')
        # handle SSPs
        if not self.proj and evoked.proj:
            warn('Evoked has SSP applied while Epochs has not.')
        if self.proj and not evoked.proj:
            evoked = evoked.copy().apply_proj()
        # find the indices of the channels to use in Epochs
        ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks]
        # do the subtraction
        if self.preload:
            # broadcast the evoked response over the epochs axis
            self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :]
        else:
            # data not in memory: record the subtraction as an offset that
            # _detrend_offset_decim applies when each epoch is read
            if self._offset is None:
                self._offset = np.zeros((len(self.ch_names), len(self.times)),
                                        dtype=np.float64)
            self._offset[ep_picks] -= evoked.data[picks]
        logger.info('[done]')
        return self
@fill_doc
def average(self, picks=None, method="mean"):
"""Compute an average over epochs.
Parameters
----------
%(picks_all_data)s
method : str | callable
How to combine the data. If "mean"/"median", the mean/median
are returned.
Otherwise, must be a callable which, when passed an array of shape
(n_epochs, n_channels, n_time) returns an array of shape
(n_channels, n_time).
Note that due to file type limitations, the kind for all
these will be "average".
Returns
-------
evoked : instance of Evoked | dict of Evoked
The averaged epochs.
Notes
-----
Computes an average of all epochs in the instance, even if
they correspond to different conditions. To average by condition,
do ``epochs[condition].average()`` for each condition separately.
When picks is None and epochs contain only ICA channels, no channels
are selected, resulting in an error. This is because ICA channels
are not considered data channels (they are of misc type) and only data
channels are selected when picks is None.
The ``method`` parameter allows e.g. robust averaging.
For example, one could do:
>>> from scipy.stats import trim_mean # doctest:+SKIP
>>> trim = lambda x: trim_mean(x, 0.1, axis=0) # doctest:+SKIP
>>> epochs.average(method=trim) # doctest:+SKIP
This would compute the trimmed mean.
"""
return self._compute_aggregate(picks=picks, mode=method)
@fill_doc
def standard_error(self, picks=None):
"""Compute standard error over epochs.
Parameters
----------
%(picks_all_data)s
Returns
-------
evoked : instance of Evoked
The standard error over epochs.
"""
return self._compute_aggregate(picks, "std")
def _compute_aggregate(self, picks, mode='mean'):
"""Compute the mean or std over epochs and return Evoked."""
# if instance contains ICA channels they won't be included unless picks
# is specified
if picks is None:
check_ICA = [x.startswith('ICA') for x in self.ch_names]
if np.all(check_ICA):
raise TypeError('picks must be specified (i.e. not None) for '
'ICA channel data')
elif np.any(check_ICA):
warn('ICA channels will not be included unless explicitly '
'selected in picks')
n_channels = len(self.ch_names)
n_times = len(self.times)
if self.preload:
n_events = len(self.events)
fun = _check_combine(mode, valid=('mean', 'median', 'std'))
data = fun(self._data)
assert len(self.events) == len(self._data)
if data.shape != self._data.shape[1:]:
raise RuntimeError(
'You passed a function that resulted n data of shape {}, '
'but it should be {}.'.format(
data.shape, self._data.shape[1:]))
else:
if mode not in {"mean", "std"}:
raise ValueError("If data are not preloaded, can only compute "
"mean or standard deviation.")
data = np.zeros((n_channels, n_times))
n_events = 0
for e in self:
if np.iscomplexobj(e):
data = data.astype(np.complex128)
data += e
n_events += 1
if n_events > 0:
data /= n_events
else:
data.fill(np.nan)
# convert to stderr if requested, could do in one pass but do in
# two (slower) in case there are large numbers
if mode == "std":
data_mean = data.copy()
data.fill(0.)
for e in self:
data += (e - data_mean) ** 2
data = np.sqrt(data / n_events)
if mode == "std":
kind = 'standard_error'
data /= np.sqrt(n_events)
else:
kind = "average"
return self._evoked_from_epoch_data(data, self.info, picks, n_events,
kind, self._name)
@property
def _name(self):
"""Give a nice string representation based on event ids."""
if len(self.event_id) == 1:
comment = next(iter(self.event_id.keys()))
else:
count = Counter(self.events[:, 2])
comments = list()
for key, value in self.event_id.items():
comments.append('%.2f * %s' % (
float(count[value]) / len(self.events), key))
comment = ' + '.join(comments)
return comment
def _evoked_from_epoch_data(self, data, info, picks, n_events, kind,
comment):
"""Create an evoked object from epoch data."""
info = deepcopy(info)
evoked = EvokedArray(data, info, tmin=self.times[0], comment=comment,
nave=n_events, kind=kind, verbose=self.verbose)
# XXX: above constructor doesn't recreate the times object precisely
evoked.times = self.times.copy()
# pick channels
picks = _picks_to_idx(self.info, picks, 'data_or_ica', ())
ch_names = [evoked.ch_names[p] for p in picks]
evoked.pick_channels(ch_names)
if len(evoked.info['ch_names']) == 0:
raise ValueError('No data channel found when averaging.')
if evoked.nave < 1:
warn('evoked object is empty (based on less than 1 epoch)')
return evoked
    @property
    def ch_names(self):
        """Channel names (list of str), read from ``self.info``."""
        return self.info['ch_names']
    @copy_function_doc_to_method_doc(plot_epochs)
    def plot(self, picks=None, scalings=None, n_epochs=20, n_channels=20,
             title=None, events=None, event_colors=None, order=None,
             show=True, block=False, decim='auto', noise_cov=None,
             butterfly=False, show_scrollbars=True, epoch_colors=None,
             event_id=None):
        # thin pass-through; the docstring is copied from plot_epochs by
        # the decorator above
        return plot_epochs(self, picks=picks, scalings=scalings,
                           n_epochs=n_epochs, n_channels=n_channels,
                           title=title, events=events,
                           event_colors=event_colors, order=order,
                           show=show, block=block, decim=decim,
                           noise_cov=noise_cov, butterfly=butterfly,
                           show_scrollbars=show_scrollbars,
                           epoch_colors=epoch_colors, event_id=event_id)
    @copy_function_doc_to_method_doc(plot_epochs_psd)
    def plot_psd(self, fmin=0, fmax=np.inf, tmin=None, tmax=None,
                 proj=False, bandwidth=None, adaptive=False, low_bias=True,
                 normalization='length', picks=None, ax=None, color='black',
                 xscale='linear', area_mode='std', area_alpha=0.33,
                 dB=True, estimate='auto', show=True, n_jobs=1,
                 average=False, line_alpha=None, spatial_colors=True,
                 sphere=None, verbose=None):
        # thin pass-through; the docstring is copied from plot_epochs_psd
        # by the decorator above
        return plot_epochs_psd(self, fmin=fmin, fmax=fmax, tmin=tmin,
                               tmax=tmax, proj=proj, bandwidth=bandwidth,
                               adaptive=adaptive, low_bias=low_bias,
                               normalization=normalization, picks=picks, ax=ax,
                               color=color, xscale=xscale, area_mode=area_mode,
                               area_alpha=area_alpha, dB=dB, estimate=estimate,
                               show=show, n_jobs=n_jobs, average=average,
                               line_alpha=line_alpha,
                               spatial_colors=spatial_colors, sphere=sphere,
                               verbose=verbose)
    @copy_function_doc_to_method_doc(plot_epochs_psd_topomap)
    def plot_psd_topomap(self, bands=None, vmin=None, vmax=None, tmin=None,
                         tmax=None, proj=False, bandwidth=None, adaptive=False,
                         low_bias=True, normalization='length', ch_type=None,
                         cmap=None, agg_fun=None, dB=True,
                         n_jobs=1, normalize=False, cbar_fmt='auto',
                         outlines='head', axes=None, show=True,
                         sphere=None, vlim=(None, None), verbose=None):
        # thin pass-through; the docstring is copied from
        # plot_epochs_psd_topomap by the decorator above
        return plot_epochs_psd_topomap(
            self, bands=bands, vmin=vmin, vmax=vmax, tmin=tmin, tmax=tmax,
            proj=proj, bandwidth=bandwidth, adaptive=adaptive,
            low_bias=low_bias, normalization=normalization, ch_type=ch_type,
            cmap=cmap, agg_fun=agg_fun, dB=dB, n_jobs=n_jobs,
            normalize=normalize, cbar_fmt=cbar_fmt, outlines=outlines,
            axes=axes, show=show, sphere=sphere, vlim=vlim, verbose=verbose)
    @copy_function_doc_to_method_doc(plot_topo_image_epochs)
    def plot_topo_image(self, layout=None, sigma=0., vmin=None, vmax=None,
                        colorbar=None, order=None, cmap='RdBu_r',
                        layout_scale=.95, title=None, scalings=None,
                        border='none', fig_facecolor='k', fig_background=None,
                        font_color='w', show=True):
        # thin pass-through; the docstring is copied from
        # plot_topo_image_epochs by the decorator above
        return plot_topo_image_epochs(
            self, layout=layout, sigma=sigma, vmin=vmin, vmax=vmax,
            colorbar=colorbar, order=order, cmap=cmap,
            layout_scale=layout_scale, title=title, scalings=scalings,
            border=border, fig_facecolor=fig_facecolor,
            fig_background=fig_background, font_color=font_color, show=show)
    @verbose
    def drop_bad(self, reject='existing', flat='existing', verbose=None):
        """Drop bad epochs without retaining the epochs data.

        Should be used before slicing operations.

        .. warning:: This operation is slow since all epochs have to be read
                     from disk. To avoid reading epochs from disk multiple
                     times, use :meth:`mne.Epochs.load_data()`.

        Parameters
        ----------
        reject : dict | str | None
            Rejection parameters based on peak-to-peak amplitude.
            Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
            If reject is None then no rejection is done. If 'existing',
            then the rejection parameters set at instantiation are used.
        flat : dict | str | None
            Rejection parameters based on flatness of signal.
            Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
            are floats that set the minimum acceptable peak-to-peak amplitude.
            If flat is None then no rejection is done. If 'existing',
            then the flat parameters set at instantiation are used.
        %(verbose_meth)s

        Returns
        -------
        epochs : instance of Epochs
            The epochs with bad epochs dropped. Operates in-place.

        Notes
        -----
        Dropping bad epochs can be done multiple times with different
        ``reject`` and ``flat`` parameters. However, once an epoch is
        dropped, it is dropped forever, so if more lenient thresholds may
        subsequently be applied, `epochs.copy <mne.Epochs.copy>` should be
        used.
        """
        if reject == 'existing':
            # nothing new to check if bads were already dropped with the
            # same (existing) parameters
            if flat == 'existing' and self._bad_dropped:
                return
            reject = self.reject
        if flat == 'existing':
            flat = self.flat
        if any(isinstance(rej, str) and rej != 'existing' for
               rej in (reject, flat)):
            raise ValueError('reject and flat, if strings, must be "existing"')
        self._reject_setup(reject, flat)
        # out=False reads and drops epochs without caching all the data
        self._get_data(out=False)
        return self
def drop_log_stats(self, ignore=('IGNORED',)):
"""Compute the channel stats based on a drop_log from Epochs.
Parameters
----------
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
See Also
--------
plot_drop_log
"""
return _drop_log_stats(self.drop_log, ignore)
    @copy_function_doc_to_method_doc(plot_drop_log)
    def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown',
                      color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
                      show=True):
        # the drop log is only complete after bads have been dropped
        if not self._bad_dropped:
            raise ValueError("You cannot use plot_drop_log since bad "
                             "epochs have not yet been dropped. "
                             "Use epochs.drop_bad().")
        # docstring is copied from plot_drop_log by the decorator above
        return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
                             color=color, width=width, ignore=ignore,
                             show=show)
    @copy_function_doc_to_method_doc(plot_epochs_image)
    def plot_image(self, picks=None, sigma=0., vmin=None, vmax=None,
                   colorbar=True, order=None, show=True, units=None,
                   scalings=None, cmap=None, fig=None, axes=None,
                   overlay_times=None, combine=None, group_by=None,
                   evoked=True, ts_args=None, title=None, clear=False):
        # thin pass-through; the docstring is copied from plot_epochs_image
        # by the decorator above
        return plot_epochs_image(self, picks=picks, sigma=sigma, vmin=vmin,
                                 vmax=vmax, colorbar=colorbar, order=order,
                                 show=show, units=units, scalings=scalings,
                                 cmap=cmap, fig=fig, axes=axes,
                                 overlay_times=overlay_times, combine=combine,
                                 group_by=group_by, evoked=evoked,
                                 ts_args=ts_args, title=title, clear=clear)
    @verbose
    def drop(self, indices, reason='USER', verbose=None):
        """Drop epochs based on indices or boolean mask.

        .. note:: The indices refer to the current set of undropped epochs
                  rather than the complete set of dropped and undropped epochs.
                  They are therefore not necessarily consistent with any
                  external indices (e.g., behavioral logs). To drop epochs
                  based on external criteria, do not use the ``preload=True``
                  flag when constructing an Epochs object, and call this
                  method before calling the :meth:`mne.Epochs.drop_bad` or
                  :meth:`mne.Epochs.load_data` methods.

        Parameters
        ----------
        indices : array of int or bool
            Set epochs to remove by specifying indices to remove or a boolean
            mask to apply (where True values get removed). Events are
            correspondingly modified.
        reason : str
            Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc).
            Default: 'USER'.
        %(verbose_meth)s

        Returns
        -------
        epochs : instance of Epochs
            The epochs with indices dropped. Operates in-place.
        """
        indices = np.atleast_1d(indices)
        if indices.ndim > 1:
            raise ValueError("indices must be a scalar or a 1-d array")
        # a boolean mask is converted to the indices of its True entries
        if indices.dtype == bool:
            indices = np.where(indices)[0]
        # normalize negative indices, then validate the resulting range
        try_idx = np.where(indices < 0, indices + len(self.events), indices)
        out_of_bounds = (try_idx < 0) | (try_idx >= len(self.events))
        if out_of_bounds.any():
            first = indices[out_of_bounds][0]
            raise IndexError("Epoch index %d is out of bounds" % first)
        keep = np.setdiff1d(np.arange(len(self.events)), try_idx)
        self._getitem(keep, reason, copy=False, drop_event_id=False)
        count = len(try_idx)
        logger.info('Dropped %d epoch%s: %s' %
                    (count, _pl(count), ', '.join(map(str, np.sort(try_idx)))))
        return self
    def _get_epoch_from_raw(self, idx, verbose=None):
        """Get a given epoch from disk.

        Abstract hook: subclasses backed by on-disk data must override
        this; the base implementation always raises.
        """
        raise NotImplementedError
def _project_epoch(self, epoch):
"""Process a raw epoch based on the delayed param."""
# whenever requested, the first epoch is being projected.
if (epoch is None) or isinstance(epoch, str):
# can happen if t < 0 or reject based on annotations
return epoch
proj = self._do_delayed_proj or self.proj
if self._projector is not None and proj is True:
epoch = np.dot(self._projector, epoch)
return epoch
    @verbose
    def _get_data(self, out=True, picks=None, item=None, verbose=None):
        """Load all data, dropping bad epochs along the way.

        Parameters
        ----------
        out : bool
            Return the data. Setting this to False is used to reject bad
            epochs without caching all the data, which saves memory.
        %(picks_all)s
        %(verbose_meth)s
        """
        if item is None:
            item = slice(None)
        elif not self._bad_dropped:
            raise ValueError(
                'item must be None in epochs.get_data() unless bads have been '
                'dropped. Consider using epochs.drop_bad().')
        select = self._item_to_select(item)  # indices or slice
        use_idx = np.arange(len(self.events))[select]
        n_events = len(use_idx)
        # in case there are no good events
        if self.preload:
            # we will store our result in our existing array
            data = self._data
        else:
            # we start out with an empty array, allocate only if necessary
            data = np.empty((0, len(self.info['ch_names']), len(self.times)))
            logger.info('Loading data for %s events and %s original time '
                        'points ...' % (n_events, len(self._raw_times)))
        if self._bad_dropped:
            # fast path: no rejection needed, just select (and maybe pick)
            if not out:
                return
            if self.preload:
                data = data[select]
                if picks is None:
                    return data
                else:
                    picks = _picks_to_idx(self.info, picks)
                    return data[:, picks]
            # we need to load from disk, drop, and return data
            for ii, idx in enumerate(use_idx):
                # faster to pre-allocate memory here
                epoch_noproj = self._get_epoch_from_raw(idx)
                epoch_noproj = self._detrend_offset_decim(epoch_noproj)
                if self._do_delayed_proj:
                    epoch_out = epoch_noproj
                else:
                    epoch_out = self._project_epoch(epoch_noproj)
                if ii == 0:
                    # allocate once we know the dtype of a real epoch
                    data = np.empty((n_events, len(self.ch_names),
                                     len(self.times)), dtype=epoch_out.dtype)
                data[ii] = epoch_out
        else:
            # bads need to be dropped, this might occur after a preload
            # e.g., when calling drop_bad w/new params
            good_idx = []
            n_out = 0
            drop_log = list(self.drop_log)
            assert n_events == len(self.selection)
            for idx, sel in enumerate(self.selection):
                if self.preload:  # from memory
                    if self._do_delayed_proj:
                        epoch_noproj = self._data[idx]
                        epoch = self._project_epoch(epoch_noproj)
                    else:
                        epoch_noproj = None
                        epoch = self._data[idx]
                else:  # from disk
                    epoch_noproj = self._get_epoch_from_raw(idx)
                    epoch_noproj = self._detrend_offset_decim(epoch_noproj)
                    epoch = self._project_epoch(epoch_noproj)
                # in delayed mode we keep the unprojected epoch, but the
                # goodness check below always runs on the projected one
                epoch_out = epoch_noproj if self._do_delayed_proj else epoch
                is_good, bad_tuple = self._is_good_epoch(epoch)
                if not is_good:
                    assert isinstance(bad_tuple, tuple)
                    assert all(isinstance(x, str) for x in bad_tuple)
                    drop_log[sel] = drop_log[sel] + bad_tuple
                    continue
                good_idx.append(idx)
                # store the epoch if there is a reason to (output or update)
                if out or self.preload:
                    # faster to pre-allocate, then trim as necessary
                    if n_out == 0 and not self.preload:
                        data = np.empty((n_events, epoch_out.shape[0],
                                         epoch_out.shape[1]),
                                        dtype=epoch_out.dtype, order='C')
                    data[n_out] = epoch_out
                    n_out += 1
            self.drop_log = tuple(drop_log)
            del drop_log
            self._bad_dropped = True
            logger.info("%d bad epochs dropped" % (n_events - len(good_idx)))
            # adjust the data size if there is a reason to (output or update)
            if out or self.preload:
                if data.flags['OWNDATA'] and data.flags['C_CONTIGUOUS']:
                    # in-place shrink avoids a copy of the kept epochs
                    data.resize((n_out,) + data.shape[1:], refcheck=False)
                else:
                    data = data[:n_out]
                if self.preload:
                    self._data = data
            # Now update our properties (except data, which is already fixed)
            self._getitem(good_idx, None, copy=False, drop_event_id=False,
                          select_data=False)
        if out:
            if picks is None:
                return data
            else:
                picks = _picks_to_idx(self.info, picks)
                return data[:, picks]
        else:
            return None
@fill_doc
def get_data(self, picks=None, item=None):
"""Get all epochs as a 3D array.
Parameters
----------
%(picks_all)s
item : slice | array-like | str | list | None
The items to get. See :meth:`mne.Epochs.__getitem__` for
a description of valid options. This can be substantially faster
for obtaining an ndarray than :meth:`~mne.Epochs.__getitem__`
for repeated access on large Epochs objects.
None (default) is an alias for ``slice(None)``.
.. versionadded:: 0.20
Returns
-------
data : array of shape (n_epochs, n_channels, n_times)
A view on epochs data.
"""
return self._get_data(picks=picks, item=item)
    @property
    def times(self):
        """Time vector in seconds (read-only; updated via ``_set_times``)."""
        return self._times_readonly
def _set_times(self, times):
"""Set self._times_readonly (and make it read only)."""
# naming used to indicate that it shouldn't be
# changed directly, but rather via this method
self._times_readonly = times.copy()
self._times_readonly.flags['WRITEABLE'] = False
    @property
    def tmin(self):
        """First time point, in seconds."""
        return self.times[0]
    @property
    def filename(self):
        """The filename (str, or None if not read from disk)."""
        return self._filename
    @property
    def tmax(self):
        """Last time point, in seconds."""
        return self.times[-1]
    def __repr__(self):
        """Build string representation."""
        s = ' %s events ' % len(self.events)
        s += '(all good)' if self._bad_dropped else '(good & bad)'
        s += ', %g - %g sec' % (self.tmin, self.tmax)
        s += ', baseline '
        if self.baseline is None:
            s += 'off'
        else:
            s += '[%s, %s]' % tuple(['None' if b is None else ('%g' % b)
                                     for b in self.baseline])
        s += ', ~%s' % (sizeof_fmt(self._size),)
        s += ', data%s loaded' % ('' if self.preload else ' not')
        s += ', with metadata' if self.metadata is not None else ''
        # per-condition event counts, reported one per line
        counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
                  for k, v in sorted(self.event_id.items())]
        if len(self.event_id) > 0:
            s += ',' + '\n '.join([''] + counts)
        class_name = self.__class__.__name__
        # show the public name for the private base class
        class_name = 'Epochs' if class_name == 'BaseEpochs' else class_name
        return '<%s | %s>' % (class_name, s)
    @fill_doc
    def crop(self, tmin=None, tmax=None, include_tmax=True):
        """Crop a time interval from the epochs.

        Parameters
        ----------
        tmin : float | None
            Start time of selection in seconds.
        tmax : float | None
            End time of selection in seconds.
        %(include_tmax)s

        Returns
        -------
        epochs : instance of Epochs
            The cropped epochs object, modified in-place.

        Notes
        -----
        %(notes_tmax_included_by_default)s
        """
        # XXX this could be made to work on non-preloaded data...
        _check_preload(self, 'Modifying data of epochs')
        # clamp out-of-range limits to the available time window
        if tmin is None:
            tmin = self.tmin
        elif tmin < self.tmin:
            warn('tmin is not in epochs time interval. tmin is set to '
                 'epochs.tmin')
            tmin = self.tmin
        if tmax is None:
            tmax = self.tmax
        elif tmax > self.tmax:
            warn('tmax is not in epochs time interval. tmax is set to '
                 'epochs.tmax')
            tmax = self.tmax
        tmask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'],
                           include_tmax=include_tmax)
        self._set_times(self.times[tmask])
        self._raw_times = self._raw_times[tmask]
        self._data = self._data[:, :, tmask]
        try:
            _check_baseline(self.baseline, tmin, tmax, self.info['sfreq'])
        except ValueError:  # if it no longer applies, wipe it out
            warn('Cropping removes baseline period, setting '
                 'epochs.baseline = None')
            self.baseline = None
        return self
def copy(self):
"""Return copy of Epochs instance.
Returns
-------
epochs : instance of Epochs
A copy of the object.
"""
return deepcopy(self)
    def __deepcopy__(self, memodict):
        """Make a deepcopy."""
        cls = self.__class__
        # bypass __init__ (and any I/O it would do)
        result = cls.__new__(cls)
        for k, v in self.__dict__.items():
            # drop_log is immutable and _raw is private (and problematic to
            # deepcopy)
            if k in ('drop_log', '_raw', '_times_readonly'):
                # registering v in the memo makes deepcopy share it instead
                # of duplicating it
                memodict[id(v)] = v
            else:
                v = deepcopy(v, memodict)
            result.__dict__[k] = v
        return result
    @verbose
    def save(self, fname, split_size='2GB', fmt='single', overwrite=False,
             verbose=True):
        """Save epochs in a fif file.

        Parameters
        ----------
        fname : str
            The name of the file, which should end with -epo.fif or
            -epo.fif.gz.
        split_size : str | int
            Large raw files are automatically split into multiple pieces. This
            parameter specifies the maximum size of each piece. If the
            parameter is an integer, it specifies the size in Bytes. It is
            also possible to pass a human-readable string, e.g., 100MB.
            Note: Due to FIFF file limitations, the maximum split size is 2GB.

            .. versionadded:: 0.10.0
        fmt : str
            Format to save data. Valid options are 'double' or
            'single' for 64- or 32-bit float, or for 128- or
            64-bit complex numbers respectively. Note: Data are processed with
            double precision. Choosing single-precision, the saved data
            will slightly differ due to the reduction in precision.

            .. versionadded:: 0.17
        overwrite : bool
            If True, the destination file (if it exists) will be overwritten.
            If False (default), an error will be raised if the file exists.
            To overwrite original file (the same one that was loaded),
            data must be preloaded upon reading. This defaults to True in 0.18
            but will change to False in 0.19.

            .. versionadded:: 0.18
        %(verbose_meth)s

        Notes
        -----
        Bad epochs will be dropped before saving the epochs to disk.
        """
        check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz',
                                      '_epo.fif', '_epo.fif.gz'))
        # check for file existence
        _check_fname(fname, overwrite)
        split_size = _get_split_size(split_size)
        _check_option('fmt', fmt, ['single', 'double'])
        # to know the length accurately. The get_data() call would drop
        # bad epochs anyway
        self.drop_bad()
        # estimate the on-disk size so we know how many parts to write
        if len(self) == 0:
            warn('Saving epochs with no data')
            total_size = 0
        else:
            d = self[0].get_data()
            # this should be guaranteed by subclasses
            assert d.dtype in ('>f8', '<f8', '>c16', '<c16')
            total_size = d.nbytes * len(self)
        self._check_consistency()
        if fmt == "single":
            total_size //= 2  # 64bit data converted to 32bit before writing.
        total_size += 32  # FIF tags
        # Account for all the other things we write, too
        # 1. meas_id block plus main epochs block
        total_size += 132
        # 2. measurement info (likely slight overestimate, but okay)
        total_size += object_size(self.info)
        # 3. events and event_id in its own block
        total_size += (self.events.size * 4 +
                       len(_event_id_string(self.event_id)) + 72)
        # 4. Metadata in a block of its own
        if self.metadata is not None:
            total_size += len(_prepare_write_metadata(self.metadata)) + 56
        # 5. first sample, last sample, baseline
        total_size += 40 + 40 * (self.baseline is not None)
        # 6. drop log
        total_size += len(json.dumps(self.drop_log)) + 16
        # 7. reject params
        reject_params = _pack_reject_params(self)
        if reject_params:
            total_size += len(json.dumps(reject_params)) + 16
        # 8. selection
        total_size += self.selection.size * 4 + 16
        # 9. end of file tags
        total_size += _NEXT_FILE_BUFFER
        # This is like max(int(ceil(total_size / split_size)), 1) but cleaner
        n_parts = (total_size - 1) // split_size + 1
        assert n_parts >= 1
        # distribute the epochs roughly evenly over the parts
        epoch_idxs = np.array_split(np.arange(len(self)), n_parts)
        for part_idx, epoch_idx in enumerate(epoch_idxs):
            this_epochs = self[epoch_idx] if n_parts > 1 else self
            # avoid missing event_ids in splits
            this_epochs.event_id = self.event_id
            _save_split(this_epochs, fname, part_idx, n_parts, fmt)
def equalize_event_counts(self, event_ids, method='mintime'):
"""Equalize the number of trials in each condition.
It tries to make the remaining epochs occurring as close as possible in
time. This method works based on the idea that if there happened to be
some time-varying (like on the scale of minutes) noise characteristics
during a recording, they could be compensated for (to some extent) in
the equalization process. This method thus seeks to reduce any of
those effects by minimizing the differences in the times of the events
in the two sets of epochs. For example, if one had event times
[1, 2, 3, 4, 120, 121] and the other one had [3.5, 4.5, 120.5, 121.5],
it would remove events at times [1, 2] in the first epochs and not
[20, 21].
Parameters
----------
event_ids : list
The event types to equalize. Each entry in the list can either be
a str (single event) or a list of str. In the case where one of
the entries is a list of str, event_ids in that list will be
grouped together before equalizing trial counts across conditions.
In the case where partial matching is used (using '/' in
``event_ids``), ``event_ids`` will be matched according to the
provided tags, that is, processing works as if the event_ids
matched by the provided tags had been supplied instead.
The event_ids must identify nonoverlapping subsets of the epochs.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list
will be minimized.
Returns
-------
epochs : instance of Epochs
The modified Epochs instance.
indices : array of int
Indices from the original events list that were dropped.
Notes
-----
For example (if epochs.event_id was {'Left': 1, 'Right': 2,
'Nonspatial':3}:
epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])
would equalize the number of trials in the 'Nonspatial' condition with
the total number of trials in the 'Left' and 'Right' conditions.
If multiple indices are provided (e.g. 'Left' and 'Right' in the
example above), it is not guaranteed that after equalization, the
conditions will contribute evenly. E.g., it is possible to end up
with 70 'Nonspatial' trials, 69 'Left' and 1 'Right'.
"""
if len(event_ids) == 0:
raise ValueError('event_ids must have at least one element')
if not self._bad_dropped:
self.drop_bad()
# figure out how to equalize
eq_inds = list()
# deal with hierarchical tags
ids = self.event_id
orig_ids = list(event_ids)
tagging = False
if "/" in "".join(ids):
# make string inputs a list of length 1
event_ids = [[x] if isinstance(x, str) else x
for x in event_ids]
for ids_ in event_ids: # check if tagging is attempted
if any([id_ not in ids for id_ in ids_]):
tagging = True
# 1. treat everything that's not in event_id as a tag
# 2a. for tags, find all the event_ids matched by the tags
# 2b. for non-tag ids, just pass them directly
# 3. do this for every input
event_ids = [[k for k in ids
if all((tag in k.split("/")
for tag in id_))] # ids matching all tags
if all(id__ not in ids for id__ in id_)
else id_ # straight pass for non-tag inputs
for id_ in event_ids]
for ii, id_ in enumerate(event_ids):
if len(id_) == 0:
raise KeyError(orig_ids[ii] + "not found in the "
"epoch object's event_id.")
elif len({sub_id in ids for sub_id in id_}) != 1:
err = ("Don't mix hierarchical and regular event_ids"
" like in \'%s\'." % ", ".join(id_))
raise ValueError(err)
# raise for non-orthogonal tags
if tagging is True:
events_ = [set(self[x].events[:, 0]) for x in event_ids]
doubles = events_[0].intersection(events_[1])
if len(doubles):
raise ValueError("The two sets of epochs are "
"overlapping. Provide an "
"orthogonal selection.")
for eq in event_ids:
eq_inds.append(self._keys_to_idx(eq))
event_times = [self.events[e, 0] for e in eq_inds]
indices = _get_drop_indices(event_times, method)
# need to re-index indices
indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)])
self.drop(indices, reason='EQUALIZED_COUNT')
# actually remove the indices
return self, indices
@fill_doc
def to_data_frame(self, picks=None, index=None,
scalings=None, copy=True, long_format=False,
time_format='ms'):
"""Export data in tabular structure as a pandas DataFrame.
Channels are converted to columns in the DataFrame. By default,
additional columns "time", "epoch" (epoch number), and "condition"
(epoch event description) are added, unless ``index`` is not ``None``
(in which case the columns specified in ``index`` will be used to form
the DataFrame's index instead).
Parameters
----------
%(picks_all)s
%(df_index_epo)s
Valid string values are 'time', 'epoch', and 'condition'.
Defaults to ``None``.
%(df_scalings)s
%(df_copy)s
%(df_longform_epo)s
%(df_time_format)s
.. versionadded:: 0.20
Returns
-------
%(df_return)s
"""
# check pandas once here, instead of in each private utils function
pd = _check_pandas_installed() # noqa
# arg checking
valid_index_args = ['time', 'epoch', 'condition']
valid_time_formats = ['ms', 'timedelta']
index = _check_pandas_index_arguments(index, valid_index_args)
time_format = _check_time_format(time_format, valid_time_formats)
# get data
picks = _picks_to_idx(self.info, picks, 'all', exclude=())
data = self.get_data()[:, picks, :]
times = self.times
n_epochs, n_picks, n_times = data.shape
data = np.hstack(data).T # (time*epochs) x signals
if copy:
data = data.copy()
data = _scale_dataframe_data(self, data, picks, scalings)
# prepare extra columns / multiindex
mindex = list()
times = np.tile(times, n_epochs)
times = _convert_times(self, times, time_format)
mindex.append(('time', times))
rev_event_id = {v: k for k, v in self.event_id.items()}
conditions = [rev_event_id[k] for k in self.events[:, 2]]
mindex.append(('condition', np.repeat(conditions, n_times)))
mindex.append(('epoch', np.repeat(self.selection, n_times)))
assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
# build DataFrame
df = _build_data_frame(self, data, picks, long_format, mindex, index,
default_index=['condition', 'epoch', 'time'])
return df
def as_type(self, ch_type='grad', mode='fast'):
"""Compute virtual epochs using interpolated fields.
.. Warning:: Using virtual epochs to compute inverse can yield
unexpected results. The virtual channels have ``'_v'`` appended
at the end of the names to emphasize that the data contained in
them are interpolated.
Parameters
----------
ch_type : str
The destination channel type. It can be 'mag' or 'grad'.
mode : str
Either ``'accurate'`` or ``'fast'``, determines the quality of the
Legendre polynomial expansion used. ``'fast'`` should be sufficient
for most applications.
Returns
-------
epochs : instance of mne.EpochsArray
The transformed epochs object containing only virtual channels.
Notes
-----
This method returns a copy and does not modify the data it
operates on. It also returns an EpochsArray instance.
.. versionadded:: 0.20.0
"""
from .forward import _as_meg_type_inst
return _as_meg_type_inst(self, ch_type=ch_type, mode=mode)
def _check_baseline(baseline, tmin, tmax, sfreq):
"""Check for a valid baseline."""
if baseline is not None:
if not isinstance(baseline, tuple) or len(baseline) != 2:
raise ValueError('`baseline=%s` is an invalid argument, must be '
'a tuple of length 2 or None' % str(baseline))
# check default value of baseline and `tmin=0`
if baseline == (None, 0) and tmin == 0:
raise ValueError('Baseline interval is only one sample. Use '
'`baseline=(0, 0)` if this is desired.')
baseline_tmin, baseline_tmax = baseline
tstep = 1. / float(sfreq)
if baseline_tmin is None:
baseline_tmin = tmin
baseline_tmin = float(baseline_tmin)
if baseline_tmax is None:
baseline_tmax = tmax
baseline_tmax = float(baseline_tmax)
if baseline_tmin < tmin - tstep:
raise ValueError(
"Baseline interval (tmin = %s) is outside of epoch "
"data (tmin = %s)" % (baseline_tmin, tmin))
if baseline_tmax > tmax + tstep:
raise ValueError(
"Baseline interval (tmax = %s) is outside of epoch "
"data (tmax = %s)" % (baseline_tmax, tmax))
if baseline_tmin > baseline_tmax:
raise ValueError(
"Baseline min (%s) must be less than baseline max (%s)"
% (baseline_tmin, baseline_tmax))
def _drop_log_stats(drop_log, ignore=('IGNORED',)):
"""Compute drop log stats.
Parameters
----------
drop_log : list of list
Epoch drop log from Epochs.drop_log.
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
"""
if not isinstance(drop_log, tuple) or \
not all(isinstance(d, tuple) for d in drop_log) or \
not all(isinstance(s, str) for d in drop_log for s in d):
raise TypeError('drop_log must be a tuple of tuple of str')
perc = 100 * np.mean([len(d) > 0 for d in drop_log
if not any(r in ignore for r in d)])
return perc
@fill_doc
class Epochs(BaseEpochs):
    """Epochs extracted from a Raw instance.

    Parameters
    ----------
    raw : Raw object
        An instance of Raw.
    events : array of int, shape (n_events, 3)
        The events typically returned by the read_events function.
        If some events don't match the events of interest as specified
        by event_id, they will be marked as 'IGNORED' in the drop log.
    event_id : int | list of int | dict | None
        The id of the event to consider. If dict,
        the keys can later be used to access associated events. Example:
        dict(auditory=1, visual=3). If int, a dict will be created with
        the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used with
        and a dict is created with string integer names corresponding
        to the event id integers.
    tmin : float
        Start time before event. If nothing is provided, defaults to -0.2.
    tmax : float
        End time after event. If nothing is provided, defaults to 0.5.
    %(baseline)s
        Defaults to ``(None, 0)``, i.e. beginning of the data until
        time point zero.
    %(picks_all)s
    preload : bool
        Load all epochs from disk when creating the object
        or wait before accessing each epoch (more memory
        efficient but can be slower).
    reject : dict | None
        Rejection parameters based on peak-to-peak amplitude.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
        If reject is None then no rejection is done. Example::

            reject = dict(grad=4000e-13, # T / m (gradiometers)
                          mag=4e-12, # T (magnetometers)
                          eeg=40e-6, # V (EEG channels)
                          eog=250e-6 # V (EOG channels)
                          )

    flat : dict | None
        Rejection parameters based on flatness of signal.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
        are floats that set the minimum acceptable peak-to-peak amplitude.
        If flat is None then no rejection is done.
    proj : bool | 'delayed'
        Apply SSP projection vectors. If proj is 'delayed' and reject is not
        None the single epochs will be projected before the rejection
        decision, but used in unprojected state if they are kept.
        This way deciding which projection vectors are good can be postponed
        to the evoked stage without resulting in lower epoch counts and
        without producing results different from early SSP application
        given comparable parameters. Note that in this case baselining,
        detrending and temporal decimation will be postponed.
        If proj is False no projections will be applied which is the
        recommended value if SSPs are not used for cleaning the data.
    %(decim)s
    reject_tmin : scalar | None
        Start of the time window used to reject epochs (with the default None,
        the window will start with tmin).
    reject_tmax : scalar | None
        End of the time window used to reject epochs (with the default None,
        the window will end with tmax).
    detrend : int | None
        If 0 or 1, the data channels (MEG and EEG) will be detrended when
        loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
        is no detrending. Note that detrending is performed before baseline
        correction. If no DC offset is preferred (zeroth order detrending),
        either turn off baseline correction, as this may introduce a DC
        shift, or set baseline correction to use the entire time interval
        (will yield equivalent results but be slower).
    on_missing : str
        What to do if one or several event ids are not found in the recording.
        Valid keys are 'raise' | 'warn' | 'ignore'
        Default is 'raise'. If on_missing is 'warn' it will proceed but
        warn, if 'ignore' it will proceed silently. Note.
        If none of the event ids are found in the data, an error will be
        automatically generated irrespective of this parameter.
    %(reject_by_annotation_epochs)s
    metadata : instance of pandas.DataFrame | None
        A :class:`pandas.DataFrame` specifying metadata about each epoch.
        If given, ``len(metadata)`` must equal ``len(events)``. The DataFrame
        may only contain values of type (str | int | float | bool).
        If metadata is given, then pandas-style queries may be used to select
        subsets of data, see :meth:`mne.Epochs.__getitem__`.
        When a subset of the epochs is created in this (or any other
        supported) manner, the metadata object is subsetted accordingly, and
        the row indices will be modified to match ``epochs.selection``.

        .. versionadded:: 0.16
    event_repeated : str
        How to handle duplicates in ``events[:, 0]``. Can be ``'error'``
        (default), to raise an error, 'drop' to only retain the row occurring
        first in the ``events``, or ``'merge'`` to combine the coinciding
        events (=duplicates) into a new event (see Notes for details).

        .. versionadded:: 0.19
    %(verbose)s

    Attributes
    ----------
    info : instance of Info
        Measurement info.
    event_id : dict
        Names of conditions corresponding to event_ids.
    ch_names : list of string
        List of channel names.
    selection : array
        List of indices of selected events (not dropped or ignored etc.). For
        example, if the original event array had 4 events and the second event
        has been dropped, this attribute would be np.array([0, 2, 3]).
    preload : bool
        Indicates whether epochs are in memory.
    drop_log : tuple of tuple
        A tuple of the same length as the event array used to initialize the
        Epochs object. If the i-th original event is still part of the
        selection, drop_log[i] will be an empty tuple; otherwise it will be
        a tuple of the reasons the event is no longer in the selection, e.g.:

        - 'IGNORED'
            If it isn't part of the current subset defined by the user
        - 'NO_DATA' or 'TOO_SHORT'
            If epoch didn't contain enough data names of channels that exceeded
            the amplitude threshold
        - 'EQUALIZED_COUNT'
            See :meth:`~mne.Epochs.equalize_event_counts`
        - 'USER'
            For user-defined reasons (see :meth:`~mne.Epochs.drop`).
    filename : str
        The filename of the object.
    times : ndarray
        Time vector in seconds. Goes from ``tmin`` to ``tmax``. Time interval
        between consecutive time samples is equal to the inverse of the
        sampling frequency.
    %(verbose)s

    See Also
    --------
    mne.epochs.combine_event_ids
    mne.Epochs.equalize_event_counts

    Notes
    -----
    When accessing data, Epochs are detrended, baseline-corrected, and
    decimated, then projectors are (optionally) applied.

    For indexing and slicing using ``epochs[...]``, see
    :meth:`mne.Epochs.__getitem__`.

    All methods for iteration over objects (using :meth:`mne.Epochs.__iter__`,
    :meth:`mne.Epochs.iter_evoked` or :meth:`mne.Epochs.next`) use the same
    internal state.

    If ``event_repeated`` is set to ``'merge'``, the coinciding events
    (duplicates) will be merged into a single event_id and assigned a new
    id_number as::

        event_id['{event_id_1}/{event_id_2}/...'] = new_id_number

    For example with the event_id ``{'aud': 1, 'vis': 2}`` and the events
    ``[[0, 0, 1], [0, 0, 2]]``, the "merge" behavior will update both event_id
    and events to be: ``{'aud/vis': 3}`` and ``[[0, 0, 3]]`` respectively.
    """

    @verbose
    def __init__(self, raw, events, event_id=None, tmin=-0.2, tmax=0.5,
                 baseline=(None, 0), picks=None, preload=False, reject=None,
                 flat=None, proj=True, decim=1, reject_tmin=None,
                 reject_tmax=None, detrend=None, on_missing='raise',
                 reject_by_annotation=True, metadata=None,
                 event_repeated='error', verbose=None):  # noqa: D102
        if not isinstance(raw, BaseRaw):
            raise ValueError('The first argument to `Epochs` must be an '
                             'instance of mne.io.BaseRaw')
        # deep-copy so the Raw's Info is never mutated by this object
        info = deepcopy(raw.info)
        # proj is on when applied in Raw
        proj = proj or raw.proj
        self.reject_by_annotation = reject_by_annotation
        # call BaseEpochs constructor
        # (data is None: epochs are read lazily from `raw` unless
        # preload_at_end triggers an immediate load)
        super(Epochs, self).__init__(
            info, None, events, event_id, tmin, tmax, metadata=metadata,
            baseline=baseline, raw=raw, picks=picks, reject=reject,
            flat=flat, decim=decim, reject_tmin=reject_tmin,
            reject_tmax=reject_tmax, detrend=detrend,
            proj=proj, on_missing=on_missing, preload_at_end=preload,
            event_repeated=event_repeated, verbose=verbose)

    @verbose
    def _get_epoch_from_raw(self, idx, verbose=None):
        """Load one epoch from disk.

        Returns
        -------
        data : array | str | None
            If string, it's details on rejection reason.
            If array, it's the data in the desired range (good segment)
            If None, it means no data is available.
        """
        if self._raw is None:
            # This should never happen, as raw=None only if preload=True
            raise ValueError('An error has occurred, no valid raw file found. '
                             'Please report this to the mne-python '
                             'developers.')
        sfreq = self._raw.info['sfreq']
        # sample number of the event (absolute, in Raw numbering)
        event_samp = self.events[idx, 0]
        # Read a data segment from "start" to "stop" in samples
        # (start/stop are relative to the beginning of the Raw data buffer,
        # hence the first_samp subtraction)
        first_samp = self._raw.first_samp
        start = int(round(event_samp + self._raw_times[0] * sfreq))
        start -= first_samp
        stop = start + len(self._raw_times)
        # reject_tmin, and reject_tmax need to be converted to samples to
        # check the reject_by_annotation boundaries: reject_start, reject_stop
        reject_tmin = self.reject_tmin
        if reject_tmin is None:
            # default: rejection window starts with the epoch itself
            reject_tmin = self._raw_times[0]
        reject_start = int(round(event_samp + reject_tmin * sfreq))
        reject_start -= first_samp
        reject_tmax = self.reject_tmax
        if reject_tmax is None:
            # default: rejection window ends with the epoch itself
            reject_tmax = self._raw_times[-1]
        diff = int(round((self._raw_times[-1] - reject_tmax) * sfreq))
        reject_stop = stop - diff
        logger.debug(' Getting epoch for %d-%d' % (start, stop))
        data = self._raw._check_bad_segment(start, stop, self.picks,
                                            reject_start, reject_stop,
                                            self.reject_by_annotation)
        return data
@fill_doc
class EpochsArray(BaseEpochs):
    """Epochs object from numpy array.

    Parameters
    ----------
    data : array, shape (n_epochs, n_channels, n_times)
        The channels' time series for each epoch. See notes for proper units of
        measure.
    info : instance of Info
        Info dictionary. Consider using ``create_info`` to populate
        this structure.
    events : None | array of int, shape (n_events, 3)
        The events typically returned by the read_events function.
        If some events don't match the events of interest as specified
        by event_id, they will be marked as 'IGNORED' in the drop log.
        If None (default), all event values are set to 1 and event time-samples
        are set to range(n_epochs).
    tmin : float
        Start time before event. If nothing provided, defaults to 0.
    event_id : int | list of int | dict | None
        The id of the event to consider. If dict,
        the keys can later be used to access associated events. Example:
        dict(auditory=1, visual=3). If int, a dict will be created with
        the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used with
        and a dict is created with string integer names corresponding
        to the event id integers.
    reject : dict | None
        Rejection parameters based on peak-to-peak amplitude.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
        If reject is None then no rejection is done. Example::

            reject = dict(grad=4000e-13, # T / m (gradiometers)
                          mag=4e-12, # T (magnetometers)
                          eeg=40e-6, # V (EEG channels)
                          eog=250e-6 # V (EOG channels)
                          )

    flat : dict | None
        Rejection parameters based on flatness of signal.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
        are floats that set the minimum acceptable peak-to-peak amplitude.
        If flat is None then no rejection is done.
    reject_tmin : scalar | None
        Start of the time window used to reject epochs (with the default None,
        the window will start with tmin).
    reject_tmax : scalar | None
        End of the time window used to reject epochs (with the default None,
        the window will end with tmax).
    %(baseline)s
        Defaults to ``None``, i.e. no baseline correction.
    proj : bool | 'delayed'
        Apply SSP projection vectors. See :class:`mne.Epochs` for details.
    on_missing : str
        See :class:`mne.Epochs` docstring for details.
    metadata : instance of pandas.DataFrame | None
        See :class:`mne.Epochs` docstring for details.

        .. versionadded:: 0.16
    selection : ndarray | None
        The selection compared to the original set of epochs.
        Can be None to use ``np.arange(len(events))``.

        .. versionadded:: 0.16
    %(verbose)s

    See Also
    --------
    create_info
    EvokedArray
    io.RawArray

    Notes
    -----
    Proper units of measure:

    * V: eeg, eog, seeg, emg, ecg, bio, ecog
    * T: mag
    * T/m: grad
    * M: hbo, hbr
    * Am: dipole
    * AU: misc
    """

    @verbose
    def __init__(self, data, info, events=None, tmin=0, event_id=None,
                 reject=None, flat=None, reject_tmin=None,
                 reject_tmax=None, baseline=None, proj=True,
                 on_missing='raise', metadata=None, selection=None,
                 verbose=None):  # noqa: D102
        # promote to complex only when the input actually contains
        # complex values; otherwise always work in float64
        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
        data = np.asanyarray(data, dtype=dtype)
        if data.ndim != 3:
            raise ValueError('Data must be a 3D array of shape (n_epochs, '
                             'n_channels, n_samples)')
        if len(info['ch_names']) != data.shape[1]:
            raise ValueError('Info and data must have same number of '
                             'channels.')
        if events is None:
            # default: one event per epoch, id 1, samples 0..n_epochs-1
            n_epochs = len(data)
            events = _gen_events(n_epochs)
        if data.shape[0] != len(events):
            # BUG FIX: the two string fragments previously concatenated
            # without a space ("...eventsmust match")
            raise ValueError('The number of epochs and the number of events '
                             'must match')
        info = info.copy()  # do not modify original info
        # last sample time implied by the data length and sampling rate
        tmax = (data.shape[2] - 1) / info['sfreq'] + tmin
        if event_id is None:  # convert to int to make typing-checks happy
            event_id = {str(e): int(e) for e in np.unique(events[:, 2])}
        super(EpochsArray, self).__init__(
            info, data, events, event_id, tmin, tmax, baseline, reject=reject,
            flat=flat, reject_tmin=reject_tmin, reject_tmax=reject_tmax,
            decim=1, metadata=metadata, selection=selection, proj=proj,
            on_missing=on_missing)
        # every event number must be present in event_id
        if len(events) != np.in1d(self.events[:, 2],
                                  list(self.event_id.values())).sum():
            raise ValueError('The events must only contain event numbers from '
                             'event_id')
        for e in self._data:
            # This is safe without assignment b/c there is no decim
            self._detrend_offset_decim(e)
        self.drop_bad()
def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
    """Collapse event_ids from an epochs instance into a new event_id.

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs to operate on.
    old_event_ids : str, or list
        Conditions to collapse together.
    new_event_id : dict, or int
        A one-element dict (or a single integer) for the new
        condition. Note that for safety, this cannot be any
        existing id (in epochs.event_id.values()).
    copy : bool
        Whether to return a new instance or modify in place.

    Notes
    -----
    For example (if epochs.event_id was {'Left': 1, 'Right': 2}:

        combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12})

    would create a 'Directional' entry in epochs.event_id replacing
    'Left' and 'Right' (combining their trials).
    """
    out = epochs.copy() if copy else epochs
    old_event_ids = np.asanyarray(old_event_ids)
    # normalize new_event_id to a one-entry dict
    if isinstance(new_event_id, int):
        new_event_id = {str(new_event_id): new_event_id}
    elif not isinstance(new_event_id, dict):
        raise ValueError('new_event_id must be a dict or int')
    elif len(new_event_id) != 1:
        raise ValueError('new_event_id dict must have one entry')
    new_event_num = operator.index(next(iter(new_event_id.values())))
    if new_event_num in out.event_id.values():
        raise ValueError('new_event_id value must not already exist')
    # resolve all old ids up front so a missing key raises before any
    # mutation happens
    old_event_nums = np.array([out.event_id[key] for key in old_event_ids])
    # rewrite the event numbers of all matching rows
    rows = np.isin(out.events[:, 2], old_event_nums)
    out.events[rows, 2] = new_event_num
    # drop the collapsed entries, then register the new one
    for key in old_event_ids:
        del out.event_id[key]
    out.event_id.update(new_event_id)
    return out
def equalize_epoch_counts(epochs_list, method='mintime'):
    """Equalize the number of trials in multiple Epoch instances.

    Parameters
    ----------
    epochs_list : list of Epochs instances
        The Epochs instances to equalize trial counts for.
    method : str
        If 'truncate', events will be truncated from the end of each event
        list. If 'mintime', timing differences between each event list will be
        minimized.

    Notes
    -----
    This tries to make the remaining epochs occurring as close as possible in
    time. This method works based on the idea that if there happened to be some
    time-varying (like on the scale of minutes) noise characteristics during
    a recording, they could be compensated for (to some extent) in the
    equalization process. This method thus seeks to reduce any of those effects
    by minimizing the differences in the times of the events in the two sets of
    epochs. For example, if one had event times [1, 2, 3, 4, 120, 121] and the
    other one had [3.5, 4.5, 120.5, 121.5], it would remove events at times
    [1, 2] in the first epochs and not [120, 121].

    Examples
    --------
    >>> equalize_epoch_counts([epochs1, epochs2]) # doctest: +SKIP
    """
    if any(not isinstance(epochs, BaseEpochs) for epochs in epochs_list):
        raise ValueError('All inputs must be Epochs instances')
    # make sure bad epochs are dropped
    for epochs in epochs_list:
        if not epochs._bad_dropped:
            epochs.drop_bad()
    # drop whichever events minimize the timing differences across instances
    sample_times = [epochs.events[:, 0] for epochs in epochs_list]
    drop_inds_list = _get_drop_indices(sample_times, method)
    for epochs, drop_inds in zip(epochs_list, drop_inds_list):
        epochs.drop(drop_inds, reason='EQUALIZED_COUNT')
def _get_drop_indices(event_times, method):
    """Get indices to drop from multiple event timing lists."""
    _check_option('method', method, ['mintime', 'truncate'])
    # the (first) shortest event list is the reference length
    shortest = min(event_times, key=lambda times: times.shape[0])
    indices = list()
    for times in event_times:
        if method == 'mintime':
            keep = _minimize_time_diff(shortest, times)
        else:
            # truncate: keep the first len(shortest) events
            keep = np.ones(times.shape[0], dtype=bool)
            keep[shortest.shape[0]:] = False
        indices.append(np.where(~keep)[0])
    return indices
def _minimize_time_diff(t_shorter, t_longer):
    """Find a boolean mask to minimize timing differences.

    Greedily removes one event at a time from ``t_longer`` (the longer
    event-time array) until it matches the length of ``t_shorter``, each
    time removing the event whose absence minimizes a resampled timing
    mismatch score between the two sequences. Returns a boolean keep-mask
    over ``t_longer``.
    """
    from scipy.interpolate import interp1d
    keep = np.ones((len(t_longer)), dtype=bool)
    if len(t_shorter) == 0:
        # nothing to match against: drop everything
        keep.fill(False)
        return keep
    scores = np.ones((len(t_longer)))
    x1 = np.arange(len(t_shorter))
    # The first set of keep masks to test
    kwargs = dict(copy=False, bounds_error=False)
    # this is a speed tweak, only exists for certain versions of scipy
    if 'assume_sorted' in _get_args(interp1d.__init__):
        kwargs['assume_sorted'] = True
    # interpolator of the shorter sequence over its own index grid;
    # out-of-range queries clamp to the last time value via fill_value
    shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1],
                              **kwargs)
    for ii in range(len(t_longer) - len(t_shorter)):
        scores.fill(np.inf)
        # set up the keep masks to test, eliminating any rows that are already
        # gone
        keep_mask = ~np.eye(len(t_longer), dtype=bool)[keep]
        keep_mask[:, ~keep] = False
        # Check every possible removal to see if it minimizes
        x2 = np.arange(len(t_longer) - ii - 1)
        t_keeps = np.array([t_longer[km] for km in keep_mask])
        longer_interp = interp1d(x2, t_keeps, axis=1,
                                 fill_value=t_keeps[:, -1],
                                 **kwargs)
        d1 = longer_interp(x1) - t_shorter
        d2 = shorter_interp(x2) - t_keeps
        # np.abs(d, d) computes |d| in place (2nd arg is the `out` param),
        # avoiding temporaries; scores are L1 mismatches in both directions
        scores[keep] = np.abs(d1, d1).sum(axis=1) + np.abs(d2, d2).sum(axis=1)
        # remove the single event whose removal gives the lowest score
        keep[np.argmin(scores)] = False
    return keep
@verbose
def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
             ignore_chs=(), verbose=None):
    """Test if data segment e is good according to reject and flat.

    If full_report=True, it will give True/False as well as a list of all
    offending channels.

    Parameters
    ----------
    e : array, shape (n_channels, n_times)
        The data segment to check.
    ch_names : list of str
        Channel names corresponding to the rows of ``e``.
    channel_type_idx : dict
        Mapping from channel type (e.g. 'grad', 'eeg') to row indices in
        ``e``.
    reject, flat : dict | None
        Peak-to-peak upper / lower thresholds per channel type.
    full_report : bool
        If True, return ``(good, bad_channels_or_None)`` instead of a bool.
    ignore_chs : collection of str
        Channel names exempt from checking.
        (BUG FIX: was a mutable default ``[]``; an immutable tuple avoids
        the shared-mutable-default pitfall and behaves identically since
        it is only membership-tested.)
    """
    bad_tuple = tuple()
    has_printed = False
    # channels in ignore_chs are never counted against the epoch
    checkable = np.ones(len(ch_names), dtype=bool)
    checkable[np.array([c in ignore_chs
                        for c in ch_names], dtype=bool)] = False
    # two passes: peak-to-peak too LARGE (reject) and too SMALL (flat)
    for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
        if refl is not None:
            for key, thresh in refl.items():
                idx = channel_type_idx[key]
                name = key.upper()
                if len(idx) > 0:
                    e_idx = e[idx]
                    deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1)
                    checkable_idx = checkable[idx]
                    idx_deltas = np.where(np.logical_and(f(deltas, thresh),
                                                         checkable_idx))[0]
                    if len(idx_deltas) > 0:
                        bad_names = [ch_names[idx[i]] for i in idx_deltas]
                        # log the rejection once per epoch at most
                        if (not has_printed):
                            logger.info('    Rejecting %s epoch based on %s : '
                                        '%s' % (t, name, bad_names))
                            has_printed = True
                        if not full_report:
                            return False
                        else:
                            bad_tuple += tuple(bad_names)
    if not full_report:
        return True
    else:
        if bad_tuple == ():
            return True, None
        else:
            return False, bad_tuple
def _read_one_epoch_file(f, tree, preload):
    """Read a single FIF file.

    Parses the epochs block of an already-opened FIF file and returns the
    pieces needed to construct an Epochs object (info, data or a lazy data
    tag, events, event_id, metadata, time limits, baseline, selection,
    drop log, per-channel calibration, reject params, and on-disk format).
    """
    with f as fid:
        # Read the measurement info
        info, meas = read_meas_info(fid, tree, clean_bads=True)
        events, mappings = _read_events_fif(fid, tree)
        # Metadata (optional pandas-serialized block)
        metadata = None
        metadata_tree = dir_tree_find(tree, FIFF.FIFFB_MNE_METADATA)
        if len(metadata_tree) > 0:
            for dd in metadata_tree[0]['directory']:
                kind = dd.kind
                pos = dd.pos
                if kind == FIFF.FIFF_DESCRIPTION:
                    metadata = read_tag(fid, pos).data
                    metadata = _prepare_read_metadata(metadata)
                    break
        # Locate the data of interest
        processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
        del meas
        if len(processed) == 0:
            raise ValueError('Could not find processed data')
        epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS)
        if len(epochs_node) == 0:
            # before version 0.11 we errantly saved with this tag instead of
            # an MNE tag
            # NOTE(review): this fallback repeats the exact same search as
            # above (same constant), so it can never find anything new —
            # looks like it should use a different legacy tag; confirm
            # against the project's history.
            epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS)
            if len(epochs_node) == 0:
                epochs_node = dir_tree_find(tree, 122)  # 122 used before v0.11
                if len(epochs_node) == 0:
                    raise ValueError('Could not find epochs data')
        my_epochs = epochs_node[0]
        # Now find the data in the block
        data = None
        data_tag = None
        bmin, bmax = None, None
        baseline = None
        selection = None
        drop_log = None
        reject_params = {}
        # walk every tag in the epochs block and pick out what we need
        for k in range(my_epochs['nent']):
            kind = my_epochs['directory'][k].kind
            pos = my_epochs['directory'][k].pos
            if kind == FIFF.FIFF_FIRST_SAMPLE:
                tag = read_tag(fid, pos)
                first = int(tag.data)
            elif kind == FIFF.FIFF_LAST_SAMPLE:
                tag = read_tag(fid, pos)
                last = int(tag.data)
            elif kind == FIFF.FIFF_EPOCH:
                # delay reading until later
                fid.seek(pos, 0)
                data_tag = read_tag_info(fid)
                data_tag.pos = pos
                # flip bit 30 of the stored type code — presumably strips a
                # flag so the type maps onto the base FIFFT_* constants used
                # below; TODO confirm against the FIFF tag format spec
                data_tag.type = data_tag.type ^ (1 << 30)
            elif kind in [FIFF.FIFF_MNE_BASELINE_MIN, 304]:
                # Constant 304 was used before v0.11
                tag = read_tag(fid, pos)
                bmin = float(tag.data)
            elif kind in [FIFF.FIFF_MNE_BASELINE_MAX, 305]:
                # Constant 305 was used before v0.11
                tag = read_tag(fid, pos)
                bmax = float(tag.data)
            elif kind == FIFF.FIFF_MNE_EPOCHS_SELECTION:
                tag = read_tag(fid, pos)
                selection = np.array(tag.data)
            elif kind == FIFF.FIFF_MNE_EPOCHS_DROP_LOG:
                tag = read_tag(fid, pos)
                drop_log = tuple(tuple(x) for x in json.loads(tag.data))
            elif kind == FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT:
                tag = read_tag(fid, pos)
                reject_params = json.loads(tag.data)
        if bmin is not None or bmax is not None:
            baseline = (bmin, bmax)
        # number of samples per epoch, from the first/last sample tags
        n_samp = last - first + 1
        logger.info(' Found the data of interest:')
        logger.info(' t = %10.2f ... %10.2f ms'
                    % (1000 * first / info['sfreq'],
                       1000 * last / info['sfreq']))
        if info['comps'] is not None:
            logger.info(' %d CTF compensation matrices available'
                        % len(info['comps']))
        # Inspect the data
        if data_tag is None:
            raise ValueError('Epochs data not found')
        epoch_shape = (len(info['ch_names']), n_samp)
        size_expected = len(events) * np.prod(epoch_shape)
        # on read double-precision is always used
        # NOTE(review): if data_tag.type matches none of these branches,
        # `fmt`/`datatype` are never bound and the np.dtype call below
        # raises NameError — consider an explicit error for unknown types.
        if data_tag.type == FIFF.FIFFT_FLOAT:
            datatype = np.float64
            fmt = '>f4'
        elif data_tag.type == FIFF.FIFFT_DOUBLE:
            datatype = np.float64
            fmt = '>f8'
        elif data_tag.type == FIFF.FIFFT_COMPLEX_FLOAT:
            datatype = np.complex128
            fmt = '>c8'
        elif data_tag.type == FIFF.FIFFT_COMPLEX_DOUBLE:
            datatype = np.complex128
            fmt = '>c16'
        fmt_itemsize = np.dtype(fmt).itemsize
        assert fmt_itemsize in (4, 8, 16)
        # subtract the tag header overhead (16 bytes) in item units
        size_actual = data_tag.size // fmt_itemsize - 16 // fmt_itemsize
        if not size_actual == size_expected:
            raise ValueError('Incorrect number of samples (%d instead of %d)'
                             % (size_actual, size_expected))
        # Calibration factors (per-channel column vector)
        cals = np.array([[info['chs'][k]['cal'] *
                          info['chs'][k].get('scale', 1.0)]
                         for k in range(info['nchan'])], np.float64)
        # Read the data
        if preload:
            data = read_tag(fid, data_tag.pos).data.astype(datatype)
            data *= cals
        # Put it all together
        tmin = first / info['sfreq']
        tmax = last / info['sfreq']
        event_id = ({str(e): e for e in np.unique(events[:, 2])}
                    if mappings is None else mappings)
        # In case epochs didn't have a FIFF.FIFF_MNE_EPOCHS_SELECTION tag
        # (version < 0.8):
        if selection is None:
            selection = np.arange(len(events))
        if drop_log is None:
            drop_log = ((),) * len(events)
        return (info, data, data_tag, events, event_id, metadata, tmin, tmax,
                baseline, selection, drop_log, epoch_shape, cals, reject_params,
                fmt)
@verbose
def read_epochs(fname, proj=True, preload=True, verbose=None):
    """Read epochs from a fif file.
    Parameters
    ----------
    fname : str | file-like
        The epochs filename to load. Filename should end with -epo.fif or
        -epo.fif.gz. If a file-like object is provided, preloading must be
        used.
    proj : bool | 'delayed'
        Apply SSP projection vectors. If proj is 'delayed' and reject is not
        None the single epochs will be projected before the rejection
        decision, but used in unprojected state if they are kept.
        This way deciding which projection vectors are good can be postponed
        to the evoked stage without resulting in lower epoch counts and
        without producing results different from early SSP application
        given comparable parameters. Note that in this case baselining,
        detrending and temporal decimation will be postponed.
        If proj is False no projections will be applied which is the
        recommended value if SSPs are not used for cleaning the data.
    preload : bool
        If True, read all epochs from disk immediately. If False, epochs will
        be read on demand.
    %(verbose)s
    Returns
    -------
    epochs : instance of Epochs
        The epochs.
    """
    # Thin convenience wrapper: EpochsFIF does all the (split-)file handling.
    return EpochsFIF(fname, proj=proj, preload=preload, verbose=verbose)
class _RawContainer(object):
"""Helper for a raw data container."""
def __init__(self, fid, data_tag, event_samps, epoch_shape,
cals, fmt): # noqa: D102
self.fid = fid
self.data_tag = data_tag
self.event_samps = event_samps
self.epoch_shape = epoch_shape
self.cals = cals
self.proj = False
self.fmt = fmt
def __del__(self): # noqa: D105
self.fid.close()
@fill_doc
class EpochsFIF(BaseEpochs):
    """Epochs read from disk.
    Parameters
    ----------
    fname : str | file-like
        The name of the file, which should end with -epo.fif or -epo.fif.gz. If
        a file-like object is provided, preloading must be used.
    proj : bool | 'delayed'
        Apply SSP projection vectors. If proj is 'delayed' and reject is not
        None the single epochs will be projected before the rejection
        decision, but used in unprojected state if they are kept.
        This way deciding which projection vectors are good can be postponed
        to the evoked stage without resulting in lower epoch counts and
        without producing results different from early SSP application
        given comparable parameters. Note that in this case baselining,
        detrending and temporal decimation will be postponed.
        If proj is False no projections will be applied which is the
        recommended value if SSPs are not used for cleaning the data.
    preload : bool
        If True, read all epochs from disk immediately. If False, epochs will
        be read on demand.
    %(verbose)s
    See Also
    --------
    mne.Epochs
    mne.epochs.combine_event_ids
    mne.Epochs.equalize_event_counts
    """
    @verbose
    def __init__(self, fname, proj=True, preload=True,
                 verbose=None):  # noqa: D102
        # File-like inputs cannot be re-opened later, so they require preload.
        if isinstance(fname, str):
            check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz',
                                          '_epo.fif', '_epo.fif.gz'))
        elif not preload:
            raise ValueError('preload must be used with file-like objects')
        # ``fnames`` grows while iterating when split files chain via
        # ``next_fname`` (each part names its successor).
        fnames = [fname]
        ep_list = list()
        raw = list()
        for fname in fnames:
            fname_rep = _get_fname_rep(fname)
            logger.info('Reading %s ...' % fname_rep)
            fid, tree, _ = fiff_open(fname, preload=preload)
            next_fname = _get_next_fname(fid, fname, tree)
            (info, data, data_tag, events, event_id, metadata, tmin, tmax,
             baseline, selection, drop_log, epoch_shape, cals,
             reject_params, fmt) = \
                _read_one_epoch_file(fid, tree, preload)
            # here we ignore missing events, since users should already be
            # aware of missing events if they have saved data that way
            epoch = BaseEpochs(
                info, data, events, event_id, tmin, tmax, baseline,
                metadata=metadata, on_missing='ignore',
                selection=selection, drop_log=drop_log,
                proj=False, verbose=False)
            ep_list.append(epoch)
            if not preload:
                # store everything we need to index back to the original data
                raw.append(_RawContainer(fiff_open(fname)[0], data_tag,
                                         events[:, 0].copy(), epoch_shape,
                                         cals, fmt))
            if next_fname is not None:
                fnames.append(next_fname)
        # Merge all parts into a single set of epochs (no event-time offset,
        # the sample numbers must stay as stored on disk).
        (info, data, events, event_id, tmin, tmax, metadata, baseline,
         selection, drop_log, _) = \
            _concatenate_epochs(ep_list, with_data=preload, add_offset=False)
        # we need this uniqueness for non-preloaded data to work properly
        if len(np.unique(events[:, 0])) != len(events):
            raise RuntimeError('Event time samples were not unique')
        # correct the drop log
        # Each file part carries a full-length drop log; fold the parts
        # together so entries marked IGNORED in one part pick up the real
        # reason recorded in another part.
        assert len(drop_log) % len(fnames) == 0
        step = len(drop_log) // len(fnames)
        offsets = np.arange(step, len(drop_log) + 1, step)
        drop_log = list(drop_log)
        for i1, i2 in zip(offsets[:-1], offsets[1:]):
            other_log = drop_log[i1:i2]
            for k, (a, b) in enumerate(zip(drop_log, other_log)):
                if a == ('IGNORED',) and b != ('IGNORED',):
                    drop_log[k] = b
        drop_log = tuple(drop_log[:step])
        # call BaseEpochs constructor
        super(EpochsFIF, self).__init__(
            info, data, events, event_id, tmin, tmax, baseline, raw=raw,
            proj=proj, preload_at_end=False, on_missing='ignore',
            selection=selection, drop_log=drop_log, filename=fname_rep,
            metadata=metadata, verbose=verbose, **reject_params)
        # use the private property instead of drop_bad so that epochs
        # are not all read from disk for preload=False
        self._bad_dropped = True
    @verbose
    def _get_epoch_from_raw(self, idx, verbose=None):
        """Load one epoch from disk."""
        # Find the right file and offset to use
        event_samp = self.events[idx, 0]
        for raw in self._raw:
            idx = np.where(raw.event_samps == event_samp)[0]
            if len(idx) == 1:
                fmt = raw.fmt
                idx = idx[0]
                size = np.prod(raw.epoch_shape) * np.dtype(fmt).itemsize
                offset = idx * size + 16 # 16 = Tag header
                break
        else:
            # read the correct subset of the data
            raise RuntimeError('Correct epoch could not be found, please '
                               'contact mne-python developers')
        # the following is equivalent to this, but faster:
        #
        # >>> data = read_tag(raw.fid, raw.data_tag.pos).data.astype(float)
        # >>> data *= raw.cals[np.newaxis, :, :]
        # >>> data = data[idx]
        #
        # Eventually this could be refactored in io/tag.py if other functions
        # could make use of it
        raw.fid.seek(raw.data_tag.pos + offset, 0)
        # Complex data are stored as interleaved real floats; read them as
        # such and reinterpret the buffer afterwards.
        if fmt == '>c8':
            read_fmt = '>f4'
        elif fmt == '>c16':
            read_fmt = '>f8'
        else:
            read_fmt = fmt
        data = np.frombuffer(raw.fid.read(size), read_fmt)
        if read_fmt != fmt:
            data = data.view(fmt)
            data = data.astype(np.complex128)
        else:
            data = data.astype(np.float64)
        data.shape = raw.epoch_shape
        # Apply per-channel calibration (cal * scale from the info).
        data *= raw.cals
        return data
@fill_doc
def bootstrap(epochs, random_state=None):
    """Resample epochs with replacement (bootstrap).
    Parameters
    ----------
    epochs : Epochs instance
        Preloaded epochs to draw the bootstrap sample from.
    %(random_state)s
    Returns
    -------
    epochs : Epochs instance
        A new Epochs instance holding the resampled epochs.
    """
    # In-place style indexing below requires the data to be in memory.
    if not epochs.preload:
        raise RuntimeError('Modifying data of epochs is only supported '
                           'when preloading is used. Use preload=True '
                           'in the constructor.')
    rng = check_random_state(random_state)
    resampled = epochs.copy()
    n_events = len(resampled.events)
    # Draw ``n_events`` indices uniformly with replacement.
    sample_idx = rng_uniform(rng)(0, n_events, n_events)
    return resampled[sample_idx]
def _check_merge_epochs(epochs_list):
"""Aux function."""
if len({tuple(epochs.event_id.items()) for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for event_id")
if len({epochs.tmin for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for tmin")
if len({epochs.tmax for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for tmax")
if len({epochs.baseline for epochs in epochs_list}) != 1:
raise NotImplementedError("Epochs with unequal values for baseline")
@verbose
def add_channels_epochs(epochs_list, verbose=None):
    """Concatenate channels, info and data from two Epochs objects.
    Parameters
    ----------
    epochs_list : list of Epochs
        Epochs object to concatenate.
    %(verbose)s Defaults to True if any of the input epochs have verbose=True.
    Returns
    -------
    epochs : instance of Epochs
        Concatenated epochs.
    """
    # Data must be in memory since the merged array is built below.
    if not all(e.preload for e in epochs_list):
        raise ValueError('All epochs must be preloaded.')
    info = _merge_info([epochs.info for epochs in epochs_list])
    data = [epochs.get_data() for epochs in epochs_list]
    _check_merge_epochs(epochs_list)
    # All inputs must have the same number of epochs to align channel-wise.
    for d in data:
        if len(d) != len(data[0]):
            raise ValueError('all epochs must be of the same length')
    # Stack along the channel axis (axis 1 of n_epochs x n_chan x n_times).
    data = np.concatenate(data, axis=1)
    if len(info['chs']) != data.shape[1]:
        err = "Data shape does not match channel number in measurement info"
        raise RuntimeError(err)
    events = epochs_list[0].events.copy()
    all_same = all(np.array_equal(events, epochs.events)
                   for epochs in epochs_list[1:])
    if not all_same:
        raise ValueError('Events must be the same.')
    # Projection is considered active if any of the inputs had it applied.
    proj = any(e.proj for e in epochs_list)
    if verbose is None:
        verbose = any(e.verbose for e in epochs_list)
    # Reuse the first Epochs object as the container and patch its state.
    epochs = epochs_list[0].copy()
    epochs.info = info
    epochs.picks = None
    epochs.verbose = verbose
    epochs.events = events
    epochs.preload = True
    epochs._bad_dropped = True
    epochs._data = data
    # Rebuild the projector for the merged channel set.
    epochs._projector, epochs.info = setup_proj(epochs.info, False,
                                                activate=proj)
    return epochs
def _compare_epochs_infos(info1, info2, name):
"""Compare infos."""
if not isinstance(name, str): # passed epochs index
name = f'epochs[{name:d}]'
info1._check_consistency()
info2._check_consistency()
if info1['nchan'] != info2['nchan']:
raise ValueError(f'{name}.info[\'nchan\'] must match')
if set(info1['bads']) != set(info2['bads']):
raise ValueError(f'{name}.info[\'bads\'] must match')
if info1['sfreq'] != info2['sfreq']:
raise ValueError(f'{name}.info[\'sfreq\'] must match')
if set(info1['ch_names']) != set(info2['ch_names']):
raise ValueError(f'{name}.info[\'ch_names\'] must match')
if len(info2['projs']) != len(info1['projs']):
raise ValueError(f'SSP projectors in {name} must be the same')
if any(not _proj_equal(p1, p2) for p1, p2 in
zip(info2['projs'], info1['projs'])):
raise ValueError(f'SSP projectors in {name} must be the same')
if (info1['dev_head_t'] is None) != (info2['dev_head_t'] is None) or \
(info1['dev_head_t'] is not None and not
np.allclose(info1['dev_head_t']['trans'],
info2['dev_head_t']['trans'], rtol=1e-6)):
raise ValueError(f'{name}.info[\'dev_head_t\'] must match. The '
'instances probably come from different runs, and '
'are therefore associated with different head '
'positions. Manually change info[\'dev_head_t\'] to '
'avoid this message but beware that this means the '
'MEG sensors will not be properly spatially aligned. '
'See mne.preprocessing.maxwell_filter to realign the '
'runs to a common head position.')
def _concatenate_epochs(epochs_list, with_data=True, add_offset=True):
    """Auxiliary function for concatenating epochs."""
    if not isinstance(epochs_list, (list, tuple)):
        raise TypeError('epochs_list must be a list or tuple, got %s'
                        % (type(epochs_list),))
    for ei, epochs in enumerate(epochs_list):
        if not isinstance(epochs, BaseEpochs):
            raise TypeError('epochs_list[%d] must be an instance of Epochs, '
                            'got %s' % (ei, type(epochs)))
    # The first instance provides the reference metadata/parameters.
    out = epochs_list[0]
    offsets = [0]
    if with_data:
        out.drop_bad()
        offsets.append(len(out))
    events = [out.events]
    metadata = [out.metadata]
    baseline, tmin, tmax = out.baseline, out.tmin, out.tmax
    info = deepcopy(out.info)
    verbose = out.verbose
    drop_log = out.drop_log
    event_id = deepcopy(out.event_id)
    selection = out.selection
    # offset is the last epoch + tmax + 10 second
    # NOTE(review): ``epochs`` here is the loop variable left over from the
    # validation loop above (the last element of epochs_list); this is only
    # correct because all inputs are required to share the same sfreq.
    events_offset = (np.max(out.events[:, 0]) +
                     int((10 + tmax) * epochs.info['sfreq']))
    for ii, epochs in enumerate(epochs_list[1:], 1):
        _compare_epochs_infos(epochs.info, info, ii)
        if not np.allclose(epochs.times, epochs_list[0].times):
            raise ValueError('Epochs must have same times')
        if epochs.baseline != baseline:
            raise ValueError('Baseline must be same for all epochs')
        # compare event_id
        common_keys = list(set(event_id).intersection(set(epochs.event_id)))
        for key in common_keys:
            if not event_id[key] == epochs.event_id[key]:
                msg = ('event_id values must be the same for identical keys '
                       'for all concatenated epochs. Key "{}" maps to {} in '
                       'some epochs and to {} in others.')
                raise ValueError(msg.format(key, event_id[key],
                                            epochs.event_id[key]))
        if with_data:
            epochs.drop_bad()
            offsets.append(len(epochs))
        evs = epochs.events.copy()
        # add offset
        if add_offset:
            evs[:, 0] += events_offset
        # Update offset for the next iteration.
        # offset is the last epoch + tmax + 10 second
        # (equivalent to max of the shifted events plus the padding, since
        # max(evs) == max(epochs.events) + events_offset)
        events_offset += (np.max(epochs.events[:, 0]) +
                          int((10 + tmax) * epochs.info['sfreq']))
        events.append(evs)
        selection = np.concatenate((selection, epochs.selection))
        drop_log = drop_log + epochs.drop_log
        event_id.update(epochs.event_id)
        metadata.append(epochs.metadata)
    events = np.concatenate(events, axis=0)
    # Create metadata object (or make it None)
    n_have = sum(this_meta is not None for this_meta in metadata)
    if n_have == 0:
        metadata = None
    elif n_have != len(metadata):
        raise ValueError('%d of %d epochs instances have metadata, either '
                         'all or none must have metadata'
                         % (n_have, len(metadata)))
    else:
        pd = _check_pandas_installed(strict=False)
        if pd is not False:
            metadata = pd.concat(metadata)
        else: # dict of dicts
            metadata = sum(metadata, list())
    assert len(offsets) == (len(epochs_list) if with_data else 0) + 1
    data = None
    if with_data:
        # Pre-allocate the full output array once, then fill per instance.
        offsets = np.cumsum(offsets)
        for start, stop, epochs in zip(offsets[:-1], offsets[1:], epochs_list):
            this_data = epochs.get_data()
            if data is None:
                data = np.empty(
                    (offsets[-1], len(out.ch_names), len(out.times)),
                    dtype=this_data.dtype)
            data[start:stop] = this_data
    return (info, data, events, event_id, tmin, tmax, metadata, baseline,
            selection, drop_log, verbose)
def _finish_concat(info, data, events, event_id, tmin, tmax, metadata,
                   baseline, selection, drop_log, verbose):
    """Finish concatenation for epochs not read from disk."""
    # Keep only epochs whose drop-log entry is empty (i.e. not dropped).
    kept = [ii for ii, entry in enumerate(drop_log) if len(entry) == 0]
    selection = np.array(kept, int)
    out = BaseEpochs(
        info, data, events, event_id, tmin, tmax, baseline=baseline,
        selection=selection, drop_log=drop_log, proj=False,
        on_missing='ignore', metadata=metadata, verbose=verbose)
    out.drop_bad()
    return out
def concatenate_epochs(epochs_list, add_offset=True):
    """Concatenate a list of epochs into one epochs object.
    Parameters
    ----------
    epochs_list : list
        List of Epochs instances to concatenate (in order).
    add_offset : bool
        If True, a fixed offset is added to the event times from different
        Epochs sets, such that they are easy to distinguish after the
        concatenation.
        If False, the event times are unaltered during the concatenation.
    Returns
    -------
    epochs : instance of Epochs
        The result of the concatenation (first Epochs instance passed in).
    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # Concatenate with the data loaded, then build the final Epochs object.
    pieces = _concatenate_epochs(epochs_list, with_data=True,
                                 add_offset=add_offset)
    return _finish_concat(*pieces)
@verbose
def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None,
                      origin='auto', weight_all=True, int_order=8, ext_order=3,
                      destination=None, ignore_ref=False, return_mapping=False,
                      mag_scale=100., verbose=None):
    u"""Average data using Maxwell filtering, transforming using head positions.
    Parameters
    ----------
    epochs : instance of Epochs
        The epochs to operate on.
    %(maxwell_pos)s
    orig_sfreq : float | None
        The original sample frequency of the data (that matches the
        event sample numbers in ``epochs.events``). Can be ``None``
        if data have not been decimated or resampled.
    %(picks_all_data)s
    %(maxwell_origin)s
    weight_all : bool
        If True, all channels are weighted by the SSS basis weights.
        If False, only MEG channels are weighted, other channels
        receive uniform weight per epoch.
    %(maxwell_int)s
    %(maxwell_ext)s
    %(maxwell_reg)s
    %(maxwell_dest)s
    %(maxwell_ref)s
    return_mapping : bool
        If True, return the mapping matrix.
    %(maxwell_mag)s
        .. versionadded:: 0.13
    %(verbose)s
    Returns
    -------
    evoked : instance of Evoked
        The averaged epochs.
    See Also
    --------
    mne.preprocessing.maxwell_filter
    mne.chpi.read_head_pos
    Notes
    -----
    The Maxwell filtering version of this algorithm is described in [1]_,
    in section V.B "Virtual signals and movement correction", equations
    40-44. For additional validation, see [2]_.
    Regularization has not been added because in testing it appears to
    decrease dipole localization accuracy relative to using all components.
    Fine calibration and cross-talk cancellation, however, could be added
    to this algorithm based on user demand.
    .. versionadded:: 0.11
    References
    ----------
    .. [1] Taulu S. and Kajola M. "Presentation of electromagnetic
           multichannel data: The signal space separation method,"
           Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005.
    .. [2] Wehner DT, Hämäläinen MS, Mody M, Ahlfors SP. "Head movements
           of children in MEG: Quantification, effects on source
           estimation, and compensation. NeuroImage 40:541–550, 2008.
    """ # noqa: E501
    # Imported here to avoid a circular import with mne.preprocessing.
    from .preprocessing.maxwell import (_trans_sss_basis, _reset_meg_bads,
                                        _check_usable, _col_norm_pinv,
                                        _get_n_moments, _get_mf_picks_fix_mags,
                                        _prep_mf_coils, _check_destination,
                                        _remove_meg_projs, _get_coil_scale)
    if head_pos is None:
        raise TypeError('head_pos must be provided and cannot be None')
    from .chpi import head_pos_to_trans_rot_t
    if not isinstance(epochs, BaseEpochs):
        raise TypeError('epochs must be an instance of Epochs, not %s'
                        % (type(epochs),))
    orig_sfreq = epochs.info['sfreq'] if orig_sfreq is None else orig_sfreq
    orig_sfreq = float(orig_sfreq)
    # A raw head-position array is converted to (translations, rotations,
    # timestamps) triples.
    if isinstance(head_pos, np.ndarray):
        head_pos = head_pos_to_trans_rot_t(head_pos)
    trn, rot, t = head_pos
    del head_pos
    _check_usable(epochs)
    origin = _check_origin(origin, epochs.info, 'head')
    recon_trans = _check_destination(destination, epochs.info, True)
    logger.info('    Aligning and averaging up to %s epochs'
                % (len(epochs.events)))
    if not np.array_equal(epochs.events[:, 0], np.unique(epochs.events[:, 0])):
        raise RuntimeError('Epochs must have monotonically increasing events')
    info_to = epochs.info.copy()
    meg_picks, mag_picks, grad_picks, good_mask, _ = \
        _get_mf_picks_fix_mags(info_to, int_order, ext_order, ignore_ref)
    coil_scale, mag_scale = _get_coil_scale(
        meg_picks, mag_picks, grad_picks, mag_scale, info_to)
    n_channels, n_times = len(epochs.ch_names), len(epochs.times)
    other_picks = np.setdiff1d(np.arange(n_channels), meg_picks)
    data = np.zeros((n_channels, n_times))
    count = 0
    # keep only MEG w/bad channels marked in "info_from"
    info_from = pick_info(info_to, meg_picks[good_mask], copy=True)
    all_coils_recon = _prep_mf_coils(info_to, ignore_ref=ignore_ref)
    all_coils = _prep_mf_coils(info_from, ignore_ref=ignore_ref)
    # remove MEG bads in "to" info
    _reset_meg_bads(info_to)
    # set up variables
    w_sum = 0.
    n_in, n_out = _get_n_moments([int_order, ext_order])
    S_decomp = 0. # this will end up being a weighted average
    last_trans = None
    decomp_coil_scale = coil_scale[good_mask]
    exp = dict(int_order=int_order, ext_order=ext_order, head_frame=True,
               origin=origin)
    # NOTE(review): n_in is recomputed here, overwriting the value unpacked
    # above; both calls should agree but this looks redundant — confirm.
    n_in = _get_n_moments(int_order)
    for ei, epoch in enumerate(epochs):
        # Look up the head position active at this epoch's event time.
        event_time = epochs.events[epochs._current - 1, 0] / orig_sfreq
        use_idx = np.where(t <= event_time)[0]
        if len(use_idx) == 0:
            trans = info_to['dev_head_t']['trans']
        else:
            use_idx = use_idx[-1]
            trans = np.vstack([np.hstack([rot[use_idx], trn[[use_idx]].T]),
                               [[0., 0., 0., 1.]]])
        loc_str = ', '.join('%0.1f' % tr for tr in (trans[:3, 3] * 1000))
        # Reuse the SSS basis (and weight) when the device location has not
        # changed since the previous epoch.
        if last_trans is None or not np.allclose(last_trans, trans):
            logger.info('    Processing epoch %s (device location: %s mm)'
                        % (ei + 1, loc_str))
            reuse = False
            last_trans = trans
        else:
            logger.info('    Processing epoch %s (device location: same)'
                        % (ei + 1,))
            reuse = True
        epoch = epoch.copy() # because we operate inplace
        if not reuse:
            S = _trans_sss_basis(exp, all_coils, trans,
                                 coil_scale=decomp_coil_scale)
            # Get the weight from the un-regularized version (eq. 44)
            weight = np.linalg.norm(S[:, :n_in])
            # XXX Eventually we could do cross-talk and fine-cal here
            S *= weight
        S_decomp += S # eq. 41
        epoch[slice(None) if weight_all else meg_picks] *= weight
        data += epoch # eq. 42
        w_sum += weight
        count += 1
    del info_from
    mapping = None
    if count == 0:
        data.fill(np.nan)
    else:
        # Normalize the weighted sums into weighted averages.
        data[meg_picks] /= w_sum
        data[other_picks] /= w_sum if weight_all else count
        # Finalize weighted average decomp matrix
        S_decomp /= w_sum
        # Get recon matrix
        # (We would need to include external here for regularization to work)
        exp['ext_order'] = 0
        S_recon = _trans_sss_basis(exp, all_coils_recon, recon_trans)
        exp['ext_order'] = ext_order
        # We could determine regularization on basis of destination basis
        # matrix, restricted to good channels, as regularizing individual
        # matrices within the loop above does not seem to work. But in
        # testing this seemed to decrease localization quality in most cases,
        # so we do not provide the option here.
        S_recon /= coil_scale
        # Invert
        pS_ave = _col_norm_pinv(S_decomp)[0][:n_in]
        pS_ave *= decomp_coil_scale.T
        # Get mapping matrix
        mapping = np.dot(S_recon, pS_ave)
        # Apply mapping
        data[meg_picks] = np.dot(mapping, data[meg_picks[good_mask]])
    info_to['dev_head_t'] = recon_trans # set the reconstruction transform
    evoked = epochs._evoked_from_epoch_data(data, info_to, picks,
                                            n_events=count, kind='average',
                                            comment=epochs._name)
    _remove_meg_projs(evoked) # remove MEG projectors, they won't apply now
    logger.info('Created Evoked dataset from %s epochs' % (count,))
    return (evoked, mapping) if return_mapping else evoked
@verbose
def make_fixed_length_epochs(raw, duration=1., preload=False,
                             reject_by_annotation=True, verbose=None):
    """Divide continuous raw data into equal-sized consecutive epochs.
    Parameters
    ----------
    raw : instance of Raw
        Raw data to divide into segments.
    duration : float
        Duration of each epoch in seconds. Defaults to 1.
    %(preload)s
    %(reject_by_annotation_epochs)s
        .. versionadded:: 0.21.0
    %(verbose)s
    Returns
    -------
    epochs : instance of Epochs
        Segmented data.
    Notes
    -----
    .. versionadded:: 0.20
    """
    # One event (id 1) marks the onset of every fixed-length segment.
    segment_events = make_fixed_length_events(raw, 1, duration=duration)
    # Stop one sample short of ``duration`` so segments do not overlap.
    one_samp = 1. / raw.info['sfreq']
    return Epochs(raw, segment_events, event_id=[1], tmin=0,
                  tmax=duration - one_samp, baseline=None, preload=preload,
                  reject_by_annotation=reject_by_annotation, verbose=verbose)
| bsd-3-clause |
mavrix93/LightCurvesClassifier | lcc_web/web/interface/lcc_views/visualization.py | 1 | 10603 | import os
import numpy as np
import pandas as pd
from django.conf import settings
from django.shortcuts import render
from lcc.data_manager.package_reader import PackageReader
from lcc.stars_processing.tools.visualization import plotUnsupProbabSpace
from interface.helpers import getFields, load_test_stars
from interface.helpers import makeDesc
from interface.helpers import parse_combinations
from interface.helpers import parse_comp_stars
from interface.helpers import parse_stars
def stars(request):
    """Django view: plot light curves of uploaded (or sample) star files.

    On form submission, parses the uploaded files (or falls back to the
    bundled test sample) and renders each star's light curve; otherwise
    renders the empty browse page.
    """
    PAGE_TITLE = "Show light curves"
    PAGE_INFO = "Select dat files of light curves or fits of stars. Also you can use prepared sample - just don't select anything"
    if "sub" in request.POST:
        fi = request.FILES.getlist("my_file")
        try:
            if fi:
                sta = parse_stars(fi)
            else:
                # No upload: fall back to the first 5 bundled sample stars.
                sta = load_test_stars(os.path.join(settings.TEST_SAMPLE, "sample1"))[:5]
        except Exception as e:
            return render(request, 'interface/error_page.html', {"error_m": "Couldn't parse star files: {}".format(str(e))})
        lcs = []
        labels = []
        for st in sta:
            # Skip stars without a light curve attached.
            if st.lightCurve:
                lcs.append(
                    [st.lightCurve.time.tolist(), st.lightCurve.mag.tolist(), st.lightCurve.err.tolist()])
                labels.append(str(st.name))
    else:
        lcs, labels = [], []
    return render(request, "interface/browse.html", {"page_title": PAGE_TITLE,
                                                     "page_info": PAGE_INFO,
                                                     "lcs": lcs,
                                                     "labels": labels})
def unsup_clust(request):
    """Django view: run unsupervised clustering of stars in descriptor space.

    On submission, builds the selected descriptors/deciders from the POSTed
    parameters, computes space coordinates for the uploaded (or sample)
    stars, optionally fits the decider, and renders the probability-space
    plot; otherwise renders the parameter form.
    """
    PAGE_TITLE = "Unsupervised clustering"
    PAGE_INFO1 = '''There are text input per every parameter of descriptors and deciders. You have to specify just one <br>
    value.
    <br><br>
    For evaluating content as python code wrapp the code into "`". For example:<br><br>
    `True` - bool value (not string)<br>
    `7*6` - integer (42)<br>
    `[("b_mag","v_mag"),("r_mag","i_mag")]` - list of tuples of strings<br><br>
    It is possible to select multiple descriptors and deciders. <br>
    <br>
    NOTE that it raises error if loaded stars dont contain desired attribute (light curve, color index etc)
    '''
    PAGE_INFO2 = """After submiting you can aim courser to a point in probability plot to see additional<br>
    information about the star. You can also click on it to see the light curve"""
    if "descriptors_l" in request.POST:
        sample_files = request.FILES.getlist("sample_files")
        descriptor_names = request.POST.get("descriptors_l", "").split(";")
        deciders_names = request.POST.get("deciders_l", "").split(";")
        tuned_params, _static_params = parse_combinations(
            descriptor_names + deciders_names, request.POST, split_by=":")
        static_params = parse_comp_stars(request.FILES)
        # Only exact parameter values are supported here (one combination).
        if not (tuned_params and hasattr(tuned_params, "__iter__") and tuned_params[0] == {}):
            return render(request, 'interface/error_page.html',
                          {
                              "error_m": "Parameters ranges are no supported.<br>Insert just exact values.<br>Got %i combinations" % len(
                                  tuned_params)})
        # if tuned_params[0:
        # raise QueryInputError("Dont insert ranges, just exact values!")
        # Merge POSTed static params into the ones parsed from uploads.
        for key, value in _static_params.items():
            if key in static_params:
                static_params[key].update(value)
            else:
                static_params[key] = value
        try:
            if not sample_files:
                stars = load_test_stars(os.path.join(settings.TEST_SAMPLE, "sample1"))
            else:
                stars = parse_stars(sample_files)
        except Exception as e:
            return render(request, 'interface/error_page.html', {"error_m": "Couldn't parse star files<br><br>Error msg: %s" % str(e)})
        deciders = [desc for desc in PackageReader().getClasses(
            "unsup_deciders") if desc.__name__ in deciders_names]
        descriptors = [desc for desc in PackageReader().getClasses(
            "descriptors") if desc.__name__ in descriptor_names]
        ready_descriptors = makeDesc(descriptors, static_params)
        if deciders:
            act_decider = makeDesc(deciders, static_params)[0]
        lcs = []
        labels = []
        st_info = []
        # Collect light curves and hover-info strings per star.
        for st in stars:
            lc = st.lightCurve
            if lc:
                lab = st.name
                labels.append(lab)
                stkeys = list(st.more.keys())
                stval = list(st.more.values())
                # Split long metadata over two lines for the hover popup.
                if len(stkeys) >= 3:
                    inf = lab + "<br>" + \
                        "\t|\t".join(stkeys[:3]) + "<br>" + \
                        "\t|\t".join([str(x) for x in stval[:3]])
                    inf += "<br>" + \
                        "\t|\t".join(stkeys[3:]) + "<br>" + \
                        "\t|\t".join([str(x) for x in stval[3:]])
                else:
                    inf = lab + "<br>" + \
                        "\t|\t".join(stkeys) + "<br>" + \
                        "\t|\t".join([str(x) for x in stval])
                st_info.append(str(inf))
                lcs.append([lc.time.tolist(), lc.mag.tolist()])
        # Build the combined coordinate matrix, one column per descriptor dim.
        coords = []
        for desc in ready_descriptors:
            c = desc.getSpaceCoords([st for st in stars if st.lightCurve])
            if c and not hasattr(c[0], "__iter__"):
                c = [[g] for g in c]
            if not coords:
                coords = c
            elif c and c[0]:
                if hasattr(c[0], "__iter__"):
                    coords = [list(a)+list(b) for a,b in zip(coords, c)]
                else:
                    coords = [[a] + [b] for a, b in zip(coords, c)]
        # Drop rows with missing values before clustering.
        df_coords = pd.DataFrame(coords)
        df_coords.fillna(np.NaN)
        df_coords.dropna(inplace=True)
        space_coords = df_coords.values
        if not len(space_coords):
            return render(request, 'interface/error_page.html',
                          {
                              "error_m": "No space coordinates obtained from given stars. Check if input data files contain desired attribute."})
        axis = []
        for desc in ready_descriptors:
            if hasattr(desc.LABEL, "__iter__"):
                axis += desc.LABEL
            else:
                axis.append(desc.LABEL)
        all_axis = axis
        all_space_coords = [x.tolist() for x in space_coords.T]
        if deciders:
            act_decider.learn(space_coords)
            # The plot helper returns 3, 4 or 5 items depending on whether a
            # probability grid and/or a dimensionality reduction was produced.
            _probab_data = plotUnsupProbabSpace(
                space_coords, act_decider, "return", N=100)
            if _probab_data and len(_probab_data) == 4:
                xx, yy, Z, centroids = _probab_data
                probab_data = [xx.tolist(), yy.tolist(), Z.tolist()]
            elif _probab_data and len(_probab_data) == 5:
                xx, yy, Z, centroids, space_coords = _probab_data
                probab_data = [xx.tolist(), yy.tolist(), Z.tolist()]
                axis = ["Reduced dim 1", "Reduced dim 2"]
            elif _probab_data and len(_probab_data) == 3:
                xx, yy, centroids = _probab_data
                probab_data = [xx.tolist(), yy.tolist()]
            else:
                probab_data = []
                centroids = np.array([])
            centroids = list([c.tolist() for c in centroids.T])
            plot_title = act_decider.__class__.__name__
        else:
            probab_data = [[], [], []]
            centroids = [[], []]
            plot_title = "Space coordinates"
        coo_data = [x.tolist() for x in space_coords.T]
        return render(request, 'interface/unsupervised.html', {"page_title": PAGE_TITLE,
                                                               "page_info": PAGE_INFO2,
                                                               "point_labels": [],
                                                               "probab_data": probab_data,
                                                               "space_coords": all_space_coords,
                                                               "coo_data": coo_data,
                                                               "zeroes": [0 for _ in coo_data[0]],
                                                               "centroids": centroids,
                                                               "probab_plot_axis": axis,
                                                               "all_axis": all_axis,
                                                               "coo_plot_labels": st_info,
                                                               "probab_plot_title": plot_title,
                                                               "lcs": lcs,
                                                               "labels": [str(l) for l in labels]})
    else:
        # GET (or no submission): render the parameter form with doc popups.
        descriptors = PackageReader().getClassesDict("descriptors")
        deciders = PackageReader().getClassesDict("unsup_deciders")
        descriptors_fields = getFields(descriptors)
        deciders_fields = getFields(deciders)
        desc_docs = deciders.copy()
        desc_docs.update(descriptors)
        click_obj_id = []
        popup_txt_id = []
        popup_cont = []
        for nam, val in descriptors_fields + deciders_fields:
            if val:
                click_obj_id.append(nam + "_head")
                popup_txt_id.append(nam + "_popup")
                doc_txt = desc_docs[nam].__doc__
                doc_txt = doc_txt.replace(" ", "	")
                doc_txt = doc_txt.replace("\n", "<br>")
                popup_cont.append(doc_txt)
        to_inter = zip(click_obj_id, popup_txt_id, popup_cont)
        return render(request, 'interface/unsupervised.html', {"page_title": PAGE_TITLE,
                                                               "page_info": PAGE_INFO1,
                                                               "descriptors": list(descriptors.keys()),
                                                               "deciders": list(deciders.keys()),
                                                               "descriptors_fields": descriptors_fields,
                                                               "deciders_fields": deciders_fields,
                                                               "to_inter": to_inter})
| mit |
isomerase/mozziesniff | data/experiments/Dickinson_experiments/dick_pickle.py | 2 | 5178 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 22:17:13 2015
@author: Richard Decal
"""
from matplotlib import pyplot as plt
import seaborn as sns
#sns.set_palette("muted", 8)
import pickle
# Path to the pickled histogram data produced upstream.
fname = './mozzie_histograms.pickle'
# Pickle streams are binary: open in 'rb' so this works on Python 3 and on
# Windows as well (text mode would corrupt/ reject the byte stream).
with open(fname, 'rb') as f:
    mozzie_hists = pickle.load(f)
def main(plotting = True):
    """Plot velocity and acceleration histograms (odor on/off) and return
    the odor-off histogram dict.

    Reads the module-level ``mozzie_hists`` pickle data; when ``plotting``
    is True, draws a 2x2 grid (velocity left column, acceleration right;
    odor off top row, odor on bottom) and saves it to disk.
    """
    odor_off = mozzie_hists['odor_off_hists']
    odor_on = mozzie_hists['odor_on_hists']
    ## dict structure
    # 'acceleration'
    # 'y'
    # 'x'
    # 'z'
    # 'abs'
    #'velocity'
    # 'y'
    # 'normed_cts', 'bin_centers'
    # 'x'
    # 'z'
    # 'abs'
    #'angular_velocity'
    # 'y'
    # 'x'
    # 'z'
    # 'abs'
    if plotting is True:
        # Plot!
        fig, axs = sns.plt.subplots(2, 2)#, tight_layout=True)
        #### Velocity
        ### v Odor off
        # NOTE(review): x bin centers are reversed ([::-1]) for both rows —
        # presumably to flip the x-velocity axis; confirm intent.
        #x
        axs[0,0].plot(odor_off["velocity"]['x']['bin_centers'][::-1], odor_off["velocity"]['x']['normed_cts'], color=sns.desaturate("blue", .4), lw=2, label='$\mathbf{\dot{x}}$')
        #y
        axs[0,0].plot(odor_off["velocity"]['y']['bin_centers'], odor_off["velocity"]['y']['normed_cts'], color=sns.desaturate("green", .4), lw=2, label='$\mathbf{\dot{y}}$')
        #z
        axs[0,0].plot(odor_off["velocity"]['z']['bin_centers'], odor_off["velocity"]['z']['normed_cts'], color=sns.desaturate("red", .4), lw=2, label='$\mathbf{\dot{z}}$')
        #abs
        axs[0,0].plot(odor_off["velocity"]['abs']['bin_centers'], odor_off["velocity"]['abs']['normed_cts'], color=sns.desaturate("black", .4), lw=2, label='$\mathbf{\| v \|}$')
        axs[0,0].set_ylabel("Probabilities (odor off)")
        axs[0,0].legend()
        #plt.savefig("./Agent Model/figs/DickinsonFigs/odorOFF_velo distributions.png")
        ###v Odor on
        #x
        axs[1,0].plot(odor_on["velocity"]['x']['bin_centers'][::-1], odor_on["velocity"]['x']['normed_cts'], color=sns.desaturate("blue", .4), lw=2, label='$\mathbf{\dot{x}}$')
        #y
        axs[1,0].plot(odor_on["velocity"]['y']['bin_centers'], odor_on["velocity"]['y']['normed_cts'], color=sns.desaturate("green", .4), lw=2, label='$\mathbf{\dot{y}}$')
        #z
        axs[1,0].plot(odor_on["velocity"]['z']['bin_centers'], odor_on["velocity"]['z']['normed_cts'], color=sns.desaturate("red", .4), lw=2, label='$\mathbf{\dot{z}}$')
        #abs
        axs[1,0].plot(odor_on["velocity"]['abs']['bin_centers'], odor_on["velocity"]['abs']['normed_cts'], color=sns.desaturate("black", .4), lw=2, label='$\| \mathbf{v} \|$')
        axs[1,0].set_ylabel("Probabilities (odor on)") # setting for whole row
        axs[1,0].set_xlabel("Velocity Distributions ($m/s$)")# setting for whole col
        axs[1,0].legend()
        #plt.savefig("./Agent Model/figs/DickinsonFigs/odorON_velo distributions.png")
        #### Acceleration
        ###a Odor off
        #x
        axs[0,1].plot(odor_off["acceleration"]['x']['bin_centers'], odor_off["acceleration"]['x']['normed_cts'], color=sns.desaturate("blue", .4), lw=2, label='$\mathbf{\ddot{x}}$')
        #sns.barplot(odor_off["acceleration"]['x']['bin_centers'], odor_off["acceleration"]['x']['normed_cts'])
        #y
        axs[0,1].plot(odor_off["acceleration"]['y']['bin_centers'], odor_off["acceleration"]['y']['normed_cts'], color=sns.desaturate("green", .4), lw=2, label='$\mathbf{\ddot{y}}$')
        #z
        axs[0,1].plot(odor_off["acceleration"]['z']['bin_centers'], odor_off["acceleration"]['z']['normed_cts'], color=sns.desaturate("red", .4), lw=2, label='$\mathbf{\ddot{z}}$')
        #abs
        axs[0, 1].plot(odor_off["acceleration"]['abs']['bin_centers'], odor_off["acceleration"]['abs']['normed_cts'], color=sns.desaturate("black", .4), lw=2, label='$\| \mathbf{a} \|$')
        axs[0, 1].legend()
        #plt.savefig("./Agent Model/figs/DickinsonFigs/odorOFF_accel distributions.png")
        ###a Odor on
        #x
        axs[1, 1].plot(odor_on["acceleration"]['x']['bin_centers'], odor_on["acceleration"]['x']['normed_cts'], color=sns.desaturate("blue", .4), lw=2, label='$\mathbf{\ddot{x}}$')
        #sns.barplot(odor_off["acceleration"]['x']['bin_centers'], odor_off["acceleration"]['x']['normed_cts'])
        #y
        axs[1, 1].plot(odor_on["acceleration"]['y']['bin_centers'], odor_on["acceleration"]['y']['normed_cts'], color=sns.desaturate("green", .4), lw=2, label='$\mathbf{\ddot{y}}$')
        #z
        axs[1, 1].plot(odor_on["acceleration"]['z']['bin_centers'], odor_on["acceleration"]['z']['normed_cts'], color=sns.desaturate("red", .4), lw=2, label='$\mathbf{\ddot{z}}$')
        #abs
        axs[1,1].plot(odor_on["acceleration"]['abs']['bin_centers'], odor_on["acceleration"]['abs']['normed_cts'], color=sns.desaturate("black", .4), lw=2, label='$\| \mathbf{a} \|$')
        axs[1,1].set_xlabel("Acceleration Distributions ($m^s/s$)")
        axs[1,1].legend()
        fig.suptitle("Dickinson Distributions", fontsize=14)
        plt.savefig("Dicks distributions.png")
    return odor_off
# Script entry point.
# Bug fixes: the original tested `'__name__' is '__main__'`, which compares
# two different string *literals* by identity and is therefore always False,
# so main() never ran.  The correct guard compares the module variable
# __name__ for equality.  `print "hi"` (Py2 statement) is rewritten as a
# call, which is valid on both Python 2 and 3.
if __name__ == '__main__':
    print("hi")
    odor_off = main(plotting=True)
mmoiozo/IROS | sw/airborne/test/ahrs/ahrs_utils.py | 86 | 4923 | #! /usr/bin/env python
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import subprocess
import numpy as np
import matplotlib.pyplot as plt
def run_simulation(ahrs_type, build_opt, traj_nb):
    """Build the AHRS-on-synthetic-data binary, run it, and parse its output.

    Parameters
    ----------
    ahrs_type : str
        Suffix appended to ``AHRS_TYPE_`` and passed to make.
    build_opt : list of str
        Extra arguments appended to the make command line.
    traj_nb : int or str
        Trajectory number forwarded to ``./run_ahrs_on_synth``.

    Returns
    -------
    numpy.rec.array
        One record per output line, with true/estimated attitude, rates
        and gyro biases (see ``ahrs_data_type`` below for the fields).

    Fixes vs. the original: ``xrange`` (Python-2-only, contradicting the
    ``print_function`` future import) is replaced by ``range``;
    ``universal_newlines=True`` makes the subprocess pipes text streams on
    Python 3 (they would be ``bytes`` and break the string handling below);
    ``np.cast`` (removed in NumPy 2.0) is replaced by ``np.asarray``.
    """
    print("\nBuilding ahrs")
    args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_" + ahrs_type] + build_opt
    p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                         shell=False, universal_newlines=True)
    outputlines = p.stdout.readlines()
    p.wait()
    for i in outputlines:
        print(" # " + i, end=' ')
    print()

    print("Running simulation")
    print(" using traj " + str(traj_nb))
    p = subprocess.Popen(args=["./run_ahrs_on_synth", str(traj_nb)],
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                         shell=False, universal_newlines=True)
    outputlines = p.stdout.readlines()
    p.wait()

    # Record layout of one simulator output line: time, then true and
    # estimated Euler angles, body rates and gyro biases.
    ahrs_data_type = [('time', 'float32'),
                      ('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
                      ('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
                      ('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
                      ('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
                      ('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
                      ('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')]
    mydescr = np.dtype(ahrs_data_type)
    # One accumulator list per record field.
    data = [[] for dummy in range(len(mydescr))]
    for line in outputlines:
        if line.startswith("#"):
            # Comment lines from the simulator are echoed, not parsed.
            print(" " + line, end=' ')
        else:
            fields = line.strip().split(' ')
            for i, number in enumerate(fields):
                data[i].append(number)
    print()
    for i in range(len(mydescr)):
        # Convert the collected strings to the field's dtype
        # (np.asarray is the documented replacement for np.cast[...]).
        data[i] = np.asarray(data[i], dtype=mydescr[i])
    return np.rec.array(data, dtype=mydescr)
def plot_simulation_results(plot_true_state, lsty, label, sim_res):
    """Draw the 3x3 grid of AHRS state plots (angles, rates, biases).

    Subplot k (1-based, row-major) shows field ``names[k-1]``: the
    estimated value is plotted with style ``lsty`` and, when
    ``plot_true_state`` is set, the true value is overlaid as 'r--'.
    Only the first subplot carries the legend entry ``label``.
    """
    print("Plotting Results")
    # Field names in subplot order: Euler angles, body rates, gyro biases.
    names = ('phi', 'theta', 'psi', 'p', 'q', 'r', 'bp', 'bq', 'br')
    for idx, name in enumerate(names, start=1):
        plt.subplot(3, 3, idx)
        estimate = getattr(sim_res, name + '_ahrs')
        if idx == 1:
            plt.plot(sim_res.time, estimate, lsty, label=label)
        else:
            plt.plot(sim_res.time, estimate, lsty)
        # y-axis label only on the left column; angles in the first row.
        if idx == 1:
            plt.ylabel('degres')
        elif idx in (4, 7):
            plt.ylabel('degres/s')
        # x-axis label only on the bottom row.
        if idx >= 7:
            plt.xlabel('time in s')
        plt.title(name)
        if idx == 1:
            plt.legend()
    if plot_true_state:
        for idx, name in enumerate(names, start=1):
            plt.subplot(3, 3, idx)
            plt.plot(sim_res.time, getattr(sim_res, name + '_true'), 'r--')
def show_plot():
    """Display all figures created so far (blocking until closed)."""
    plt.show()
| gpl-2.0 |
potash/scikit-learn | sklearn/exceptions.py | 14 | 4945 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
# Public names exported by ``from sklearn.exceptions import *``.
__all__ = ['NotFittedError',
           'ChangedBehaviorWarning',
           'ConvergenceWarning',
           'DataConversionWarning',
           'DataDimensionalityWarning',
           'EfficiencyWarning',
           'FitFailedWarning',
           'NonBLASDotWarning',
           'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
    """Raised when an estimator is used before it has been fitted.

    Inheriting from both ValueError and AttributeError keeps existing
    ``except`` clauses that caught either exception working (backward
    compatibility with older scikit-learn versions).

    .. versionchanged:: 0.18
       Moved from sklearn.utils.validation.
    """
class ChangedBehaviorWarning(UserWarning):
    """Warning emitted to notify the user that some behavior has changed.

    .. versionchanged:: 0.18
       Moved from sklearn.base.
    """
class ConvergenceWarning(UserWarning):
    """Warning raised when an iterative procedure fails to converge.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.
    """
class DataConversionWarning(UserWarning):
    """Warning raised when input data is implicitly converted.

    Emitted whenever input must be converted or reinterpreted in a way the
    caller might not expect, for instance when:

    - an integer array is passed to code expecting float input and is
      converted;
    - a non-copying operation was requested but a copy was required to meet
      the implementation's data-type expectations;
    - the shape of the input can be interpreted in more than one way.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.validation.
    """
class DataDimensionalityWarning(UserWarning):
    """Warning about potential problems with data dimensionality.

    For example, random projection raises this when the requested number of
    components exceeds the number of input features, meaning the projection
    cannot actually reduce the dimensionality of the problem.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.
    """
class EfficiencyWarning(UserWarning):
    """Warning that a computation may be less efficient than expected.

    The reason for the inefficiency may be included in the warning message.
    More specific warnings may subclass this one.

    .. versionadded:: 0.18
    """
class FitFailedWarning(RuntimeWarning):
    """Warning issued when fitting an estimator raised an error.

    Used by the meta-estimators GridSearchCV and RandomizedSearchCV and by
    the cross-validation helper cross_val_score to report (instead of
    propagate) an exception raised while fitting the estimator; the score
    for the failing fit is then set to ``error_score``.

    .. versionchanged:: 0.18
       Moved from sklearn.cross_validation.
    """
class NonBLASDotWarning(EfficiencyWarning):
    """Warning that a dot product fell back to a non-BLAS implementation.

    Notifies the user that BLAS was not used for the dot operation, so
    performance may be degraded.

    .. versionchanged:: 0.18
       Moved from sklearn.utils.validation; now extends EfficiencyWarning.
    """
class UndefinedMetricWarning(UserWarning):
    """Warning raised when a metric is undefined for the given input.

    .. versionchanged:: 0.18
       Moved from sklearn.base.
    """
| bsd-3-clause |
mwv/scikit-learn | sklearn/preprocessing/label.py | 137 | 27165 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
# Lazy (iterator) versions of zip/map from six for uniform Python 2/3
# behavior; these deliberately shadow the builtins module-wide.
zip = six.moves.zip
map = six.moves.map
# Public API of this module.
__all__ = [
    'label_binarize',
    'LabelBinarizer',
    'LabelEncoder',
    'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
    """Guard against a searchsorted-on-unicode bug in old NumPy.

    The bug was fixed upstream before 1.7.0:
    https://github.com/numpy/numpy/pull/243
    """
    is_unicode = labels.dtype.kind == 'U'
    if is_unicode and np_version[:3] < (1, 7, 0):
        raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
                           " on unicode data correctly. Please upgrade"
                           " NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
    """Encode target labels with integer codes from 0 to n_classes - 1.

    Read more in the :ref:`User Guide <preprocessing_targets>`.

    Attributes
    ----------
    classes_ : array of shape (n_class,)
        Holds the label for each class.

    Examples
    --------
    `LabelEncoder` can be used to normalize labels:

    >>> from sklearn import preprocessing
    >>> le = preprocessing.LabelEncoder()
    >>> le.fit([1, 2, 2, 6])
    LabelEncoder()
    >>> le.classes_
    array([1, 2, 6])
    >>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
    array([0, 0, 1, 2]...)
    >>> le.inverse_transform([0, 0, 1, 2])
    array([1, 1, 2, 6])

    It also handles non-numerical labels, as long as they are hashable and
    comparable:

    >>> le = preprocessing.LabelEncoder()
    >>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
    LabelEncoder()
    >>> list(le.classes_)
    ['amsterdam', 'paris', 'tokyo']
    >>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
    array([2, 2, 1]...)
    >>> list(le.inverse_transform([2, 2, 1]))
    ['tokyo', 'tokyo', 'paris']
    """

    def fit(self, y):
        """Learn the sorted set of unique labels present in ``y``.

        Parameters
        ----------
        y : array-like of shape (n_samples,)
            Target values.

        Returns
        -------
        self : returns an instance of self.
        """
        flat = column_or_1d(y, warn=True)
        _check_numpy_unicode_bug(flat)
        self.classes_ = np.unique(flat)
        return self

    def fit_transform(self, y):
        """Learn the label set and return the encoded labels in one pass.

        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.

        Returns
        -------
        y : array-like of shape [n_samples]
        """
        flat = column_or_1d(y, warn=True)
        _check_numpy_unicode_bug(flat)
        # np.unique with return_inverse gives both the sorted classes and
        # the per-sample integer codes in a single call.
        self.classes_, encoded = np.unique(flat, return_inverse=True)
        return encoded

    def transform(self, y):
        """Map labels to their learned integer codes.

        Parameters
        ----------
        y : array-like of shape [n_samples]
            Target values.

        Returns
        -------
        y : array-like of shape [n_samples]
        """
        check_is_fitted(self, 'classes_')
        seen = np.unique(y)
        _check_numpy_unicode_bug(seen)
        # Any label of y outside the fitted class set is an error.
        diff = np.setdiff1d(seen, self.classes_)
        if len(diff):
            raise ValueError("y contains new labels: %s" % str(diff))
        return np.searchsorted(self.classes_, y)

    def inverse_transform(self, y):
        """Map integer codes back to the original labels.

        Parameters
        ----------
        y : numpy array of shape [n_samples]
            Target values.

        Returns
        -------
        y : numpy array of shape [n_samples]
        """
        check_is_fitted(self, 'classes_')
        diff = np.setdiff1d(y, np.arange(len(self.classes_)))
        if diff:
            raise ValueError("y contains new labels: %s" % str(diff))
        codes = np.asarray(y)
        return self.classes_[codes]
class LabelBinarizer(BaseEstimator, TransformerMixin):
    """Binarize labels in a one-vs-all fashion

    Several regression and binary classification algorithms are
    available in the scikit. A simple way to extend these algorithms
    to the multi-class classification case is to use the so-called
    one-vs-all scheme.
    At learning time, this simply consists in learning one regressor
    or binary classifier per class. In doing so, one needs to convert
    multi-class labels to binary labels (belong or does not belong
    to the class). LabelBinarizer makes this process easy with the
    transform method.
    At prediction time, one assigns the class for which the corresponding
    model gave the greatest confidence. LabelBinarizer makes this easy
    with the inverse_transform method.
    Read more in the :ref:`User Guide <preprocessing_targets>`.

    Parameters
    ----------
    neg_label : int (default: 0)
        Value with which negative labels must be encoded.
    pos_label : int (default: 1)
        Value with which positive labels must be encoded.
    sparse_output : boolean (default: False)
        True if the returned array from transform is desired to be in sparse
        CSR format.

    Attributes
    ----------
    classes_ : array of shape [n_class]
        Holds the label for each class.
    y_type_ : str,
        Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible type are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
    multilabel_ : boolean
        True if the transformer was fitted on a multilabel rather than a
        multiclass set of labels. The ``multilabel_`` attribute is deprecated
        and will be removed in 0.18
    sparse_input_ : boolean,
        True if the input data to transform is given as a sparse matrix, False
        otherwise.
    indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator
        and is sparse, None otherwise. The ``indicator_matrix_`` attribute is
        deprecated as of version 0.16 and will be removed in 0.18

    Examples
    --------
    >>> from sklearn import preprocessing
    >>> lb = preprocessing.LabelBinarizer()
    >>> lb.fit([1, 2, 6, 4, 2])
    LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
    >>> lb.classes_
    array([1, 2, 4, 6])
    >>> lb.transform([1, 6])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1]])

    Binary targets transform to a column vector

    >>> lb = preprocessing.LabelBinarizer()
    >>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
    array([[1],
           [0],
           [0],
           [1]])

    Passing a 2D matrix for multilabel classification

    >>> import numpy as np
    >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
    LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
    >>> lb.classes_
    array([0, 1, 2])
    >>> lb.transform([0, 1, 2, 1])
    array([[1, 0, 0],
           [0, 1, 0],
           [0, 0, 1],
           [0, 1, 0]])

    See also
    --------
    label_binarize : function to perform the transform operation of
        LabelBinarizer with fixed classes.
    """

    def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
        # Validate eagerly so misconfiguration fails at construction time,
        # not at the first call to transform.
        if neg_label >= pos_label:
            raise ValueError("neg_label={0} must be strictly less than "
                             "pos_label={1}.".format(neg_label, pos_label))
        # CSR sparse output can only represent implicit zeros as negatives.
        if sparse_output and (pos_label == 0 or neg_label != 0):
            raise ValueError("Sparse binarization is only supported with non "
                             "zero pos_label and zero neg_label, got "
                             "pos_label={0} and neg_label={1}"
                             "".format(pos_label, neg_label))
        self.neg_label = neg_label
        self.pos_label = pos_label
        self.sparse_output = sparse_output

    def fit(self, y):
        """Fit label binarizer

        Parameters
        ----------
        y : numpy array of shape (n_samples,) or (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1,
            represents multilabel classification.

        Returns
        -------
        self : returns an instance of self.
        """
        self.y_type_ = type_of_target(y)
        if 'multioutput' in self.y_type_:
            raise ValueError("Multioutput target data is not supported with "
                             "label binarization")
        if _num_samples(y) == 0:
            raise ValueError('y has 0 samples: %r' % y)
        # Remember sparseness so inverse_transform can mirror the input form.
        self.sparse_input_ = sp.issparse(y)
        self.classes_ = unique_labels(y)
        return self

    def transform(self, y):
        """Transform multi-class labels to binary labels

        The output of transform is sometimes referred to by some authors as
        the 1-of-K coding scheme.

        Parameters
        ----------
        y : numpy array or sparse matrix of shape (n_samples,) or
            (n_samples, n_classes) Target values. The 2-d matrix should only
            contain 0 and 1, represents multilabel classification. Sparse
            matrix can be CSR, CSC, COO, DOK, or LIL.

        Returns
        -------
        Y : numpy array or CSR matrix of shape [n_samples, n_classes]
            Shape will be [n_samples, 1] for binary problems.
        """
        check_is_fitted(self, 'classes_')
        # A multilabel y is only valid if the binarizer was fitted on
        # multilabel data.
        y_is_multilabel = type_of_target(y).startswith('multilabel')
        if y_is_multilabel and not self.y_type_.startswith('multilabel'):
            raise ValueError("The object was not fitted with multilabel"
                             " input.")
        return label_binarize(y, self.classes_,
                              pos_label=self.pos_label,
                              neg_label=self.neg_label,
                              sparse_output=self.sparse_output)

    def inverse_transform(self, Y, threshold=None):
        """Transform binary labels back to multi-class labels

        Parameters
        ----------
        Y : numpy array or sparse matrix with shape [n_samples, n_classes]
            Target values. All sparse matrices are converted to CSR before
            inverse transformation.
        threshold : float or None
            Threshold used in the binary and multi-label cases.
            Use 0 when:
                - Y contains the output of decision_function (classifier)
            Use 0.5 when:
                - Y contains the output of predict_proba
            If None, the threshold is assumed to be half way between
            neg_label and pos_label.

        Returns
        -------
        y : numpy array or CSR matrix of shape [n_samples] Target values.

        Notes
        -----
        In the case when the binary labels are fractional
        (probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows to use the output of a
        linear model's decision_function method directly as the input
        of inverse_transform.
        """
        check_is_fitted(self, 'classes_')
        if threshold is None:
            # Default decision boundary: midpoint of the two encodings.
            threshold = (self.pos_label + self.neg_label) / 2.
        if self.y_type_ == "multiclass":
            # Multiclass decoding uses the per-row maximum, not a threshold.
            y_inv = _inverse_binarize_multiclass(Y, self.classes_)
        else:
            y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
                                                   self.classes_, threshold)
        # Return in the same (sparse/dense) form the estimator was fitted on.
        if self.sparse_input_:
            y_inv = sp.csr_matrix(y_inv)
        elif sp.issparse(y_inv):
            y_inv = y_inv.toarray()
        return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
    """Binarize labels in a one-vs-all fashion

    Several regression and binary classification algorithms are
    available in the scikit. A simple way to extend these algorithms
    to the multi-class classification case is to use the so-called
    one-vs-all scheme.
    This function makes it possible to compute this transformation for a
    fixed set of class labels known ahead of time.

    Fixes vs. the original: ``dtype=np.int`` is replaced by the builtin
    ``int`` (``np.int`` was a deprecated alias removed in NumPy 1.24), and
    the classes-mismatch error message had a missing space between two
    concatenated string literals ("...{1}found...") plus a spelling error.

    Parameters
    ----------
    y : array-like
        Sequence of integer labels or multilabel data to encode.
    classes : array-like of shape [n_classes]
        Uniquely holds the label for each class.
    neg_label : int (default: 0)
        Value with which negative labels must be encoded.
    pos_label : int (default: 1)
        Value with which positive labels must be encoded.
    sparse_output : boolean (default: False),
        Set to true if output binary array is desired in CSR sparse format

    Returns
    -------
    Y : numpy array or CSR matrix of shape [n_samples, n_classes]
        Shape will be [n_samples, 1] for binary problems.

    Examples
    --------
    >>> from sklearn.preprocessing import label_binarize
    >>> label_binarize([1, 6], classes=[1, 2, 4, 6])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1]])

    The class ordering is preserved:

    >>> label_binarize([1, 6], classes=[1, 6, 4, 2])
    array([[1, 0, 0, 0],
           [0, 1, 0, 0]])

    Binary targets transform to a column vector

    >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
    array([[1],
           [0],
           [0],
           [1]])

    See also
    --------
    LabelBinarizer : class used to wrap the functionality of label_binarize
        and allow for fitting to classes independently of the transform
        operation
    """
    if not isinstance(y, list):
        # XXX Workaround that will be removed when list of list format is
        # dropped
        y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
    else:
        if _num_samples(y) == 0:
            raise ValueError('y has 0 samples: %r' % y)
    if neg_label >= pos_label:
        raise ValueError("neg_label={0} must be strictly less than "
                         "pos_label={1}.".format(neg_label, pos_label))
    if (sparse_output and (pos_label == 0 or neg_label != 0)):
        raise ValueError("Sparse binarization is only supported with non "
                         "zero pos_label and zero neg_label, got "
                         "pos_label={0} and neg_label={1}"
                         "".format(pos_label, neg_label))

    # To account for pos_label == 0 in the dense case: encode with a
    # temporary non-zero positive value and swap back to 0 at the end.
    pos_switch = pos_label == 0
    if pos_switch:
        pos_label = -neg_label

    y_type = type_of_target(y)
    if 'multioutput' in y_type:
        raise ValueError("Multioutput target data is not supported with label "
                         "binarization")
    if y_type == 'unknown':
        raise ValueError("The type of target data is not known")

    n_samples = y.shape[0] if sp.issparse(y) else len(y)
    n_classes = len(classes)
    classes = np.asarray(classes)

    if y_type == "binary":
        if len(classes) == 1:
            # Degenerate single-class problem: everything is "negative".
            Y = np.zeros((len(y), 1), dtype=int)
            Y += neg_label
            return Y
        elif len(classes) >= 3:
            # Binary-looking y but more than two declared classes:
            # treat as multiclass.
            y_type = "multiclass"

    sorted_class = np.sort(classes)
    if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
        raise ValueError("classes {0} mismatch with the labels {1} "
                         "found in the data".format(classes, unique_labels(y)))

    if y_type in ("binary", "multiclass"):
        y = column_or_1d(y)
        # Pick out the known labels from y; unknown labels simply get
        # all-negative rows.
        y_in_classes = in1d(y, classes)
        y_seen = y[y_in_classes]
        indices = np.searchsorted(sorted_class, y_seen)
        indptr = np.hstack((0, np.cumsum(y_in_classes)))
        data = np.empty_like(indices)
        data.fill(pos_label)
        Y = sp.csr_matrix((data, indices, indptr),
                          shape=(n_samples, n_classes))
    elif y_type == "multilabel-indicator":
        Y = sp.csr_matrix(y)
        if pos_label != 1:
            data = np.empty_like(Y.data)
            data.fill(pos_label)
            Y.data = data
    else:
        raise ValueError("%s target data is not supported with label "
                         "binarization" % y_type)

    if not sparse_output:
        Y = Y.toarray()
        Y = astype(Y, int, copy=False)
        if neg_label != 0:
            Y[Y == 0] = neg_label
        if pos_switch:
            Y[Y == pos_label] = 0
    else:
        Y.data = astype(Y.data, int, copy=False)

    # Preserve the caller's label ordering (classes may be unsorted).
    if np.any(classes != sorted_class):
        indices = np.searchsorted(sorted_class, classes)
        Y = Y[:, indices]

    if y_type == "binary":
        # Binary problems are reported as a single column for the
        # positive class.
        if sparse_output:
            Y = Y.getcol(-1)
        else:
            Y = Y[:, -1].reshape((-1, 1))

    return Y
def _inverse_binarize_multiclass(y, classes):
    """Inverse label binarization transformation for multiclass.

    Multiclass uses the maximal score instead of a threshold.
    """
    classes = np.asarray(classes)
    if sp.issparse(y):
        # Find the argmax for each row in y where y is a CSR matrix
        y = y.tocsr()
        n_samples, n_outputs = y.shape
        outputs = np.arange(n_outputs)
        # Per-row maximum of the stored (explicit) values only.
        row_max = sparse_min_max(y, 1)[1]
        # Number of stored values per row.
        row_nnz = np.diff(y.indptr)
        y_data_repeated_max = np.repeat(row_max, row_nnz)
        # picks out all indices obtaining the maximum per row
        y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
        # For corner case where last row has a max of 0
        if row_max[-1] == 0:
            y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
        # Gets the index of the first argmax in each row from y_i_all_argmax
        index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
        # first argmax of each row
        y_ind_ext = np.append(y.indices, [0])
        y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
        # Handle rows of all 0
        y_i_argmax[np.where(row_nnz == 0)[0]] = 0
        # Handles rows with max of 0 that contain negative numbers: the true
        # argmax is then an *implicit* zero, i.e. the first column not stored
        # in that row.
        samples = np.arange(n_samples)[(row_nnz > 0) &
                                       (row_max.ravel() == 0)]
        for i in samples:
            ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
            y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
        return classes[y_i_argmax]
    else:
        # Dense case: plain row-wise argmax, clipped into the classes array.
        return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
    """Transform between iterable of iterables and a multilabel format

    Although a list of sets or tuples is a very intuitive format for
    multilabel data, it is unwieldy to process. This transformer converts
    between this intuitive format and the supported multilabel format: a
    (samples x classes) binary matrix indicating the presence of a class
    label.

    Fix vs. the original: ``dtype=np.int`` (deprecated alias removed in
    NumPy 1.24) is replaced by the builtin ``int`` in ``fit`` and
    ``fit_transform``; np.int always aliased the builtin, so dtype
    selection is unchanged.

    Parameters
    ----------
    classes : array-like of shape [n_classes] (optional)
        Indicates an ordering for the class labels
    sparse_output : boolean (default: False),
        Set to true if output binary array is desired in CSR sparse format

    Attributes
    ----------
    classes_ : array of labels
        A copy of the `classes` parameter where provided,
        or otherwise, the sorted set of classes found when fitting.

    Examples
    --------
    >>> mlb = MultiLabelBinarizer()
    >>> mlb.fit_transform([(1, 2), (3,)])
    array([[1, 1, 0],
           [0, 0, 1]])
    >>> mlb.classes_
    array([1, 2, 3])
    >>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
    array([[0, 1, 1],
           [1, 0, 0]])
    >>> list(mlb.classes_)
    ['comedy', 'sci-fi', 'thriller']
    """

    def __init__(self, classes=None, sparse_output=False):
        self.classes = classes
        self.sparse_output = sparse_output

    def fit(self, y):
        """Fit the label sets binarizer, storing `classes_`

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        self : returns this MultiLabelBinarizer instance
        """
        if self.classes is None:
            classes = sorted(set(itertools.chain.from_iterable(y)))
        else:
            classes = self.classes
        # Use an integer dtype only when every class label is an int;
        # otherwise fall back to object so arbitrary hashables fit.
        dtype = int if all(isinstance(c, int) for c in classes) else object
        self.classes_ = np.empty(len(classes), dtype=dtype)
        self.classes_[:] = classes
        return self

    def fit_transform(self, y):
        """Fit the label sets binarizer and transform the given label sets

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        y_indicator : array or CSR matrix, shape (n_samples, n_classes)
            A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is
            in `y[i]`, and 0 otherwise.
        """
        if self.classes is not None:
            return self.fit(y).transform(y)

        # Automatically increment on new class: a defaultdict whose factory
        # is its own __len__ assigns the next free column index on first
        # access of each previously-unseen label.
        class_mapping = defaultdict(int)
        class_mapping.default_factory = class_mapping.__len__
        yt = self._transform(y, class_mapping)

        # Sort classes and reorder columns accordingly.
        tmp = sorted(class_mapping, key=class_mapping.get)

        # (make safe for tuples)
        dtype = int if all(isinstance(c, int) for c in tmp) else object
        class_mapping = np.empty(len(tmp), dtype=dtype)
        class_mapping[:] = tmp
        self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
        # Remap the column indices from first-seen order to sorted order.
        yt.indices = np.take(inverse, yt.indices)

        if not self.sparse_output:
            yt = yt.toarray()

        return yt

    def transform(self, y):
        """Transform the given label sets

        Parameters
        ----------
        y : iterable of iterables
            A set of labels (any orderable and hashable object) for each
            sample. If the `classes` parameter is set, `y` will not be
            iterated.

        Returns
        -------
        y_indicator : array or CSR matrix, shape (n_samples, n_classes)
            A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is
            in `y[i]`, and 0 otherwise.
        """
        class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
        yt = self._transform(y, class_to_index)

        if not self.sparse_output:
            yt = yt.toarray()

        return yt

    def _transform(self, y, class_mapping):
        """Transforms the label sets with a given mapping

        Parameters
        ----------
        y : iterable of iterables
        class_mapping : Mapping
            Maps from label to column index in label indicator matrix

        Returns
        -------
        y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
            Label indicator matrix
        """
        indices = array.array('i')
        indptr = array.array('i', [0])
        for labels in y:
            # set() drops duplicate labels within one sample.
            indices.extend(set(class_mapping[label] for label in labels))
            indptr.append(len(indices))
        data = np.ones(len(indices), dtype=int)
        return sp.csr_matrix((data, indices, indptr),
                             shape=(len(indptr) - 1, len(class_mapping)))

    def inverse_transform(self, yt):
        """Transform the given indicator matrix into label sets

        Parameters
        ----------
        yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s ands 0s.

        Returns
        -------
        y : list of tuples
            The set of labels for each sample such that `y[i]` consists of
            `classes_[j]` for each `yt[i, j] == 1`.
        """
        if yt.shape[1] != len(self.classes_):
            raise ValueError('Expected indicator for {0} classes, but got {1}'
                             .format(len(self.classes_), yt.shape[1]))

        if sp.issparse(yt):
            yt = yt.tocsr()
            if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
                raise ValueError('Expected only 0s and 1s in label indicator.')
            return [tuple(self.classes_.take(yt.indices[start:end]))
                    for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
        else:
            unexpected = np.setdiff1d(yt, [0, 1])
            if len(unexpected) > 0:
                raise ValueError('Expected only 0s and 1s in label indicator. '
                                 'Also got {0}'.format(unexpected))
            return [tuple(self.classes_.compress(indicators)) for indicators
                    in yt]
| bsd-3-clause |
tdhopper/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)

# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause

import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score

# (n_row_clusters, n_column_clusters) of the synthetic checkerboard.
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
    shape=(300, 300), n_clusters=n_clusters, noise=10,
    shuffle=False, random_state=0)

plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")

# NOTE(review): sg._shuffle is a private sklearn helper and may change or
# disappear between scikit-learn versions.
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")

model = SpectralBiclustering(n_clusters=n_clusters, method='log',
                             random_state=0)
model.fit(data)
# Agreement between the recovered and the true biclusters (1.0 = perfect).
score = consensus_score(model.biclusters_,
                        (rows[:, row_idx], columns[:, col_idx]))

print("consensus score: {:.1f}".format(score))

# Reorder rows/columns by cluster label to make the biclusters contiguous.
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]

plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")

# Outer product of (shifted) row and column labels visualizes the
# recovered checkerboard structure directly.
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
                     np.sort(model.column_labels_) + 1),
            cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")

plt.show()
vancan1ty/SEAT | DataProcessing.py | 1 | 7008 | # Copyright (C) 2015 Currell Berry, Justin Jackson, and Team 41 Epilepsy Modeling
#
# This file is part of SEAT (Simple EEG Analysis Tool).
#
# SEAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SEAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SEAT. If not, see <http://www.gnu.org/licenses/>.
"""this file contains data processing functionality and matplotlib-based visualizations"""
import matplotlib
import mne
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import scipy as sp
from scipy.signal import butter, lfilter, freqz
from PIL import Image
import re
import wavelets
SAMPLING_RATE=256
#below is filter stuff
#derived from http://stackoverflow.com/questions/25191620/creating-lowpass-filter-in-scipy-understanding-methods-and-units
def butter_lowpass(cutoff, fs, order=5):
    """Design a digital Butterworth low-pass filter.

    ``cutoff`` is the -3 dB frequency in Hz, ``fs`` the sampling frequency
    in Hz.  Returns the ``(b, a)`` transfer-function coefficients.
    """
    # scipy's butter() expects the cutoff normalised to the Nyquist frequency.
    nyquist = 0.5 * fs
    return butter(order, cutoff / nyquist, btype='low', analog=False)
def butter_lowpass_filter(data, cutoff, fs, order=5):
    """Run ``data`` through a forward-only Butterworth low-pass filter.

    ``cutoff`` and the sampling frequency ``fs`` are in Hz; the filtered
    signal is returned as produced by ``scipy.signal.lfilter``.
    """
    # Design the filter inline (cutoff normalised to the Nyquist frequency)
    # and apply the single-pass IIR filter to the samples.
    numer, denom = butter(order, cutoff / (0.5 * fs), btype='low',
                          analog=False)
    return lfilter(numer, denom, data)
def butter_bandpass(lowcutoff, highcutoff, fs, order=5):
    """Design a digital Butterworth band-pass filter.

    The pass band runs from ``lowcutoff`` to ``highcutoff`` (Hz) for a
    signal sampled at ``fs`` Hz.  Returns the ``(b, a)`` coefficients.
    """
    # Normalise both band edges to the Nyquist frequency for scipy's butter().
    nyquist = 0.5 * fs
    band_edges = [lowcutoff / nyquist, highcutoff / nyquist]
    return butter(order, band_edges, btype='bandpass', analog=False)
def butter_bandpass_filter(data, lowcutoff, highcutoff, fs, order=5):
    """Run ``data`` through a forward-only Butterworth band-pass filter.

    The pass band is ``lowcutoff``..``highcutoff`` Hz at sampling frequency
    ``fs`` Hz.  Returns the filtered signal.
    """
    # Design the band-pass filter inline and apply it in a single pass.
    nyquist = 0.5 * fs
    numer, denom = butter(order,
                          [lowcutoff / nyquist, highcutoff / nyquist],
                          btype='bandpass', analog=False)
    return lfilter(numer, denom, data)
def convertSpikesStructureToLinearForm(spikesstructure):
    """converts a spikesStructure which identifies spikes "detected" on each
    channel into a simple array of sample nos where spikes were detected"""
    # Union the per-channel spike sample numbers into one set (dropping
    # duplicates across channels) and hand back an ordered list.
    return sorted(set().union(*spikesstructure))
def stupidIdentifySpikes(data, spikekernellength=128, cutoff=0.0133):
    """CB expects a list of arrays as data.

    Cross-correlates every channel with a Morlet wavelet kernel and returns,
    per channel, the list of positions where the correlation reaches
    ``cutoff`` (a list of lists of indices).
    NOTE(review): the returned indices are positions in the *full
    correlation* output (length len(arr) + spikekernellength - 1), not in
    the raw signal -- confirm callers compensate for the kernel offset.
    NOTE(review): scipy's morlet kernel is complex, so the correlation is
    complex as well; the ``>=`` comparison below relies on how NumPy
    compares complex values with a float -- confirm on the NumPy in use.
    """
    thekernel = sp.signal.morlet(spikekernellength)
    #[CB 9/5/2015] So this kernel is only for "detecting" spikes of a given format.
    #a.k.a. it sucks.
    #plt.plot(thekernel)
    #plt.show()
    #ldata2, times2 = data[2:20:3, start:stop]
    accumulator = []
    for arr in data:
        # Full cross-correlation of this channel against the wavelet kernel.
        correlated = sp.signal.correlate(arr, thekernel)
        accumulator.append(correlated)
    accumulated = np.vstack(accumulator)
    #for arr in accumulated:
    #    plt.plot(arr)
    #plt.show()
    spikesout = []
    for i in range(0, len(accumulated)):
        spikesout.append([])
        for i2 in range(0, len(accumulated[i])):
            # Threshold the correlation to decide "spike here".
            if(accumulated[i][i2]>=cutoff):
                spikesout[i].append(i2)
    return spikesout
def amplitude_adjust_data(dataSeries, amplitudeFactor):
    """Scale every sample in ``dataSeries`` by ``amplitudeFactor``.

    Returns the scaled samples as a list.

    Bug fix: the original built the scaled series with ``map`` but never
    returned it, so the whole computation was dead (and under Python 3 the
    ``map`` object would additionally be lazy).  A list comprehension is
    returned instead.
    """
    return [sample * amplitudeFactor for sample in dataSeries]
def getDisplayData(realData, start_time, end_time, amplitude_adjust, lowpass, highpass, channels=range(1,15)):
    """given some real EEG data, getDisplayData processes it in a way that is useful for display
    purposes and returns the results

    Slices ``realData`` (assumed to be an mne-style Raw object supporting
    ``time_as_index`` and ``[channels, start:stop]`` indexing -- confirm)
    to the requested time window, band-pass filters every selected channel
    between ``lowpass`` and ``highpass`` Hz at SAMPLING_RATE, scales by
    ``amplitude_adjust``, and returns ``(filtered_channels, times)``.
    """
    start, stop = realData.time_as_index([start_time, end_time])
    ldata, ltimes = realData[channels, start:stop]
    #spikesStructure = stupidIdentifySpikes(ldata, cutoff=0.0005)
    #linSpikes = convertSpikesStructureToLinearForm(spikesStructure)
    #avgdata = np.average(np.array(ldata),0)
    # NOTE(review): under Python 3 map() returns a lazy iterator, not a list
    # (this module is written for Python 2, where it is a list) -- confirm
    # callers iterate it only once.
    ldata2 = map(lambda x: amplitude_adjust*butter_bandpass_filter(x,lowpass,highpass,SAMPLING_RATE), ldata)
    return (ldata2,ltimes)
def load_raw_annotations(rawAnnotationsPath):
    """Parse an exported EEG annotations text file.

    Skips everything up to (and including) the ``Time  Duration  Title``
    header row, then parses each following ``HH:MM:SS \\t\\ttitle`` line.

    Returns a list of ``((hour, minute, second), None, title)`` tuples
    (the duration column is assumed absent, hence the ``None``).

    Fixes over the original: the file is opened with a context manager so
    the handle cannot leak on a parse error, and lines that do not match
    the expected format are skipped instead of raising ``AttributeError``
    on the ``None`` returned by ``re.match``.
    """
    annotations = []
    startFound = False
    with open(rawAnnotationsPath, 'r') as myfile:
        for line in myfile:
            if not startFound:
                if re.match(r'\s*Time\s+Duration\s+Title', line) is not None:
                    startFound = True
            else:
                # Note: this assumes there is no duration column in the file.
                matches = re.match(r'(\d*):(\d*):(\d*) \t\t(.*)', line)
                if matches is None:
                    # Skip malformed/blank lines rather than crashing.
                    continue
                entry = ((int(matches.group(1)), int(matches.group(2)),
                          int(matches.group(3))), None, matches.group(4))
                annotations.append(entry)
    return annotations
# truth is (time,duration,title)
# predictions is [time]
# time is (hour, minute, second)
# returns (truePositives, falsePositives, falseNegatives)
def score_predictions(truth, predictions):
    """Score predicted spike times against ground-truth annotations.

    Parameters
    ----------
    truth : list of ((h, m, s), duration, title)
        Only entries titled ``'spike'`` are scored.  ``duration`` is an
        (h, m, s) tuple or ``None`` (treated as one second).
    predictions : list of (h, m, s)
        Predicted spike times.

    Returns
    -------
    (true_positives, false_positives, false_negatives)

    Bug fix: the original compared ``spiketime + spikedur`` while
    ``spikedur`` was still an (h, m, s) *tuple*, which raises TypeError on
    every spike.  Durations are now converted to seconds before the
    tolerance-window comparison.
    """
    def _to_seconds(hms):
        # Convert an (hour, minute, second) tuple to absolute seconds.
        hours, minutes, seconds = hms
        return hours * 3600 + minutes * 60 + seconds

    spikeList = []  # tuples of (time in seconds, duration in seconds)
    for time, duration, title in truth:
        if title == 'spike':
            if duration is None:
                duration = (0, 0, 1)
            spikeList.append((_to_seconds(time), _to_seconds(duration)))
    numSpikes = len(spikeList)
    numPredictions = len(predictions)
    numCorrect = 0
    for spiketime, spikedur in spikeList:
        found = False
        for pred in predictions:
            predtime = _to_seconds(pred)
            # A prediction within +/- duration of the spike counts as a hit.
            if spiketime - spikedur <= predtime <= spiketime + spikedur:
                found = True
                break
        if found:
            numCorrect += 1
    return (numCorrect, numPredictions - numCorrect, numSpikes - numCorrect)
def generate_and_plot_waveletAnalysis(rawData,channel,startTime,endTime,samplingRate):
    """takes in a Raw, indexes the requested channel over [startTime,
    endTime], runs a continuous wavelet transform on it and shows the
    resulting time-frequency plot alongside the raw trace.
    """
    start, stop = rawData.time_as_index([startTime, endTime])
    ldata, ltimes = rawData[channel, start:stop]
    # Python 2 print statement (this module targets Python 2 only).
    print "ldata shape: " + str(ldata.shape)
    # Only the first row of the channel selection is analysed.
    wa = make_waveletAnalysis(ldata[0],samplingRate)
    # Shift the wavelet time axis back to absolute time before plotting.
    do_tfr_plot(wa.time+startTime, wa.scales, wa.wavelet_power,ldata[0])
def make_waveletAnalysis(data, samplingRate):
    """takes in array containing data for one component/channel, and the samplingRate"""
    # The wavelets library wants the sample spacing dt, i.e. the reciprocal
    # of the sampling rate.
    return wavelets.WaveletAnalysis(data, dt=1.0 / samplingRate)
def do_tfr_plot(time,scales,power,data):
    """data is included so that you can see the channel as well

    Draws a two-panel figure: the top panel is a filled-contour plot of
    wavelet power over time/scale (log-scaled y axis); the bottom panel
    shows the raw channel on a shared time axis.  Blocks in plt.show().
    """
    fig, axarr = plt.subplots(2,sharex=True)
    # NOTE(review): axarr from the subplots() call above is never used; the
    # GridSpec axes below appear to supersede it -- confirm the first call
    # is intentional.
    T, S = np.meshgrid(time, scales)
    print "shape: " + str(power.shape)
    # Two rows: the TFR panel gets twice the height of the signal trace.
    gs = gridspec.GridSpec(2, 1,height_ratios=[2,1])
    ax1 = plt.subplot(gs[0])
    ax2 = plt.subplot(gs[1],sharex=ax1)
    fig.tight_layout()
    # print "time[0]: " + str(time[0]) + " time[-1] " + str(time[-1])
    ax1.set_xlim([time[0], time[-1]])
    print "scales.shape " + str(scales.shape)
    ax1.contourf(T, S, power, 100)
    ax1.set_yscale('log')
    ax2.plot(time, data)
    plt.show()
| gpl-3.0 |
dsm054/pandas | pandas/core/indexes/api.py | 2 | 8350 | import textwrap
import warnings
from pandas.core.indexes.base import (Index,
_new_Index,
ensure_index,
ensure_index_from_sequences,
InvalidIndexError) # noqa
from pandas.core.indexes.category import CategoricalIndex # noqa
from pandas.core.indexes.multi import MultiIndex # noqa
from pandas.core.indexes.interval import IntervalIndex # noqa
from pandas.core.indexes.numeric import (NumericIndex, Float64Index, # noqa
Int64Index, UInt64Index)
from pandas.core.indexes.range import RangeIndex # noqa
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.datetimes import DatetimeIndex
import pandas.core.common as com
from pandas._libs import lib, NaT
# FutureWarning text emitted by _union_indexes while the pd.concat sort
# default transitions (see the sort=None branch there).
_sort_msg = textwrap.dedent("""\
Sorting because non-concatenation axis is not aligned. A future version
of pandas will change to not sort by default.
To accept the future behavior, pass 'sort=False'.
To retain the current behavior and silence the warning, pass 'sort=True'.
""")
# TODO: there are many places that rely on these private methods existing in
# pandas.core.index
# Names (public and semi-private) re-exported by this module.
__all__ = ['Index', 'MultiIndex', 'NumericIndex', 'Float64Index', 'Int64Index',
           'CategoricalIndex', 'IntervalIndex', 'RangeIndex', 'UInt64Index',
           'InvalidIndexError', 'TimedeltaIndex',
           'PeriodIndex', 'DatetimeIndex',
           '_new_Index', 'NaT',
           'ensure_index', 'ensure_index_from_sequences',
           '_get_combined_index',
           '_get_objs_combined_axis', '_union_indexes',
           '_get_consensus_names',
           '_all_indexes_same']
def _get_objs_combined_axis(objs, intersect=False, axis=0, sort=True):
    """
    Extract combined index: return intersection or union (depending on the
    value of "intersect") of indexes on given axis, or None if all objects
    lack indexes (e.g. they are numpy arrays).

    Parameters
    ----------
    objs : list of objects
        Each object will only be considered if it has a _get_axis
        attribute.
    intersect : bool, default False
        If True, calculate the intersection between indexes. Otherwise,
        calculate the union.
    axis : {0 or 'index', 1 or 'outer'}, default 0
        The axis to extract indexes from.
    sort : bool, default True
        Whether the result index should come out sorted or not.

    Returns
    -------
    Index or None
    """
    # Collect the requested axis from every object that exposes one;
    # ndarrays and other index-less inputs are simply skipped.
    candidate_axes = []
    for obj in objs:
        if hasattr(obj, '_get_axis'):
            candidate_axes.append(obj._get_axis(axis))
    if not candidate_axes:
        # Nothing had an index -> None, matching the original behaviour.
        return None
    return _get_combined_index(candidate_axes, intersect=intersect, sort=sort)
def _get_distinct_objs(objs):
"""
Return a list with distinct elements of "objs" (different ids).
Preserves order.
"""
ids = set()
res = []
for obj in objs:
if not id(obj) in ids:
ids.add(id(obj))
res.append(obj)
return res
def _get_combined_index(indexes, intersect=False, sort=False):
    """
    Return the union or intersection of indexes.

    Parameters
    ----------
    indexes : list of Index or list objects
        When intersect=True, do not accept list of lists.
    intersect : bool, default False
        If True, calculate the intersection between indexes. Otherwise,
        calculate the union.
    sort : bool, default False
        Whether the result index should come out sorted or not.

    Returns
    -------
    Index
    """
    # TODO: handle index names!
    indexes = _get_distinct_objs(indexes)
    if not indexes:
        combined = Index([])
    elif len(indexes) == 1:
        combined = indexes[0]
    elif intersect:
        # Fold the pairwise intersection across all remaining indexes.
        combined = indexes[0]
        for current in indexes[1:]:
            combined = combined.intersection(current)
    else:
        combined = _union_indexes(indexes, sort=sort)
    combined = ensure_index(combined)
    if sort:
        try:
            combined = combined.sort_values()
        except TypeError:
            # Mixed-type indexes may not be sortable; leave them as-is.
            pass
    return combined
def _union_indexes(indexes, sort=True):
    """
    Return the union of indexes.
    The behavior of sort and names is not consistent.
    Parameters
    ----------
    indexes : list of Index or list objects
    sort : bool, default True
        Whether the result index should come out sorted or not.
    Returns
    -------
    Index
    """
    if len(indexes) == 0:
        raise AssertionError('Must have at least 1 Index to union')
    if len(indexes) == 1:
        # Single input: wrap a plain list into a (sorted) Index, otherwise
        # hand the Index back untouched.
        result = indexes[0]
        if isinstance(result, list):
            result = Index(sorted(result))
        return result
    # Classify the inputs: 'special' (datetime-like etc.), 'array' (plain
    # Index objects) or 'list' (only plain lists); see _sanitize_and_check.
    indexes, kind = _sanitize_and_check(indexes)
    def _unique_indices(inds):
        """
        Convert indexes to lists and concatenate them, removing duplicates.
        The final dtype is inferred.
        Parameters
        ----------
        inds : list of Index or list objects
        Returns
        -------
        Index
        """
        def conv(i):
            if isinstance(i, Index):
                i = i.tolist()
            return i
        return Index(
            lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort))
    if kind == 'special':
        result = indexes[0]
        # Datetime-like indexes expose a faster n-way union.
        if hasattr(result, 'union_many'):
            return result.union_many(indexes[1:])
        else:
            for other in indexes[1:]:
                result = result.union(other)
            return result
    elif kind == 'array':
        index = indexes[0]
        for other in indexes[1:]:
            if not index.equals(other):
                # Indexes differ: fall back to the de-duplicating union.
                # sort=None means "warn about the upcoming default change,
                # then sort" during the pd.concat transition period.
                if sort is None:
                    # TODO: remove once pd.concat sort default changes
                    warnings.warn(_sort_msg, FutureWarning, stacklevel=8)
                    sort = True
                return _unique_indices(indexes)
        # All equal: keep the first index, reconciling only the name.
        name = _get_consensus_names(indexes)[0]
        if name != index.name:
            index = index._shallow_copy(name=name)
        return index
    else:  # kind='list'
        return _unique_indices(indexes)
def _sanitize_and_check(indexes):
    """
    Verify the type of indexes and convert lists to Index.

    Cases:
    - [list, list, ...]: Return ([list, list, ...], 'list')
    - [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
      Lists are sorted and converted to Index.
    - [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
      TYPE = 'special' if at least one special type, 'array' otherwise.

    Parameters
    ----------
    indexes : list of Index or list objects

    Returns
    -------
    sanitized_indexes : list of Index or list objects
    type : {'list', 'array', 'special'}
    """
    kinds = list({type(index) for index in indexes})
    if list in kinds:
        if len(kinds) == 1:
            # Nothing but plain lists: hand them back untouched.
            return indexes, 'list'
        # Mixed lists and Index objects: sort the raw lists and wrap them
        # so every element is an Index going forward.
        indexes = [x if isinstance(x, Index) else Index(com.try_sort(x))
                   for x in indexes]
        kinds.remove(list)
    # Exactly the base Index type everywhere -> 'array'; any subclass (or a
    # mix of types) -> 'special'.
    if len(kinds) == 1 and Index in kinds:
        return indexes, 'array'
    return indexes, 'special'
def _get_consensus_names(indexes):
    """
    Give a consensus 'names' to indexes.

    If there's exactly one non-empty 'names', return this,
    otherwise, return empty.

    Parameters
    ----------
    indexes : list of Index objects

    Returns
    -------
    list
        A list representing the consensus 'names' found.
    """
    # Collect the distinct non-None name tuples; tuples keep them hashable
    # so the set comprehension can deduplicate them.
    consensus_names = {tuple(idx.names) for idx in indexes
                       if com._any_not_none(*idx.names)}
    if len(consensus_names) == 1:
        (names,) = consensus_names
        return list(names)
    # No agreement (or no names at all): one None per level.
    return [None] * indexes[0].nlevels
def _all_indexes_same(indexes):
"""
Determine if all indexes contain the same elements.
Parameters
----------
indexes : list of Index objects
Returns
-------
bool
True if all indexes contain the same elements, False otherwise.
"""
first = indexes[0]
for index in indexes[1:]:
if not first.equals(index):
return False
return True
| bsd-3-clause |
csherwood-usgs/landlab | setup.py | 1 | 5852 | #! /usr/bin/env python
#from ez_setup import use_setuptools
#use_setuptools()
from setuptools import setup, find_packages, Extension
from setuptools.command.install import install
from setuptools.command.develop import develop
from distutils.extension import Extension
import sys
# Cython extension modules: each .pyx source is compiled into the matching
# dotted module path (cythonization is driven by setup_requires below).
ext_modules = [
    Extension('landlab.ca.cfuncs',
              ['landlab/ca/cfuncs.pyx']),
    Extension('landlab.grid.cfuncs',
              ['landlab/grid/cfuncs.pyx']),
    Extension('landlab.components.flexure.cfuncs',
              ['landlab/components/flexure/cfuncs.pyx']),
    Extension('landlab.components.flow_accum.cfuncs',
              ['landlab/components/flow_accum/cfuncs.pyx']),
    Extension('landlab.components.flow_director.cfuncs',
              ['landlab/components/flow_director/cfuncs.pyx']),
    Extension('landlab.components.stream_power.cfuncs',
              ['landlab/components/stream_power/cfuncs.pyx']),
    Extension('landlab.components.drainage_density.cfuncs',
              ['landlab/components/drainage_density/cfuncs.pyx']),
    Extension('landlab.utils.ext.jaggedarray',
              ['landlab/utils/ext/jaggedarray.pyx']),
    Extension('landlab.graph.structured_quad.ext.at_node',
              ['landlab/graph/structured_quad/ext/at_node.pyx']),
    Extension('landlab.graph.structured_quad.ext.at_link',
              ['landlab/graph/structured_quad/ext/at_link.pyx']),
    Extension('landlab.graph.structured_quad.ext.at_patch',
              ['landlab/graph/structured_quad/ext/at_patch.pyx']),
    Extension('landlab.graph.structured_quad.ext.at_cell',
              ['landlab/graph/structured_quad/ext/at_cell.pyx']),
    Extension('landlab.graph.structured_quad.ext.at_face',
              ['landlab/graph/structured_quad/ext/at_face.pyx']),
    Extension('landlab.graph.hex.ext.hex',
              ['landlab/graph/hex/ext/hex.pyx']),
    Extension('landlab.graph.sort.ext.remap_element',
              ['landlab/graph/sort/ext/remap_element.pyx']),
    Extension('landlab.graph.sort.ext.argsort',
              ['landlab/graph/sort/ext/argsort.pyx']),
    Extension('landlab.graph.sort.ext.spoke_sort',
              ['landlab/graph/sort/ext/spoke_sort.pyx']),
    Extension('landlab.graph.voronoi.ext.voronoi',
              ['landlab/graph/voronoi/ext/voronoi.pyx']),
    Extension('landlab.graph.voronoi.ext.delaunay',
              ['landlab/graph/voronoi/ext/delaunay.pyx']),
    Extension('landlab.graph.object.ext.at_node',
              ['landlab/graph/object/ext/at_node.pyx']),
    Extension('landlab.graph.object.ext.at_patch',
              ['landlab/graph/object/ext/at_patch.pyx']),
    Extension('landlab.graph.quantity.ext.of_link',
              ['landlab/graph/quantity/ext/of_link.pyx']),
    Extension('landlab.graph.quantity.ext.of_patch',
              ['landlab/graph/quantity/ext/of_patch.pyx']),
    Extension('landlab.graph.matrix.ext.matrix',
              ['landlab/graph/matrix/ext/matrix.pyx']),
    Extension('landlab.grid.structured_quad.cfuncs',
              ['landlab/grid/structured_quad/cfuncs.pyx']),
    Extension('landlab.grid.structured_quad.c_faces',
              ['landlab/grid/structured_quad/c_faces.pyx']),
    ]
import numpy as np
from landlab import __version__
def register(**kwds):
    """POST the given keyword data to the CSDMS registration endpoint.

    NOTE(review): this uses the Python 2-only ``httplib``/``urllib``
    modules (``http.client``/``urllib.parse`` on Python 3), so it will
    raise ImportError/AttributeError under Python 3 as written; the caller
    (register_landlab) swallows the failure.
    """
    import httplib, urllib
    data = urllib.urlencode(kwds)
    header = {"Content-type": "application/x-www-form-urlencoded",
              "Accept": "text/plain"}
    conn = httplib.HTTPConnection('csdms.colorado.edu')
    # Fire-and-forget: the response is never read.
    conn.request('POST', '/register/', data, header)
def register_landlab():
    """Best-effort usage registration with CSDMS; never raises."""
    try:
        import platform
        from sys import argv
        register(name='landlab',
                 version=__version__,
                 platform=platform.platform(),
                 desc=';'.join(argv))
    except Exception:
        # Registration is strictly optional -- swallow any failure
        # (no network, Python 3 httplib import error, etc.).
        pass
class install_and_register(install):
    """``setup.py install`` command that also pings the CSDMS registry."""
    def run(self):
        install.run(self)
        # Best-effort, never raises (see register_landlab).
        register_landlab()
class develop_and_register(develop):
    """``setup.py develop`` command that also pings the CSDMS registry."""
    def run(self):
        develop.run(self)
        # Best-effort, never raises (see register_landlab).
        register_landlab()
import os
#cython_pathspec = os.path.join('landlab', 'components','**','*.pyx')
#ext_modules = cythonize(cython_pathspec)
# Package build/installation configuration.  The custom cmdclass entries make
# ``install``/``develop`` also attempt the (best-effort) CSDMS registration.
setup(name='landlab',
      version=__version__,
      author='Eric Hutton',
      author_email='eric.hutton@colorado.edu',
      url='https://github.com/landlab',
      description='Plugin-based component modeling tool.',
      long_description=open('README.rst').read(),
      install_requires=['scipy>=0.12',
                        'nose>=1.3',
                        'matplotlib',
                        'sympy',
                        'pandas',
                        'six',
                        'pyyaml',
                        'netCDF4',
                        'xarray',
                        ],
#                        'Cython>=0.22'],
      setup_requires=['cython'],
      classifiers=[
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Cython',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: Implementation :: CPython',
          'Topic :: Scientific/Engineering :: Physics'
      ],
      packages=find_packages(),
      package_data={'': ['tests/*txt', 'data/*asc', 'data/*nc',
                         'preciptest.in']},
      test_suite='nose.collector',
      cmdclass={
          'install': install_and_register,
          'develop': develop_and_register,
      },
      entry_points={
          'console_scripts': [
              'landlab=landlab.cmd.landlab:main',
          ]
      },
      # NumPy headers are required to compile the Cython extensions above.
      include_dirs = [np.get_include()],
      ext_modules = ext_modules,
      )
| mit |
wateraccounting/wa | Generator/Sheet4/main.py | 1 | 13145 | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Function/Four
"""
# import general python modules
import os
import numpy as np
import pandas as pd
from netCDF4 import Dataset
def Calculate(WA_HOME_folder, Basin, P_Product, ET_Product, LAI_Product, ETref_Product, Runoff_Product, Startdate, Enddate, Simulation):
    """
    This function is the main framework for calculating sheet 4.
    Parameters
    ----------
    WA_HOME_folder : str
        Root output folder; '' means use the WA_HOME environment variable.
    Basin : str
        Name of the basin
    P_Product : str
        Name of the rainfall product that will be used
    ET_Product : str
        Name of the evapotranspiration product that will be used
    LAI_Product : str
        Name of the LAI product that will be used
    ETref_Product : str
        Name of the reference evapotranspiration product that will be used
    Runoff_Product : str
        Name of the Runoff product that will be used
    Startdate : str
        Contains the start date of the model 'yyyy-mm-dd'
    Enddate : str
        Contains the end date of the model 'yyyy-mm-dd'
    Simulation : int
        Defines the simulation

    NOTE(review): LAI_Product and Runoff_Product are accepted but never used
    in this body -- confirm whether they should drive additional downloads.
    """
    ######################### Import WA modules ###################################
    from wa.General import raster_conversions as RC
    from wa.General import data_conversions as DC
    import wa.Functions.Four as Four
    import wa.Functions.Start as Start
    import wa.Generator.Sheet4 as Generate
    import wa.Functions.Start.Get_Dictionaries as GD
    ######################### Set General Parameters ##############################
    # Get environmental variable for the Home folder
    if WA_HOME_folder == '':
        WA_env_paths = os.environ["WA_HOME"].split(';')
        Dir_Home = WA_env_paths[0]
    else:
        Dir_Home = WA_HOME_folder
    # Create the Basin folder
    Dir_Basin = os.path.join(Dir_Home, Basin)
    output_dir = os.path.join(Dir_Basin, "Simulations", "Simulation_%d" %Simulation)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Get the boundaries of the basin based on the shapefile of the watershed
    # Boundaries, Shape_file_name_shp = Start.Boundaries.Determine(Basin)
    Boundaries, Example_dataset = Start.Boundaries.Determine_LU_Based(Basin, Dir_Home)
    # Find the maximum moving window value
    ET_Blue_Green_Classes_dict, Moving_Window_Per_Class_dict = GD.get_bluegreen_classes(version = '1.0')
    # NOTE(review): np.max over dict.values() assumes Python 2 (a list); the
    # whole module is Python 2 style -- confirm before porting.
    Additional_Months_tail = np.max(Moving_Window_Per_Class_dict.values())
    ############## Cut dates into pieces if it is needed ######################
    # Check the years that needs to be calculated
    years = range(int(Startdate.split('-')[0]),int(Enddate.split('-')[0]) + 1)
    for year in years:
        # Create .nc file if not exists
        nc_outname = os.path.join(output_dir, "%d.nc" % year)
        if not os.path.exists(nc_outname):
            DC.Create_new_NC_file(nc_outname, Example_dataset, Basin)
        # Open variables in netcdf
        fh = Dataset(nc_outname)
        Variables_NC = [var for var in fh.variables]
        fh.close()
        # Create Start and End date for time chunk
        Startdate_part = '%d-01-01' %int(year)
        Enddate_part = '%s-12-31' %int(year)
        # The first year needs extra lead-in months for the moving average.
        if int(year) == int(years[0]):
            Startdate_Moving_Average = pd.Timestamp(Startdate) - pd.DateOffset(months = Additional_Months_tail)
            Startdate_Moving_Average_String = Startdate_Moving_Average.strftime('%Y-%m-%d')
        else:
            Startdate_Moving_Average_String = Startdate_part
        ############################# Download Data ###################################
        # Download data (only for variables not already stored in the .nc)
        if not "Precipitation" in Variables_NC:
            Data_Path_P_Monthly = Start.Download_Data.Precipitation(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, P_Product)
        if not "Actual_Evapotranspiration" in Variables_NC:
            Data_Path_ET = Start.Download_Data.Evapotranspiration(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_part, Enddate_part, ET_Product)
        if not "Reference_Evapotranspiration" in Variables_NC:
            Data_Path_ETref = Start.Download_Data.ETreference(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Startdate_Moving_Average_String, Enddate_part, ETref_Product)
        if not "Grey_Water_Footprint" in Variables_NC:
            Data_Path_GWF = Start.Download_Data.GWF(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']])
        if not "Theta_Saturated_Topsoil" in Variables_NC:
            Data_Path_ThetaSat_topsoil = Start.Download_Data.Soil_Properties(Dir_Basin, [Boundaries['Latmin'],Boundaries['Latmax']],[Boundaries['Lonmin'],Boundaries['Lonmax']], Para = 'ThetaSat_TopSoil')
        ###################### Save Data as netCDF files ##############################
        #______________________________Precipitation_______________________________
        # 1.) Precipitation data
        if not "Precipitation" in Variables_NC:
            # Get the data of Precipitation and save as nc
            DataCube_Prec = RC.Get3Darray_time_series_monthly(Data_Path_P_Monthly, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Prec, "Precipitation", "mm/month", 0.01)
            del DataCube_Prec
        #_______________________Reference Evaporation______________________________
        # 2.) Reference Evapotranspiration data
        if not "Reference_Evapotranspiration" in Variables_NC:
            # Get the data of Reference Evapotranspiration and save as nc
            DataCube_ETref = RC.Get3Darray_time_series_monthly(Data_Path_ETref, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETref, "Reference_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ETref
        #_______________________________Evaporation________________________________
        # 3.) Evapotranspiration data
        if not "Actual_Evapotranspiration" in Variables_NC:
            # Get the data of Evaporation and save as nc
            DataCube_ET = RC.Get3Darray_time_series_monthly(Data_Path_ET, Startdate_part, Enddate_part, Example_data = Example_dataset)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ET, "Actual_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ET
        #_____________________________________GWF__________________________________
        # 4.) Grey Water Footprint data
        if not "Grey_Water_Footprint" in Variables_NC:
            # Get the data of grey water footprint and save as nc
            GWF_Filepath = os.path.join(Dir_Basin, Data_Path_GWF, "Gray_Water_Footprint_Fraction.tif")
            dest_GWF = RC.reproject_dataset_example(GWF_Filepath, Example_dataset, method=1)
            DataCube_GWF = dest_GWF.GetRasterBand(1).ReadAsArray()
            DC.Add_NC_Array_Static(nc_outname, DataCube_GWF, "Grey_Water_Footprint", "fraction", 0.0001)
            del DataCube_GWF
    ####################### Calculations Sheet 4 ##############################
    ############## Cut dates into pieces if it is needed ######################
    years = range(int(Startdate.split('-')[0]),int(Enddate.split('-')[0]) + 1)
    for year in years:
        # NOTE(review): `year is years[0]` relies on CPython small-int
        # identity (Python 2 range() returns a list of ints) -- fragile;
        # `==` would be the safe comparison.
        if len(years) > 1.0:
            if year is years[0]:
                Startdate_part = Startdate
                Enddate_part = '%s-12-31' %year
            if year is years[-1]:
                Startdate_part = '%s-01-01' %year
                Enddate_part = Enddate
        else:
            Startdate_part = Startdate
            Enddate_part = Enddate
        # NOTE(review): nc_outname and Variables_NC below still hold the
        # values from the *last* iteration of the download loop above, so a
        # multi-year run reads/writes only the final year's .nc file --
        # looks like a latent bug; confirm intended behaviour.
        #____________ Evapotranspiration data split in ETblue and ETgreen ____________
        if not ("Blue_Evapotranspiration" in Variables_NC or "Green_Evapotranspiration" in Variables_NC):
            # Calculate Blue and Green ET
            DataCube_ETblue, DataCube_ETgreen = Four.SplitET.Blue_Green(Dir_Basin, nc_outname, ETref_Product, P_Product, Startdate, Enddate)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETblue, "Blue_Evapotranspiration", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_ETgreen, "Green_Evapotranspiration", "mm/month", 0.01)
            del DataCube_ETblue, DataCube_ETgreen
        #____________ Calculate non-consumend and Total supply maps by using fractions and consumed maps (blue ET) ____________
        if not ("Total_Supply" in Variables_NC or "Non_Consumed_Water" in Variables_NC):
            # Do the calculations
            DataCube_Total_Supply, DataCube_Non_Consumed = Four.Total_Supply.Fraction_Based(nc_outname, Startdate_part, Enddate_part)
            # Save the Total Supply and non consumed data as NetCDF files
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Total_Supply, "Total_Supply", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Non_Consumed, "Non_Consumed_Water", "mm/month", 0.01)
            del DataCube_Total_Supply, DataCube_Non_Consumed
        #____________ Apply fractions over total supply to calculate gw and sw supply ____________
        if not ("Total_Supply_Surface_Water" in Variables_NC or "Total_Supply_Ground_Water" in Variables_NC):
            # Do the calculations
            DataCube_Total_Supply_SW, DataCube_Total_Supply_GW = Four.SplitGW_SW_Supply.Fraction_Based(nc_outname, Startdate_part, Enddate_part)
            # Save the Total Supply surface water and Total Supply ground water data as NetCDF files
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Total_Supply_SW, "Total_Supply_Surface_Water", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, DataCube_Total_Supply_GW, "Total_Supply_Ground_Water", "mm/month", 0.01)
            del DataCube_Total_Supply_SW, DataCube_Total_Supply_GW
        #____________ Apply gray water footprint fractions to calculated non recoverable flow based on the non consumed flow ____________
        if not ("Non_Recovable_Flow" in Variables_NC or "Recovable_Flow" in Variables_NC):
            # Calculate the non recovable flow and recovable flow by using Grey Water Footprint values
            DataCube_NonRecovableFlow, Datacube_RecovableFlow = Four.SplitNonConsumed_NonRecov.GWF_Based(nc_outname, Startdate_part, Enddate_part)
            # Save both flows as nc
            DC.Add_NC_Array_Variable(nc_outname, DataCube_NonRecovableFlow, "Non_Recovable_Flow", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, Datacube_RecovableFlow, "Recovable_Flow", "mm/month", 0.01)
            del DataCube_NonRecovableFlow, Datacube_RecovableFlow
        #____________Apply fractions to calculate the non recovarable SW/GW and recovarable SW/GW ____________
        # 1. Non recovarable flow
        if not ("Non_Recovable_Flow_Ground_Water" in Variables_NC or "Non_Recovable_Flow_Surface_Water" in Variables_NC):
            # Calculate the non recovable return flow to ground and surface water
            DataCube_NonRecovableFlow_Return_GW, Datacube_NonRecovableFlow_Return_SW = Four.SplitGW_SW_Return.Fraction_Based(nc_outname, "Non_Recovable_Flow", Startdate_part, Enddate_part)
            # Save both return flows as nc
            DC.Add_NC_Array_Variable(nc_outname, DataCube_NonRecovableFlow_Return_GW, "Non_Recovable_Flow_Ground_Water", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, Datacube_NonRecovableFlow_Return_SW, "Non_Recovable_Flow_Surface_Water", "mm/month", 0.01)
            del DataCube_NonRecovableFlow_Return_GW, Datacube_NonRecovableFlow_Return_SW
        # 2. Recovarable flow
        if not ("Recovable_Flow_Ground_Water" in Variables_NC or "Recovable_Flow_Surface_Water" in Variables_NC):
            # Calculate the recovable return flow to ground and surface water
            DataCube_RecovableFlow_Return_GW, Datacube_RecovableFlow_Return_SW = Four.SplitGW_SW_Return.Fraction_Based(nc_outname, "Recovable_Flow", Startdate_part, Enddate_part)
            # Save both return flows as nc
            DC.Add_NC_Array_Variable(nc_outname, DataCube_RecovableFlow_Return_GW, "Recovable_Flow_Ground_Water", "mm/month", 0.01)
            DC.Add_NC_Array_Variable(nc_outname, Datacube_RecovableFlow_Return_SW, "Recovable_Flow_Surface_Water", "mm/month", 0.01)
            del DataCube_RecovableFlow_Return_GW, Datacube_RecovableFlow_Return_SW
        ############################ Create CSV 4 #################################
        Dir_Basin_CSV, Unit_front = Generate.CSV.Create(Dir_Basin, Simulation, Basin, Startdate_part, Enddate_part, nc_outname)
        ############################ Create Sheet 4 ###############################
        Generate.PDF.Create(Dir_Basin, Basin, Simulation, Dir_Basin_CSV, Unit_front)
    return()
| apache-2.0 |
joshbohde/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 1 | 3177 | import numpy as np
from numpy.testing import assert_almost_equal
from sklearn import neighbors, manifold
from sklearn.utils.fixes import product
eigen_solvers = ['dense', 'arpack']
def assert_lower(a, b, details=None):
    """Assert that ``a`` is strictly lower than ``b``.

    ``details`` (when given) is appended to the failure message.
    """
    suffix = "" if details is None else ": " + details
    assert a < b, "%r is not lower than %r%s" % (a, b, suffix)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
    """LLE on a regular 2D grid (out_dim == input dim): the barycenter
    reconstruction error must stay below tol for both eigen solvers, and
    transform() must re-embed noisy points close to the embedding."""
    rng = np.random.RandomState(0)
    # grid of equidistant points in 2D, out_dim = n_dim
    X = np.array(list(product(range(5), repeat=2)))
    out_dim = 2
    clf = manifold.LocallyLinearEmbedding(n_neighbors=5, out_dim=out_dim)
    tol = .1
    # Barycenter weights of each point w.r.t. its neighbors; N @ X should
    # reconstruct X almost exactly on a flat grid.
    N = neighbors.kneighbors_graph(
        X, clf.n_neighbors, mode='barycenter').todense()
    reconstruction_error = np.linalg.norm(np.dot(N, X) - X, 'fro')
    assert_lower(reconstruction_error, tol)
    for solver in eigen_solvers:
        clf.set_params(eigen_solver=solver)
        clf.fit(X)
        assert clf.embedding_.shape[1] == out_dim
        # The same barycenter weights must also reconstruct the embedding.
        reconstruction_error = np.linalg.norm(
            np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
        # FIXME: ARPACK fails this test ...
        if solver != 'arpack':
            assert_lower(reconstruction_error, tol)
            assert_almost_equal(clf.reconstruction_error_,
                                reconstruction_error, decimal=4)
    # re-embed a noisy version of X using the transform method
    noise = rng.randn(*X.shape) / 100
    X_reembedded = clf.transform(X + noise)
    assert_lower(np.linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
    # similar test on a slightly more complex manifold
    """LLE on a gently curved surface (a parabolic sheet over a 2-D grid).

    Looser tolerance than the flat-grid test since the manifold is curved;
    also checks that the reported reconstruction_error_ is consistent with
    the error recomputed from the barycenter weights.
    """
    X = np.array(list(product(range(20), repeat=2)))
    # lift the grid into 3-D with a mild quadratic bend along axis 0
    X = np.c_[X, X[:, 0] ** 2 / 20]
    out_dim = 2
    clf = manifold.LocallyLinearEmbedding(n_neighbors=5, out_dim=out_dim)
    tol = 1.5
    N = neighbors.kneighbors_graph(X, clf.n_neighbors,
                                   mode='barycenter').toarray()
    reconstruction_error = np.linalg.norm(np.dot(N, X) - X)
    assert_lower(reconstruction_error, tol)
    for solver in eigen_solvers:
        clf.set_params(eigen_solver=solver)
        clf.fit(X)
        assert clf.embedding_.shape[1] == out_dim
        reconstruction_error = np.linalg.norm(
            np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
        details = "solver: " + solver
        assert_lower(reconstruction_error, tol, details=details)
        # relative agreement between the estimator's reported error and ours
        assert_lower(np.abs(clf.reconstruction_error_ - reconstruction_error),
                     tol * reconstruction_error, details=details)
def test_pipeline():
    # check that LocallyLinearEmbedding works fine as a Pipeline
    """Smoke test: LLE as a transform step ahead of a neighbors classifier
    should still fit and score reasonably (> 0.7) on iris."""
    from sklearn import pipeline, datasets
    iris = datasets.load_iris()
    clf = pipeline.Pipeline(
        [('filter', manifold.LocallyLinearEmbedding()),
         ('clf', neighbors.NeighborsClassifier())])
    clf.fit(iris.data, iris.target)
    # note the reversed argument order: asserts 0.7 < score
    assert_lower(.7, clf.score(iris.data, iris.target))
# Allow running this test module directly through the nose runner.
if __name__ == '__main__':
    import nose
    nose.runmodule()
| bsd-3-clause |
raghavrv/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 33 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and therefor,
the corresponding Mahalanobis distances are. One would better have to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P.J.Rousseuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purpose, the cubic root of the Mahalanobis distances
are represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# 125 samples in 2-D, the last 25 of which are replaced by outliers below.
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
# #############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
                              color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
                               color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
# 100x100 grid spanning the current axes, flattened to (10000, 2) points
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
                     np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
                                  cmap=plt.cm.PuBu_r,
                                  linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
                                 cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
                inlier_plot, outlier_plot],
               ['MLE dist', 'robust dist', 'inliers', 'outliers'],
               loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
# cube root (Wilson & Hilferty transform, see module docstring) makes the
# chi-square distributed distances approximately Gaussian for the boxplot
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
             emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
             emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
                widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
             robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
             robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
blueshiftofdeath/imessage-counter | imessageCounter.py | 1 | 6291 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime, sqlite3, os
from datetime import timedelta
from itertools import groupby
from matplotlib.dates import DayLocator, MonthLocator, WeekdayLocator
from matplotlib.dates import AutoDateFormatter, DateFormatter
import argparse
#convert timestamp into datetime object
def std_time(stamp):
    """Convert a chat.db timestamp (seconds) into a ``datetime.date``.

    The database stores times as seconds since the Apple epoch; the one-day
    delta on top of 2000-12-31 lands on 2001-01-01 before ``stamp`` seconds
    are added, matching the original base/offset arithmetic.
    """
    apple_epoch = datetime.datetime(2000, 12, 31, 0, 0, 0) + timedelta(days=1)
    moment = apple_epoch + timedelta(seconds=stamp)
    return moment.date()
#take list of (date, wordCount) tuples and combine tuples with same date
#summing wordCounts
def std_list(L):
    """Merge (date, wordCount) tuples that share a date.

    Counts for duplicate dates are summed, and the merged pairs are
    returned as a list sorted by date.
    """
    totals = {}
    for key, count in L:
        totals[key] = totals.get(key, 0) + count
    return sorted(totals.items())
#target list contains all dates that source contains
def fill_points(target, source):
    """Pad ``target`` in place with a zero-count entry for every date that
    appears in ``source`` but not in ``target``, then re-sort it."""
    known = set(p[0] for p in target)
    missing = [(p[0], 0) for p in source if p[0] not in known]
    target.extend(missing)
    target.sort()
def handleFormat(handles):
    """Render one handle id, or an iterable of them, as the fragment used
    after ``WHERE handle_id = `` in the message query.

    A single int is returned unchanged; an iterable yields the ids joined
    with ``" OR handle_id = "`` so the WHERE clause matches any of them.
    """
    if isinstance(handles, int):  # isinstance, not type ==, is the idiom
        return handles
    # join() consumes the generator directly; no intermediate list needed
    return " OR handle_id = ".join(str(h) for h in handles)
def zoom(data, start, end):
    """Restrict (date, ...) rows to the inclusive window [start, end].

    ``start`` and ``end`` are (year, month, day) triples, or None for an
    open bound.  The surviving rows are returned as a sorted list.
    """
    rows = list(data)
    if start is not None:
        lower = datetime.date(start[0], start[1], start[2])
        rows = [row for row in rows if row[0] >= lower]
    if end is not None:
        upper = datetime.date(end[0], end[1], end[2])
        rows = [row for row in rows if row[0] <= upper]
    return sorted(rows)
def queryMessages(handle, dbName, wordsToCount, split, direction, start, end):
    """Query chat.db for one conversation and return per-day word counts.

    handle       -- int handle id or list of ids (see handleFormat)
    dbName       -- path to the Messages sqlite database
    wordsToCount -- list of words/phrases to count, or None for all words
    split        -- if True, return a (sent_by_me, sent_by_other) pair of
                    date-aligned lists; otherwise a single merged list
    direction    -- 'to'/'from'/None filter when split is False
    start, end   -- (y, m, d) bounds passed through to zoom(), or None
    """
    conn = sqlite3.connect(dbName)
    c = conn.cursor()
    # NOTE(review): handleFormat(...) is interpolated straight into the SQL;
    # this is only safe because handles are ints from getHandles — confirm.
    c.execute(
        """
        SELECT date, text, is_from_me
        FROM message
        WHERE handle_id = {}
        """.format(handleFormat(handle))
    )
    result = c.fetchall()
    # Closure over wordsToCount: counts either all words in the message or
    # only the requested ones (phrases with spaces use substring counting).
    def word_count(text):
        if text is None:
            return 0
        words = text.split()
        if wordsToCount is not None:
            count = 0
            for w in wordsToCount:
                if " " in w:
                    count += text.count(w)
                else:
                    # case-insensitive match on the message side only;
                    # w itself is assumed lowercase — TODO confirm
                    count += len(list(filter(lambda x: x.lower() == w, words)))
            return count
        else:
            return len(words)
    # rows become (date, word_count, is_from_me), clipped to [start, end]
    result = zoom(map(lambda row: (std_time(row[0]), word_count(row[1]), row[2]), result), start, end)
    if split:
        # separate series per direction, padded so both cover the same dates
        me = std_list([row[:2] for row in result if row[2] == 1])
        other = std_list([row[:2] for row in result if row[2] == 0])
        fill_points(me, other)
        fill_points(other, me)
        return (me, other)
    else:
        if direction is not None:
            # is_from_me == 1 corresponds to direction 'to'
            return std_list([row[:2] for row in result if row[2] == (direction == 'to')])
        else:
            return std_list([row[:2] for row in result])
def addPlot(ax, dataList):
    """Draw one labelled (date, count) series onto ``ax`` as a line."""
    label, series = dataList
    xs = []
    ys = []
    for point in series:
        xs.append(point[0])
        ys.append(point[1])
    ax.plot_date(xs, ys, '-', label=label)
def plot(dataLists, interval):
    """Render all (label, series) pairs on one date axis and show the figure.

    dataLists -- iterable of (label, [(date, count), ...]) tuples
    interval  -- int day spacing for x-axis ticks, or any non-int (the
                 default string "month") for month-start ticks
    """
    fig, ax = plt.subplots()
    for dataList in dataLists:
        addPlot(ax, dataList)
    if type(interval) == int:
        # every day
        locator = DayLocator(interval=interval)
        locatorFmt = AutoDateFormatter(locator)
    else:
        # every month
        locator = MonthLocator(range(1, 13), bymonthday=1, interval=1)
        locatorFmt = DateFormatter("%b '%y")
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(locatorFmt)
    ax.xaxis.set_label_text('date')
    ax.yaxis.set_label_text('words')
    plt.legend()
    plt.title('words sent per day by text')
    ax.autoscale_view()
    #ax.xaxis.grid(False, 'major')
    #ax.xaxis.grid(True, 'minor')
    ax.grid(True)
    # slant the date labels so they do not overlap
    fig.autofmt_xdate()
    plt.show()
def getHandles(address, dbName):
    """Return the ROWIDs of the ``handle`` rows matching an address.

    address -- phone number or email exactly as stored in handle.id
    dbName  -- path to the Messages chat.db SQLite database

    Returns a list of ints (one address can map to several handles).
    """
    conn = sqlite3.connect(dbName)
    try:
        # Parameterized query replaces the old string-formatted SQL, which
        # was open to injection and broke on addresses containing quotes.
        cursor = conn.execute(
            "SELECT ROWID FROM handle WHERE id = ?", (address,)
        )
        return [row[0] for row in cursor.fetchall()]
    finally:
        # the original leaked the connection; always release it
        conn.close()
# --- Command-line interface -------------------------------------------------
# Parse plotting options, locate the local Messages database and render the
# per-day word counts for the requested conversations.
parser = argparse.ArgumentParser(
    description='Plot words sent per day over iMessage conversations.')
parser.add_argument('--people', metavar='people', type=str, nargs='*',
                    help='name and address of person to put in legend,\
                    where address is phone number or email address used in chat',
                    required=False)
parser.add_argument('--words', metavar='words', type=str, nargs='*',
                    help='words to count', required=False)
parser.add_argument('--interval', metavar='interval', type=int,
                    help='interval of days to label', required=False)
parser.add_argument('--start', metavar='start', type=int, nargs=3,
                    help='first day to plot', required=False)
parser.add_argument('--end', metavar='end', type=int, nargs=3,
                    help='last day to plot', required=False)
parser.add_argument('-split', action='store_true',
                    help='split count by who sends', required=False)
parser.add_argument('-direction', choices=['to', 'from'],
                    help='only show given direction', required=False)
args = parser.parse_args()
# chat.db is where macOS keeps the current user's iMessage history
path = os.path.expanduser('~') + '/Library/Messages/chat.db'
if args.interval is not None:
    interval = args.interval
else:
    # non-int sentinel selects month-start ticks in plot()
    interval = "month"
if args.people is not None:
    data = []
    # --people is a flat [label1, address1, label2, address2, ...] list.
    # Floor division keeps the range argument an int on Python 3 (plain /
    # returned a float there and made range() raise TypeError).
    for i in range(len(args.people) // 2):
        label = args.people[2 * i]
        address = args.people[2 * i + 1]
        datum = queryMessages(getHandles(address, path),
                              path, args.words, args.split, args.direction,
                              args.start, args.end)
        if args.split:
            (me, other) = datum
            data.append(("me to {}".format(label), me))
            data.append(("{} to me".format(label), other))
        else:
            if args.direction is not None:
                label = "sent {} {}".format(args.direction, label)
            data.append((label, datum))
    plot(data, interval)
| mit |
cython-testbed/pandas | pandas/tests/series/test_alter_axes.py | 1 | 11229 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime
import numpy as np
from pandas import Series, DataFrame, Index, MultiIndex, RangeIndex
from pandas.compat import lrange, range, zip
import pandas.util.testing as tm
class TestSeriesAlterAxes(object):
    """Tests for Series axis manipulation: assigning an index, rename /
    rename_axis, reset_index, reorder_levels, set_axis and droplevel."""
    def test_setindex(self, string_series):
        # wrong type
        pytest.raises(TypeError, setattr, string_series, 'index', None)
        # wrong length
        pytest.raises(Exception, setattr, string_series, 'index',
                      np.arange(len(string_series) - 1))
        # works
        string_series.index = np.arange(len(string_series))
        assert isinstance(string_series.index, Index)
    # Renaming
    def test_rename(self, datetime_series):
        ts = datetime_series
        renamer = lambda x: x.strftime('%Y%m%d')
        renamed = ts.rename(renamer)
        assert renamed.index[0] == renamer(ts.index[0])
        # dict
        rename_dict = dict(zip(ts.index, renamed.index))
        renamed2 = ts.rename(rename_dict)
        tm.assert_series_equal(renamed, renamed2)
        # partial dict
        s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')
        renamed = s.rename({'b': 'foo', 'd': 'bar'})
        tm.assert_index_equal(renamed.index, Index(['a', 'foo', 'c', 'bar']))
        # index with name
        renamer = Series(np.arange(4),
                         index=Index(['a', 'b', 'c', 'd'], name='name'),
                         dtype='int64')
        renamed = renamer.rename({})
        assert renamed.index.name == renamer.index.name
    def test_rename_by_series(self):
        s = Series(range(5), name='foo')
        renamer = Series({1: 10, 2: 20})
        result = s.rename(renamer)
        expected = Series(range(5), index=[0, 10, 20, 3, 4], name='foo')
        tm.assert_series_equal(result, expected)
    def test_rename_set_name(self):
        # scalar/tuple renamer sets the Series name, leaves the index alone
        s = Series(range(4), index=list('abcd'))
        for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
            result = s.rename(name)
            assert result.name == name
            tm.assert_numpy_array_equal(result.index.values, s.index.values)
            assert s.name is None
    def test_rename_set_name_inplace(self):
        s = Series(range(3), index=list('abc'))
        for name in ['foo', 123, 123., datetime(2001, 11, 11), ('foo',)]:
            s.rename(name, inplace=True)
            assert s.name == name
            exp = np.array(['a', 'b', 'c'], dtype=np.object_)
            tm.assert_numpy_array_equal(s.index.values, exp)
    def test_rename_axis_supported(self):
        # Supporting axis for compatibility, detailed in GH-18589
        s = Series(range(5))
        s.rename({}, axis=0)
        s.rename({}, axis='index')
        with tm.assert_raises_regex(ValueError, 'No axis named 5'):
            s.rename({}, axis=5)
    def test_set_name_attribute(self):
        s = Series([1, 2, 3])
        s2 = Series([1, 2, 3], name='bar')
        for name in [7, 7., 'name', datetime(2001, 1, 1), (1,), u"\u05D0"]:
            s.name = name
            assert s.name == name
            s2.name = name
            assert s2.name == name
    def test_set_name(self):
        # _set_name returns a renamed copy without mutating the original
        s = Series([1, 2, 3])
        s2 = s._set_name('foo')
        assert s2.name == 'foo'
        assert s.name is None
        assert s is not s2
    def test_rename_inplace(self, datetime_series):
        renamer = lambda x: x.strftime('%Y%m%d')
        expected = renamer(datetime_series.index[0])
        datetime_series.rename(renamer, inplace=True)
        assert datetime_series.index[0] == expected
    def test_set_index_makes_timeseries(self):
        idx = tm.makeDateIndex(10)
        s = Series(lrange(10))
        s.index = idx
        assert s.index.is_all_dates
    def test_reset_index(self):
        df = tm.makeDataFrame()[:5]
        ser = df.stack()
        ser.index.names = ['hash', 'category']
        ser.name = 'value'
        df = ser.reset_index()
        assert 'value' in df
        df = ser.reset_index(name='value2')
        assert 'value2' in df
        # check inplace
        s = ser.reset_index(drop=True)
        s2 = ser
        s2.reset_index(drop=True, inplace=True)
        tm.assert_series_equal(s, s2)
        # level
        index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                           labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
                                   [0, 1, 0, 1, 0, 1]])
        s = Series(np.random.randn(6), index=index)
        rs = s.reset_index(level=1)
        assert len(rs.columns) == 2
        rs = s.reset_index(level=[0, 2], drop=True)
        tm.assert_index_equal(rs.index, Index(index.get_level_values(1)))
        assert isinstance(rs, Series)
    def test_reset_index_level(self):
        df = DataFrame([[1, 2, 3], [4, 5, 6]],
                       columns=['A', 'B', 'C'])
        # same assertions for name-based and position-based level specs
        for levels in ['A', 'B'], [0, 1]:
            # With MultiIndex
            s = df.set_index(['A', 'B'])['C']
            result = s.reset_index(level=levels[0])
            tm.assert_frame_equal(result, df.set_index('B'))
            result = s.reset_index(level=levels[:1])
            tm.assert_frame_equal(result, df.set_index('B'))
            result = s.reset_index(level=levels)
            tm.assert_frame_equal(result, df)
            result = df.set_index(['A', 'B']).reset_index(level=levels,
                                                          drop=True)
            tm.assert_frame_equal(result, df[['C']])
            with tm.assert_raises_regex(KeyError, 'Level E '):
                s.reset_index(level=['A', 'E'])
            # With single-level Index
            s = df.set_index('A')['B']
            result = s.reset_index(level=levels[0])
            tm.assert_frame_equal(result, df[['A', 'B']])
            result = s.reset_index(level=levels[:1])
            tm.assert_frame_equal(result, df[['A', 'B']])
            result = s.reset_index(level=levels[0], drop=True)
            tm.assert_series_equal(result, df['B'])
            with tm.assert_raises_regex(IndexError, 'Too many levels'):
                s.reset_index(level=[0, 1, 2])
        # Check that .reset_index([],drop=True) doesn't fail
        result = Series(range(4)).reset_index([], drop=True)
        expected = Series(range(4))
        tm.assert_series_equal(result, expected)
    def test_reset_index_range(self):
        # GH 12071
        s = Series(range(2), name='A', dtype='int64')
        series_result = s.reset_index()
        assert isinstance(series_result.index, RangeIndex)
        series_expected = DataFrame([[0, 0], [1, 1]],
                                    columns=['index', 'A'],
                                    index=RangeIndex(stop=2))
        tm.assert_frame_equal(series_result, series_expected)
    def test_reorder_levels(self):
        index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                           labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
                                   [0, 1, 0, 1, 0, 1]],
                           names=['L0', 'L1', 'L2'])
        s = Series(np.arange(6), index=index)
        # no change, position
        result = s.reorder_levels([0, 1, 2])
        tm.assert_series_equal(s, result)
        # no change, labels
        result = s.reorder_levels(['L0', 'L1', 'L2'])
        tm.assert_series_equal(s, result)
        # rotate, position
        result = s.reorder_levels([1, 2, 0])
        e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
                           labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1],
                                   [0, 0, 0, 0, 0, 0]],
                           names=['L1', 'L2', 'L0'])
        expected = Series(np.arange(6), index=e_idx)
        tm.assert_series_equal(result, expected)
    def test_rename_axis_inplace(self, datetime_series):
        # GH 15704
        expected = datetime_series.rename_axis('foo')
        result = datetime_series
        no_return = result.rename_axis('foo', inplace=True)
        assert no_return is None
        tm.assert_series_equal(result, expected)
    def test_set_axis_inplace_axes(self, axis_series):
        # GH14636
        ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')
        expected = ser.copy()
        expected.index = list('abcd')
        # inplace=True
        # The FutureWarning comes from the fact that we would like to have
        # inplace default to False some day
        for inplace, warn in [(None, FutureWarning), (True, None)]:
            result = ser.copy()
            kwargs = {'inplace': inplace}
            with tm.assert_produces_warning(warn):
                result.set_axis(list('abcd'), axis=axis_series, **kwargs)
            tm.assert_series_equal(result, expected)
    def test_set_axis_inplace(self):
        # GH14636
        s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')
        expected = s.copy()
        expected.index = list('abcd')
        # inplace=False
        result = s.set_axis(list('abcd'), axis=0, inplace=False)
        tm.assert_series_equal(expected, result)
        # omitting the "axis" parameter
        with tm.assert_produces_warning(None):
            result = s.set_axis(list('abcd'), inplace=False)
        tm.assert_series_equal(result, expected)
        # wrong values for the "axis" parameter
        for axis in [2, 'foo']:
            with tm.assert_raises_regex(ValueError, 'No axis named'):
                s.set_axis(list('abcd'), axis=axis, inplace=False)
    def test_set_axis_prior_to_deprecation_signature(self):
        s = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64')
        expected = s.copy()
        expected.index = list('abcd')
        # NOTE(review): the loop variable `axis` is never used in the call
        # below (0 is hard-coded) — presumably intentional for the old
        # positional signature, but worth confirming.
        for axis in [0, 'index']:
            with tm.assert_produces_warning(FutureWarning):
                result = s.set_axis(0, list('abcd'), inplace=False)
            tm.assert_series_equal(result, expected)
    def test_reset_index_drop_errors(self):
        # GH 20925
        # KeyError raised for series index when passed level name is missing
        s = Series(range(4))
        with tm.assert_raises_regex(KeyError, 'must be same as name'):
            s.reset_index('wrong', drop=True)
        with tm.assert_raises_regex(KeyError, 'must be same as name'):
            s.reset_index('wrong')
        # KeyError raised for series when level to be dropped is missing
        s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2))
        with tm.assert_raises_regex(KeyError, 'not found'):
            s.reset_index('wrong', drop=True)
    def test_droplevel(self):
        # GH20342
        ser = Series([1, 2, 3, 4])
        ser.index = MultiIndex.from_arrays([(1, 2, 3, 4), (5, 6, 7, 8)],
                                           names=['a', 'b'])
        expected = ser.reset_index('b', drop=True)
        result = ser.droplevel('b', axis='index')
        tm.assert_series_equal(result, expected)
        # test that droplevel raises ValueError on axis != 0
        with pytest.raises(ValueError):
            ser.droplevel(1, axis='columns')
| bsd-3-clause |
ssaeger/scikit-learn | sklearn/utils/tests/test_multiclass.py | 34 | 13405 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
class NotAnArray(object):
    """An object that is convertable to an array. This is useful to
    simulate a Pandas timeseries."""
    def __init__(self, data):
        # payload handed back verbatim by __array__
        self.data = data
    def __array__(self):
        # lets np.asarray() treat this wrapper as array-like
        return self.data
# Canonical examples for each target type recognized by type_of_target();
# the tests below iterate this table both positively and negatively.
EXAMPLES = {
    'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
        # by CSR format when the testing takes place
        csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
        csr_matrix(np.array([[0, 1], [1, 0]])),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
        csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
        csr_matrix(np.array([[0, 0], [0, 0]])),
        csr_matrix(np.array([[0, 1]])),
        # Only valid when data is dense
        np.array([[-1, 1], [1, -1]]),
        np.array([[-3, 3], [3, -3]]),
        NotAnArray(np.array([[-3, 3], [3, -3]])),
    ],
    'multiclass': [
        [1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
        np.array([1, 0, 2]),
        np.array([1, 0, 2], dtype=np.int8),
        np.array([1, 0, 2], dtype=np.uint8),
        np.array([1, 0, 2], dtype=np.float),
        np.array([1, 0, 2], dtype=np.float32),
        np.array([[1], [0], [2]]),
        NotAnArray(np.array([1, 0, 2])),
        [0, 1, 2],
        ['a', 'b', 'c'],
        np.array([u'a', u'b', u'c']),
        np.array([u'a', u'b', u'c'], dtype=object),
        np.array(['a', 'b', 'c'], dtype=object),
    ],
    'multiclass-multioutput': [
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
        np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
        np.array([['a', 'b'], ['c', 'd']]),
        np.array([[u'a', u'b'], [u'c', u'd']]),
        np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
        np.array([[1, 0, 2]]),
        NotAnArray(np.array([[1, 0, 2]])),
    ],
    'binary': [
        [0, 1],
        [1, 1],
        [],
        [0],
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
        np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
        np.array([[0], [1]]),
        NotAnArray(np.array([[0], [1]])),
        [1, -1],
        [3, 5],
        ['a'],
        ['a', 'b'],
        ['abc', 'def'],
        np.array(['abc', 'def']),
        [u'a', u'b'],
        np.array(['abc', 'def'], dtype=object),
    ],
    'continuous': [
        [1e-5],
        [0, .5],
        np.array([[0], [.5]]),
        np.array([[0], [.5]], dtype=np.float32),
    ],
    'continuous-multioutput': [
        np.array([[0, .5], [.5, 0]]),
        np.array([[0, .5], [.5, 0]], dtype=np.float32),
        np.array([[0, .5]]),
    ],
    'unknown': [
        [[]],
        [()],
        # sequence of sequences that weren't supported even before deprecation
        np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
        [np.array([]), np.array([1, 2, 3])],
        [set([1, 2, 3]), set([1, 2])],
        [frozenset([1, 2, 3]), frozenset([1, 2])],
        # and also confusable as sequences of sequences
        [{0: 'a', 1: 'b'}, {0: 'a'}],
        # empty second dimension
        np.array([[], []]),
        # 3d
        np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
    ]
}
# Inputs that should be rejected outright as non array-like targets.
NON_ARRAY_LIKE_EXAMPLES = [
    set([1, 2, 3]),
    {0: 'a', 1: 'b'},
    {0: [5], 1: [5]},
    'abc',
    frozenset([1, 2, 3]),
    None,
]
# Legacy sequence-of-sequences multilabel format, no longer supported.
MULTILABEL_SEQUENCES = [
    [[1], [2], [0, 1]],
    # NOTE(review): (2) is just the int 2, not a 1-tuple — presumably
    # intentional as a ragged example, but worth confirming.
    [(), (2), (0, 1)],
    np.array([[], [1, 2]], dtype='object'),
    NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
    """unique_labels on multiclass labels and multilabel indicator matrices,
    including multiple arrays at once and mixed-width indicator errors."""
    # Empty iterable
    assert_raises(ValueError, unique_labels)
    # Multiclass problem
    assert_array_equal(unique_labels(xrange(10)), np.arange(10))
    assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
    assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
    # Multilabel indicator
    assert_array_equal(unique_labels(np.array([[0, 0, 1],
                                               [1, 0, 1],
                                               [0, 0, 0]])),
                       np.arange(3))
    assert_array_equal(unique_labels(np.array([[0, 0, 1],
                                               [0, 0, 0]])),
                       np.arange(3))
    # Several arrays passed
    assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
                       np.arange(5))
    assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
                       np.arange(3))
    # Border line case with binary indicator matrix
    assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
    assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
    assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
                       np.arange(5))
def test_unique_labels_non_specific():
    # Test unique_labels with a variety of collected examples
    """Sweep the EXAMPLES table: supported target types must not raise,
    everything else (and non array-likes) must raise ValueError."""
    # Smoke test for all supported format
    for format in ["binary", "multiclass", "multilabel-indicator"]:
        for y in EXAMPLES[format]:
            unique_labels(y)
    # We don't support those format at the moment
    for example in NON_ARRAY_LIKE_EXAMPLES:
        assert_raises(ValueError, unique_labels, example)
    for y_type in ["unknown", "continuous", 'continuous-multioutput',
                   'multiclass-multioutput']:
        for example in EXAMPLES[y_type]:
            assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
    """Mixing incompatible target kinds (indicator vs. class labels, or
    string vs. numeric labels) must raise ValueError in either order."""
    # Mix with binary or multiclass and multilabel
    mix_clf_format = product(EXAMPLES["multilabel-indicator"],
                             EXAMPLES["multiclass"] +
                             EXAMPLES["binary"])
    for y_multilabel, y_multiclass in mix_clf_format:
        assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
        assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
    # mixed string/number label sets are also rejected
    assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
    assert_raises(ValueError, unique_labels, ["1", 2])
    assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
    assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
    """is_multilabel must be True only for multilabel-indicator examples;
    sparse conversions of other numeric 2-D examples must stay False."""
    for group, group_examples in iteritems(EXAMPLES):
        if group in ['multilabel-indicator']:
            dense_assert_, dense_exp = assert_true, 'True'
        else:
            dense_assert_, dense_exp = assert_false, 'False'
        for example in group_examples:
            # Only mark explicitly defined sparse examples as valid sparse
            # multilabel-indicators
            if group == 'multilabel-indicator' and issparse(example):
                sparse_assert_, sparse_exp = assert_true, 'True'
            else:
                sparse_assert_, sparse_exp = assert_false, 'False'
            # only numeric 2-D array-likes with columns can be sparsified
            if (issparse(example) or
                (hasattr(example, '__array__') and
                 np.asarray(example).ndim == 2 and
                 np.asarray(example).dtype.kind in 'biuf' and
                 np.asarray(example).shape[1] > 0)):
                examples_sparse = [sparse_matrix(example)
                                   for sparse_matrix in [coo_matrix,
                                                         csc_matrix,
                                                         csr_matrix,
                                                         dok_matrix,
                                                         lil_matrix]]
                for exmpl_sparse in examples_sparse:
                    sparse_assert_(is_multilabel(exmpl_sparse),
                                   msg=('is_multilabel(%r)'
                                        ' should be %s')
                                   % (exmpl_sparse, sparse_exp))
            # Densify sparse examples before testing
            if issparse(example):
                example = example.toarray()
            dense_assert_(is_multilabel(example),
                          msg='is_multilabel(%r) should be %s'
                          % (example, dense_exp))
def test_check_classification_targets():
    """check_classification_targets rejects continuous/unknown targets with
    'Unknown label type' and accepts every classification target type."""
    for y_type in EXAMPLES.keys():
        if y_type in ["unknown", "continuous", 'continuous-multioutput']:
            for example in EXAMPLES[y_type]:
                msg = 'Unknown label type: '
                assert_raises_regex(ValueError, msg,
                                    check_classification_targets, example)
        else:
            for example in EXAMPLES[y_type]:
                check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
    """type_of_target must classify every EXAMPLES entry under its group key
    and raise informative ValueErrors for unsupported representations."""
    for group, group_examples in iteritems(EXAMPLES):
        for example in group_examples:
            assert_equal(type_of_target(example), group,
                         msg=('type_of_target(%r) should be %r, got %r'
                              % (example, group, type_of_target(example))))
    for example in NON_ARRAY_LIKE_EXAMPLES:
        # NOTE(review): not a raw string, so \( relies on Python keeping
        # unrecognized escapes verbatim — works, but r'...' would be cleaner.
        msg_regex = 'Expected array-like \(array or non-string sequence\).*'
        assert_raises_regex(ValueError, msg_regex, type_of_target, example)
    for example in MULTILABEL_SEQUENCES:
        msg = ('You appear to be using a legacy multi-label data '
               'representation. Sequence of sequences are no longer supported;'
               ' use a binary array or sparse matrix instead.')
        assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
    """class_distribution on a dense multioutput target and its CSC twin,
    unweighted and then with explicit sample weights."""
    y = np.array([[1, 0, 0, 1],
                  [2, 2, 0, 1],
                  [1, 3, 0, 1],
                  [4, 2, 0, 1],
                  [2, 0, 0, 1],
                  [1, 3, 0, 1]])
    # Define the sparse matrix with a mix of implicit and explicit zeros
    data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
    indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
    indptr = np.array([0, 6, 11, 11, 17])
    y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
    classes, n_classes, class_prior = class_distribution(y)
    classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
    # expectations per output column of y
    classes_expected = [[1, 2, 4],
                        [0, 2, 3],
                        [0],
                        [1]]
    n_classes_expected = [3, 3, 1, 1]
    class_prior_expected = [[3/6, 2/6, 1/6],
                            [1/3, 1/3, 1/3],
                            [1.0],
                            [1.0]]
    for k in range(y.shape[1]):
        assert_array_almost_equal(classes[k], classes_expected[k])
        assert_array_almost_equal(n_classes[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior[k], class_prior_expected[k])
        assert_array_almost_equal(classes_sp[k], classes_expected[k])
        assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
    # Test again with explicit sample weights
    (classes,
     n_classes,
     class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
    # NOTE(review): this weighted "sparse" call also passes the dense y,
    # not y_sp — looks like a copy-paste slip worth confirming upstream.
    (classes_sp,
     n_classes_sp,
     class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
    class_prior_expected = [[4/9, 3/9, 2/9],
                            [2/9, 4/9, 3/9],
                            [1.0],
                            [1.0]]
    for k in range(y.shape[1]):
        assert_array_almost_equal(classes[k], classes_expected[k])
        assert_array_almost_equal(n_classes[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior[k], class_prior_expected[k])
        assert_array_almost_equal(classes_sp[k], classes_expected[k])
        assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
        assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
eickenberg/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 1 | 33972 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import numpy as np
import warnings
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
# Toy 2-D binary classification problem shared by most tests below.
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# held-out points and their expected labels
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# Module-level RNG shared by several tests; tests consume values from it,
# so the order in which tests run affects the random draws they see.
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
    """Check classification on a toy dataset."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # predicting before fit must raise
    assert_raises(ValueError, clf.predict, T)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
    deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
    # NOTE(review): np.any only checks that the deviance decreases at least
    # once between stages; a strict monotonicity check (as the message
    # implies) would use np.all -- confirm intent before changing.
    assert np.any(deviance_decrease >= 0.0), \
        "Train deviance does not monotonically decrease."
def test_parameter_checks():
    """Check input parameter validation."""
    # Each dict is an invalid constructor argument set: fitting with it on
    # the toy problem must raise ValueError.  Order matches the original
    # one-assert-per-parameter layout.
    invalid_params = [
        {'n_estimators': 0},
        {'n_estimators': -1},
        {'learning_rate': 0.0},
        {'learning_rate': -1.0},
        {'loss': 'foobar'},
        {'min_samples_split': 0.0},
        {'min_samples_split': -1.0},
        {'min_samples_leaf': 0},
        {'min_samples_leaf': -1.},
        {'min_weight_fraction_leaf': -1.},
        {'min_weight_fraction_leaf': 0.6},
        {'subsample': 0.0},
        {'subsample': 1.1},
        {'subsample': -0.1},
        {'max_depth': -0.1},
        {'max_depth': 0},
        {'init': {}},
    ]
    for params in invalid_params:
        assert_raises(ValueError,
                      GradientBoostingClassifier(**params).fit, X, y)
    # accessing feature_importances_ before fit must raise
    assert_raises(ValueError,
                  lambda: GradientBoostingClassifier().feature_importances_)
    # deviance requires ``n_classes >= 2``.
    assert_raises(ValueError,
                  lambda X, y: GradientBoostingClassifier(
                      loss='deviance').fit(X, y),
                  X, [0, 0, 0, 0])
def test_loss_function():
    """Regression losses must be rejected by the classifier, and the
    classification loss by the regressor."""
    # regression-only losses: invalid for the classifier
    for loss in ('ls', 'lad', 'quantile', 'huber'):
        assert_raises(ValueError,
                      GradientBoostingClassifier(loss=loss).fit, X, y)
    # classification-only loss: invalid for the regressor
    assert_raises(ValueError,
                  GradientBoostingRegressor(loss='deviance').fit, X, y)
def test_classification_synthetic():
    """Test GradientBoostingClassifier on synthetic dataset used by
    Hastie et al. in ESLII Example 12.7. """
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, X_test = X[:2000], X[2000:]
    y_train, y_test = y[:2000], y[2000:]
    # Stumps (max_depth=1) with unit learning rate, as in the book example.
    gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
                                      max_depth=1,
                                      learning_rate=1.0, random_state=0)
    gbrt.fit(X_train, y_train)
    error_rate = (1.0 - gbrt.score(X_test, y_test))
    assert error_rate < 0.085, \
        "GB failed with error %.4f" % error_rate
    # Stochastic variant: half the samples drawn per stage.
    gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
                                      max_depth=1,
                                      learning_rate=1.0, subsample=0.5,
                                      random_state=0)
    gbrt.fit(X_train, y_train)
    error_rate = (1.0 - gbrt.score(X_test, y_test))
    assert error_rate < 0.08, \
        "Stochastic GB failed with error %.4f" % error_rate
def test_boston():
    """Check consistency on dataset boston house prices with least squares
    and least absolute deviation. """
    for loss in ("ls", "lad", "huber"):
        for subsample in (1.0, 0.5):
            clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
                                            max_depth=4, subsample=subsample,
                                            min_samples_split=1,
                                            random_state=1)
            # predicting before fit must raise
            assert_raises(ValueError, clf.predict, boston.data)
            clf.fit(boston.data, boston.target)
            y_pred = clf.predict(boston.data)
            # Training-set MSE; the loose bound guards against regressions.
            mse = mean_squared_error(boston.target, y_pred)
            assert mse < 6.0, "Failed with loss %s and " \
                "mse = %.4f" % (loss, mse)
def test_iris():
    """Check training-set accuracy on iris with and without subsampling."""
    for frac in (1.0, 0.5):
        est = GradientBoostingClassifier(n_estimators=100, loss='deviance',
                                         random_state=1, subsample=frac)
        est.fit(iris.data, iris.target)
        accuracy = est.score(iris.data, iris.target)
        msg = "Failed with subsample %.1f " \
              "and score = %f" % (frac, accuracy)
        assert accuracy > 0.9, msg
def test_regression_synthetic():
    """Test on synthetic regression datasets used in Leo Breiman,
    `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996). """
    random_state = check_random_state(1)
    regression_params = {'n_estimators': 100, 'max_depth': 4,
                         'min_samples_split': 1, 'learning_rate': 0.1,
                         'loss': 'ls'}
    # Friedman1
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=random_state, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    # NOTE(review): Friedman1 uses default parameters, unlike Friedman2/3
    # below which use regression_params -- presumably intentional; verify.
    clf = GradientBoostingRegressor()
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
    """Check feature_importances_ and the importance-based transform."""
    X = np.array(boston.data, dtype=np.float32)
    y = np.array(boston.target, dtype=np.float32)
    clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
                                    min_samples_split=1, random_state=1)
    clf.fit(X, y)
    #feature_importances = clf.feature_importances_
    assert_true(hasattr(clf, 'feature_importances_'))
    # transform with threshold="mean" must drop at least one feature
    X_new = clf.transform(X, threshold="mean")
    assert_less(X_new.shape[1], X.shape[1])
    # and must keep exactly the features above the mean importance
    feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
    assert_array_almost_equal(X_new, X[:, feature_mask])
    # true feature importance ranking
    # true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
    # assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability():
    """Predict probabilities."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # predict_proba before fit must raise
    assert_raises(ValueError, clf.predict_proba, T)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    # check if probabilities are in [0, 1].
    y_proba = clf.predict_proba(T)
    assert np.all(y_proba >= 0.0)
    assert np.all(y_proba <= 1.0)
    # derive predictions from probabilities: argmax over classes must agree
    # with predict
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)
def test_check_inputs():
    """Test input checks (shape and type of X and y)."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # mismatched number of samples in X and y
    assert_raises(ValueError, clf.fit, X, y + [0, 1])
    from scipy import sparse
    X_sparse = sparse.csr_matrix(X)
    # sparse input is not supported: TypeError on fit and predict
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    assert_raises(TypeError, clf.fit, X_sparse, y)
    clf = GradientBoostingClassifier().fit(X, y)
    assert_raises(TypeError, clf.predict, X_sparse)
def test_check_inputs_predict():
    """predict must reject X with the wrong shape."""
    def assert_rejects_bad_shapes(model):
        # one feature instead of two
        bad = np.array([1.0, 2.0])[:, np.newaxis]
        assert_raises(ValueError, model.predict, bad)
        # empty input
        assert_raises(ValueError, model.predict, np.array([]))
        # three rows of a single feature
        bad = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
        assert_raises(ValueError, model.predict, bad)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    assert_rejects_bad_shapes(clf)
    reg = GradientBoostingRegressor(n_estimators=100, random_state=1)
    reg.fit(X, rng.rand(len(X)))
    assert_rejects_bad_shapes(reg)
def test_check_max_features():
    """Out-of-range ``max_features`` must raise at fit time."""
    n_features = len(X[0])
    for bad_value in (0, n_features + 1):
        est = GradientBoostingRegressor(n_estimators=100, random_state=1,
                                        max_features=bad_value)
        assert_raises(ValueError, est.fit, X, y)
def test_max_feature_regression():
    """Test to make sure random state is set properly. """
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, X_test = X[:2000], X[2000:]
    y_train, y_test = y[:2000], y[2000:]
    # max_features < n_features exercises the random feature subsampling
    # code path; the deviance bound is a regression guard.
    gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
                                      max_depth=2, learning_rate=.1,
                                      max_features=2, random_state=1)
    gbrt.fit(X_train, y_train)
    deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
    assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
    """Test if max features is set properly for floats and str."""
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    _, n_features = X.shape
    X_train = X[:2000]
    y_train = y[:2000]
    # (estimator class, max_features setting, expected max_features_)
    expectations = [
        (GradientBoostingClassifier, 'auto', int(np.sqrt(n_features))),
        (GradientBoostingRegressor, 'auto', n_features),
        (GradientBoostingRegressor, 0.3, int(n_features * 0.3)),
        (GradientBoostingRegressor, 'sqrt', int(np.sqrt(n_features))),
        (GradientBoostingRegressor, 'log2', int(np.log2(n_features))),
    ]
    for Est, max_features, expected in expectations:
        model = Est(n_estimators=1, max_features=max_features)
        model.fit(X_train, y_train)
        assert_equal(model.max_features_, expected)
def test_staged_predict():
    """Test whether staged decision function eventually gives
    the same prediction.
    """
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=1, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor()
    # test raise ValueError if not fitted
    assert_raises(ValueError, lambda X: np.fromiter(
        clf.staged_predict(X), dtype=np.float64), X_test)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # test if prediction for last stage equals ``predict``
    # (the loop variable keeps the final stage's prediction on exit)
    for y in clf.staged_predict(X_test):
        assert_equal(y.shape, y_pred.shape)
    assert_array_equal(y_pred, y)
def test_staged_predict_proba():
    """Test whether staged predict proba eventually gives
    the same prediction.
    """
    X, y = datasets.make_hastie_10_2(n_samples=1200,
                                     random_state=1)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingClassifier(n_estimators=20)
    # test raise ValueError if not fitted
    assert_raises(ValueError, lambda X: np.fromiter(
        clf.staged_predict_proba(X), dtype=np.float64), X_test)
    clf.fit(X_train, y_train)
    # test if prediction for last stage equals ``predict``
    # (loop variables keep the final stage's values after the loops)
    for y_pred in clf.staged_predict(X_test):
        assert_equal(y_test.shape, y_pred.shape)
    assert_array_equal(clf.predict(X_test), y_pred)
    # test if prediction for last stage equals ``predict_proba``
    for staged_proba in clf.staged_predict_proba(X_test):
        assert_equal(y_test.shape[0], staged_proba.shape[0])
        assert_equal(2, staged_proba.shape[1])
    assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_serialization():
    """Check model serialization."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
    # Python 2 compatibility: prefer the C implementation when available.
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
    clf = None
    # a round-tripped model must predict identically
    clf = pickle.loads(serialized_clf)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
    """Check if we can fit even though all targets are equal. """
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # classifier should raise exception
    assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
    clf.fit(X, np.ones(len(X)))
    # NOTE(review): the first predict's result is discarded; both calls
    # consume draws from the shared module-level rng, so removing one
    # would shift random state for later tests.
    clf.predict(rng.rand(2))
    assert_array_equal(np.ones((1,), dtype=np.float64),
                       clf.predict(rng.rand(2)))
def test_quantile_loss():
    """Check if quantile loss with alpha=0.5 equals lad."""
    common = dict(n_estimators=100, max_depth=4, random_state=7)
    est_quantile = GradientBoostingRegressor(loss='quantile', alpha=0.5,
                                             **common)
    est_quantile.fit(boston.data, boston.target)
    pred_quantile = est_quantile.predict(boston.data)
    est_lad = GradientBoostingRegressor(loss='lad', **common)
    est_lad.fit(boston.data, boston.target)
    pred_lad = est_lad.predict(boston.data)
    # the 0.5-quantile is the median, i.e. the LAD solution
    assert_array_almost_equal(pred_quantile, pred_lad, decimal=4)
def test_symbol_labels():
    """Test with non-integer class labels. """
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # string labels, e.g. '-1' and '1'
    symbol_y = tosequence(map(str, y))
    clf.fit(X, symbol_y)
    assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
    assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
    """Fitting and predicting with float-typed class labels must work."""
    float_y = np.asarray(y, dtype=np.float32)
    est = GradientBoostingClassifier(n_estimators=100, random_state=1)
    est.fit(X, float_y)
    expected = np.asarray(true_result, dtype=np.float32)
    assert_array_equal(est.predict(T), expected)
    assert_equal(100, len(est.estimators_))
def test_shape_y():
    """Test that a column-vector y warns about conversion and still fits."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    y_ = np.asarray(y, dtype=np.int32)
    # reshape to (n_samples, 1) to trigger the conversion warning
    y_ = y_[:, np.newaxis]
    # This will raise a DataConversionWarning that we want to
    # "always" raise, elsewhere the warnings gets ignored in the
    # later tests, and the tests that check for this warning fail
    assert_warns(DataConversionWarning, clf.fit, X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_mem_layout():
    """Test with different memory layouts of X and y"""
    y_int = np.asarray(y, dtype=np.int32)
    # (X variant, y variant): Fortran/C-ordered X, then C/Fortran-ordered y
    layouts = [
        (np.asfortranarray(X), y),
        (np.ascontiguousarray(X), y),
        (X, np.ascontiguousarray(y_int)),
        (X, np.asfortranarray(y_int)),
    ]
    for X_variant, y_variant in layouts:
        clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
        clf.fit(X_variant, y_variant)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(100, len(clf.estimators_))
def test_oob_score():
    """Test if oob_score is deprecated. """
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=0.5)
    clf.fit(X, y)
    # accessing the deprecated attribute must emit a DeprecationWarning
    assert_warns(DeprecationWarning, hasattr, clf, 'oob_score_')
def test_oob_improvement():
    """Test if oob improvement has correct shape and regression test. """
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=0.5)
    clf.fit(X, y)
    # one OOB improvement value per boosting stage
    assert clf.oob_improvement_.shape[0] == 100
    # hard-coded regression test - change if modification in OOB computation
    assert_array_almost_equal(clf.oob_improvement_[:5],
                              np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
                              decimal=2)
def test_oob_improvement_raise():
    """oob_improvement_ must not exist when no subsampling was used."""
    est = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=1.0)
    est.fit(X, y)
    # subsample=1.0 leaves no out-of-bag samples to score against
    assert_raises(AttributeError, lambda: est.oob_improvement_)
# NOTE(review): function name has a typo ("multilcass"); kept as-is to
# avoid changing the public test name.
def test_oob_multilcass_iris():
    """Check OOB improvement on multi-class dataset."""
    clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
                                     random_state=1, subsample=0.5)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert score > 0.9, "Failed with subsample %.1f " \
        "and score = %f" % (0.5, score)
    # one OOB improvement value per boosting stage
    assert clf.oob_improvement_.shape[0] == clf.n_estimators
    # hard-coded regression test - change if modification in OOB computation
    # FIXME: the following snippet does not yield the same results on 32 bits
    # assert_array_almost_equal(clf.oob_improvement_[:5],
    #                           np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
    #                           decimal=2)
def test_verbose_output():
    """Check verbose=1 does not cause error. """
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    # capture stdout while fitting
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=1, subsample=0.8)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout
    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # with OOB
    true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
        'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
    assert_equal(true_header, header)
    n_lines = sum(1 for l in verbose_output.readlines())
    # one for 1-10 and then 9 for 20-100
    # (verbose=1 reports every iteration up to 10, then every 10th)
    assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
    """Check verbose=2 does not cause error. """
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    # capture stdout while fitting
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=2)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout
    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # no OOB (subsample defaults to 1.0, so no OOB column)
    true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
        'Iter', 'Train Loss', 'Remaining Time')
    assert_equal(true_header, header)
    n_lines = sum(1 for l in verbose_output.readlines())
    # 100 lines for n_estimators==100
    assert_equal(100, n_lines)
def test_warn_deviance():
    """Test that the deprecated 'bdeviance' and 'mdeviance' loss aliases
    emit a deprecation warning when fitting."""
    for loss in ('bdeviance', 'mdeviance'):
        with warnings.catch_warnings(record=True) as w:
            # NOTE(review): this filter line appears copy-pasted from
            # test_shape_y; it targets DataConversionWarning although the
            # recorded warning here is the loss deprecation.  Left intact
            # to avoid changing which warnings get recorded.
            warnings.simplefilter("always", DataConversionWarning)
            clf = GradientBoostingClassifier(loss=loss)
            try:
                clf.fit(X, y)
            except ValueError:
                # 'mdeviance' raises ValueError because the toy data has
                # only 2 classes; the deprecation warning is still issued
                # before the failure.  (Was a bare ``except:`` which would
                # also have hidden unrelated errors.)
                pass
            # exactly one deprecation warning per deprecated loss alias
            assert len(w) == 1
def test_warm_start():
    """Test if warm start equals fit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        # reference: 200 trees fit in one shot
        reference = Cls(n_estimators=200, max_depth=1)
        reference.fit(X, y)
        # grow the same ensemble in two steps: 100 trees, then 100 more
        grown = Cls(n_estimators=100, max_depth=1, warm_start=True)
        grown.fit(X, y)
        grown.set_params(n_estimators=200)
        grown.fit(X, y)
        assert_array_almost_equal(grown.predict(X), reference.predict(X))
def test_warm_start_n_estimators():
    """Test if warm start equals fit - set n_estimators. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        # reference: 300 trees in one shot
        est = Cls(n_estimators=300, max_depth=1)
        est.fit(X, y)
        # warm-started: 100 trees, then grow to 300
        est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est_ws.fit(X, y)
        est_ws.set_params(n_estimators=300)
        est_ws.fit(X, y)
        assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
    """Test if possible to fit trees of different depth in ensemble. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        # grow 10 more trees with a different depth
        est.set_params(n_estimators=110, max_depth=2)
        est.fit(X, y)
        # last 10 trees have different depth
        assert est.estimators_[0, 0].max_depth == 1
        for i in range(1, 11):
            assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
    """Test if fit clears state. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1)
        est.fit(X, y)
        est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est_2.fit(X, y)  # inits state
        # turning warm_start off must make the next fit start from scratch
        est_2.set_params(warm_start=False)
        est_2.fit(X, y)  # clears old state and equals est
        assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
    """Warm start with zero n_estimators must raise a ValueError."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        model = Cls(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        # shrinking the ensemble to nothing is not supported
        model.set_params(n_estimators=0)
        assert_raises(ValueError, model.fit, X, y)
def test_warm_start_smaller_n_estimators():
    """Warm start with a smaller n_estimators must raise a ValueError."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        model = Cls(n_estimators=100, max_depth=1, warm_start=True)
        model.fit(X, y)
        # an ensemble cannot be shrunk via warm start
        model.set_params(n_estimators=99)
        assert_raises(ValueError, model.fit, X, y)
def test_warm_start_equal_n_estimators():
    """Test if warm start with equal n_estimators does nothing """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1)
        est.fit(X, y)
        # refitting with the same n_estimators must leave predictions alone
        est2 = clone(est)
        est2.set_params(n_estimators=est.n_estimators, warm_start=True)
        est2.fit(X, y)
        assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
    """Test if oob can be turned on during warm start. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        # enable subsampling (and thus OOB) only for the 10 new stages
        est.set_params(n_estimators=110, subsample=0.5)
        est.fit(X, y)
        # no OOB was computed for the first 100 stages
        assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
        # the last 10 are not zeros
        assert_array_equal(est.oob_improvement_[-10:] == 0.0,
                           np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
    """Test if warm start OOB equals fit. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
                  random_state=1)
        est.fit(X, y)
        est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
                     random_state=1, warm_start=True)
        est_ws.fit(X, y)
        est_ws.set_params(n_estimators=200)
        est_ws.fit(X, y)
        # the first 100 OOB improvements must match the one-shot fit
        assert_array_almost_equal(est_ws.oob_improvement_[:100],
                                  est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
    """Monitor callback: return True on the 10th iteration (i == 9) to
    stop training early; the est/locals arguments are unused."""
    return i == 9
def test_monitor_early_stopping():
    """Test if monitor return value works. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
        # monitor returns True at i == 9, so training stops after 10 stages
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)  # this is not altered
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)
        assert_equal(est._oob_score_.shape[0], 10)
        # try refit
        est.set_params(n_estimators=30)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.train_score_.shape[0], 30)
        assert_equal(est.oob_improvement_.shape[0], 30)
        # same behavior with warm_start enabled
        est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
                  warm_start=True)
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)
        assert_equal(est._oob_score_.shape[0], 10)
        # try refit
        est.set_params(n_estimators=30, warm_start=False)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.train_score_.shape[0], 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    """Test greedy trees with max_depth + 1 leafs. """
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
                                     random_state=1, max_leaf_nodes=k+1)
    est.fit(X, y)
    tree = est.estimators_[0, 0].tree_
    # a best-first tree with k+1 leaves reaches depth k ...
    assert_equal(tree.max_depth, k)
    # ... and has exactly k+1 leaf nodes
    assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
                 k + 1)
def test_complete_regression():
    """Test greedy trees with max_depth + 1 leafs. """
    from sklearn.tree._tree import TREE_LEAF
    k = 4
    est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
                                    random_state=1, max_leaf_nodes=k+1)
    est.fit(boston.data, boston.target)
    tree = est.estimators_[-1, 0].tree_
    # the last tree must have exactly k+1 leaf nodes
    assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
                 k + 1)
def test_zero_estimator_reg():
    """Test if ZeroEstimator works for regression. """
    # explicit ZeroEstimator instance as initial estimator
    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init=ZeroEstimator())
    est.fit(boston.data, boston.target)
    y_pred = est.predict(boston.data)
    mse = mean_squared_error(boston.target, y_pred)
    assert_almost_equal(mse, 33.0, decimal=0)
    # string alias 'zero' must behave the same
    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init='zero')
    est.fit(boston.data, boston.target)
    y_pred = est.predict(boston.data)
    mse = mean_squared_error(boston.target, y_pred)
    assert_almost_equal(mse, 33.0, decimal=0)
    # unknown init string must raise
    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
    """Test if ZeroEstimator works for classification. """
    X = iris.data
    y = np.array(iris.target)
    # explicit ZeroEstimator instance as initial estimator (multi-class)
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init=ZeroEstimator())
    est.fit(X, y)
    assert est.score(X, y) > 0.96
    # string alias 'zero' must behave the same
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='zero')
    est.fit(X, y)
    assert est.score(X, y) > 0.96
    # binary clf
    mask = y != 0
    y[mask] = 1
    y[~mask] = 0
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='zero')
    est.fit(X, y)
    assert est.score(X, y) > 0.96
    # unknown init string must raise
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    """Test precedence of max_leaf_nodes over max_depth. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    all_estimators = [GradientBoostingRegressor,
                      GradientBoostingClassifier]
    k = 4
    for GBEstimator in all_estimators:
        # with max_leaf_nodes set, max_depth is ignored
        est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        tree = est.estimators_[0, 0].tree_
        assert_greater(tree.max_depth, 1)
        # without max_leaf_nodes, max_depth is honored
        est = GBEstimator(max_depth=1).fit(X, y)
        tree = est.estimators_[0, 0].tree_
        assert_equal(tree.max_depth, 1)
if __name__ == "__main__":
    # Allow running this test module directly with nose.
    import nose
    nose.runmodule()
| bsd-3-clause |
ZGainsforth/MultiLaue | BasicProcessing.py | 1 | 7851 | # Created 2016, Zack Gainsforth
import matplotlib
#matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import numpy as np
import h5py
import multiprocessing
import time
def LoadScan(HDF5FileName, readwrite=False):
    """Open a MultiLaue HDF5 scan file and return the file and scan group.

    :param HDF5FileName: Path to the HDF5 file produced by MultiLaue.
    :param readwrite: If True, open the file 'r+' so it can be modified;
        otherwise open it read-only.
    :return: (f, Scan) where ``f`` is the open h5py.File and ``Scan`` is
        the 'Scan' group inside it.  The caller is responsible for
        closing ``f``.
    :raises AssertionError: if the file version, beamline configuration or
        scan type is not supported.
    """
    # Read the HDF file.  (Was ``if readwrite == False``; comparing to a
    # boolean literal is unidiomatic, and the two branches differed only
    # in the mode string.)
    mode = 'r+' if readwrite else 'r'
    f = h5py.File(HDF5FileName, mode)  # , swmr=True)
    # Make sure this is a version we can read.
    assert f.attrs['MultiLaueVersion'] == '1.0', 'MultiLaue only supports version 1.0 currently.'
    # Get the scan group.
    Scan = f['Scan']
    # Ensure this scan is using a beamline configuration we can process.
    # The three checks share one message, so keep it in one place.
    BeamlineMsg = 'MultiLaue only supports ALS beamline 12.3.2 with the Pilatus detector currently'
    assert Scan.attrs['Synchrotron'] == 'ALS', BeamlineMsg
    assert Scan.attrs['BeamLine'] == '12.3.2', BeamlineMsg
    assert Scan.attrs['Detector'] == 'Pilatus', BeamlineMsg
    # And right now we only do Mono, Laue and MultiLaue.
    assert Scan.attrs['ScanType'] in ['Mono', 'Laue', 'MultiLaue'], 'MultiLaue only supports Mono, Laue and MultiLaue scans currently'
    # Done.
    return f, Scan
def MakeSumImageParallelY(RowNumber, NumberOfData, DataQueue, DoneQueue):
    """Worker: pull NumberOfData images off DataQueue, sum them, and push
    the partial sum onto DoneQueue.

    RowNumber is only used for progress reporting.
    """
    images = [DataQueue.get() for _ in range(NumberOfData)]
    print('Proc %d got %d images' % (RowNumber, len(images)))
    DoneQueue.put(np.sum(np.array(images), axis=0))
    return
def MakeSumImage(Scan, StatusFunc=None):
    """Compute the sum image over all (x, y) map positions of a scan.

    Every per-pixel frame in ``Scan['DataCube']`` is accumulated into a
    single detector-sized image.  For MultiLaue scans only the first
    filter position of each frame is summed.

    NOTE: this was originally parallelized with multiprocessing, but the
    procedure is I/O bound (HDF5 reads), so it runs single-core; the dead
    multiprocessing prototype that used to live here (commented out) has
    been removed.  See MakeSumImageParallelY for the retained worker used
    by the more CPU-intensive processing.

    :param Scan: h5py group containing a 'DataCube' dataset and a
        'ScanType' attribute ('Mono', 'Laue' or 'MultiLaue').
    :param StatusFunc: Optional callable taking a status string, used to
        report per-pixel progress.  Falls back to print when None.
    :return: (Sum, SumLog) where ``SumLog = CleanLog(Sum)``.
    """
    Cube = Scan['DataCube']
    # Make a sum image with the dimensions of the detector image
    # (axes 2 and 3 of the cube).
    Sum = np.zeros(Cube.shape[2:4])
    # Now process all the images into it!
    for y in range(Cube.shape[1]):
        for x in range(Cube.shape[0]):
            StatusStr = "Sum image: x=%d, y=%d, Pixel # %d of %d" % (x, y, y * Cube.shape[0] + x + 1, Cube.shape[0] * Cube.shape[1])
            if StatusFunc is not None:
                StatusFunc(StatusStr)
            else:
                print(StatusStr)
            if Scan.attrs['ScanType'] in ['Laue', 'Mono']:
                # The sum image of a Laue scan is the sum of each frame.
                Sum += Cube[x, y, :, :]
            elif Scan.attrs['ScanType'] == 'MultiLaue':
                # MultiLaue sums are just the sum of each frame with the
                # first filter position.
                Sum += Cube[x, y, :, :, 0]
    SumLog = CleanLog(Sum)
    return Sum, SumLog
def MakeStDevImage(Scan, StatusFunc=None):
    """Compute the per-pixel standard-deviation image of a scan.

    Expects Scan['SumImage'] to already exist; the mean frame is derived
    from it as SumImage / N, where N is the number of map pixels.  Returns
    the standard-deviation image and its cleaned logarithm.
    """
    Cube = Scan['DataCube']
    SumImage = Scan['SumImage']
    # Number of frames contributing to every detector pixel.
    FrameCount = Cube.shape[0] * Cube.shape[1]
    MeanImage = SumImage[:, :] / FrameCount
    StDev = np.zeros(SumImage.shape)
    # Accumulate the squared deviation of each frame from the mean frame.
    for Row in range(Cube.shape[1]):
        for Col in range(Cube.shape[0]):
            StatusStr = "StDev image: x=%d, y=%d, Pixel # %d of %d" % (Col, Row, Row * Cube.shape[0] + Col + 1, FrameCount)
            if StatusFunc is not None:
                StatusFunc(StatusStr)
            else:
                print(StatusStr)
            ScanType = Scan.attrs['ScanType']
            if ScanType in ['Laue', 'Mono']:
                StDev += (Cube[Col, Row, :, :] - MeanImage) ** 2
            elif ScanType == 'MultiLaue':
                # Only the first filter position contributes for MultiLaue.
                StDev += (Cube[Col, Row, :, :, 0] - MeanImage) ** 2
    # Sample standard deviation (N-1) to stay unbiased at low pixel counts.
    StDev = np.sqrt(StDev / (FrameCount - 1))
    StDevLog = CleanLog(StDev)
    return StDev, StDevLog
def MakeTopographFromCoordinate(Scan, CoordIn):
    """Build a topograph: one detector pixel's intensity across the map.

    CoordIn is a canvas coordinate; it is converted to data coordinates
    before indexing the detector frames.
    """
    Cube = Scan['DataCube']
    DetCoord = ConvertCanvasCoordinatesToDataCoordinates(CoordIn)
    Topo = np.zeros(Cube.shape[0:2])
    ScanType = Scan.attrs['ScanType']
    for Row in range(Cube.shape[1]):
        for Col in range(Cube.shape[0]):
            if ScanType in ['Laue', 'Mono']:
                Topo[Col, Row] += Cube[Col, Row, DetCoord[0], DetCoord[1]]
            elif ScanType == 'MultiLaue':
                # First filter position only for MultiLaue scans.
                Topo[Col, Row] += Cube[Col, Row, DetCoord[0], DetCoord[1], 0]
    return Topo
def GetSingleImageFromTopographCoordinate(Scan, CoordIn):
    """Return the detector image recorded at one topograph (map) pixel.

    Parameters
    ----------
    Scan : h5py group with 'DataCube' (and, for MultiLaue scans,
        'EnergyCube' and 'EnergyFitValCube') datasets.
    CoordIn : canvas coordinate of the map pixel.

    Returns
    -------
    (SingleImage, EnergyImage, EnergyFitImage); the latter two are None
    for Laue/Mono scans.

    Raises
    ------
    ValueError
        If the scan type is not recognized.  (Previously an unrecognized
        type fell through both branches and raised UnboundLocalError.)
    """
    Cube = Scan['DataCube']
    Coord = ConvertCanvasCoordinatesToDataCoordinates(CoordIn)
    if Scan.attrs['ScanType'] in ['Laue', 'Mono']:
        SingleImage = Cube[Coord[0], Coord[1], :, :]
        EnergyImage = None
        EnergyFitImage = None
    elif Scan.attrs['ScanType'] == 'MultiLaue':
        # MultiLaue: frame at the first filter position, plus energy maps.
        SingleImage = Cube[Coord[0], Coord[1], :, :, 0]
        EnergyImage = Scan['EnergyCube'][Coord[0], Coord[1], :, :]
        EnergyFitImage = Scan['EnergyFitValCube'][Coord[0], Coord[1], :, :]
    else:
        raise ValueError("Unknown ScanType: %s" % Scan.attrs['ScanType'])
    return SingleImage, EnergyImage, EnergyFitImage
def ConvertCanvasCoordinatesToDataCoordinates(CanvasCoordinate):
    """Map a canvas (x, y) coordinate onto (row, col) data indices.

    The data cube is stored m x n (row-major), so x and y are swapped.
    Canvas values refer to pixel centers rather than corners, hence the
    half-pixel offset before flooring.
    """
    Row = np.floor(CanvasCoordinate[1] + 0.5)
    Col = np.floor(CanvasCoordinate[0] + 0.5)
    DataCoord = np.array(CanvasCoordinate)  # keeps the input's dtype/length
    DataCoord[0] = Row
    DataCoord[1] = Col
    return DataCoord
def CleanLog(Val):
    """Return np.log(Val) with infinities and NaNs replaced by zero.

    Zeros in the input produce -inf (mapped to 0); negative values
    produce NaN (also mapped to 0 by nan_to_num).
    """
    logged = np.log(Val)
    logged[np.isinf(logged)] = 0
    return np.nan_to_num(logged)
if __name__ == '__main__':
    # Ad-hoc driver: open a scan read/write, build and store the sum image,
    # then compute the standard-deviation image (which requires 'SumImage'
    # to already be present in the file).
    f, Scan = LoadScan('GRA95229_mLaue_7.hdf5', readwrite=True)
    Sum, SumLog = MakeSumImage(Scan)
    Scan.create_dataset('SumImage', data=Sum)
    # Scan.create_dataset('SumImage', data=Sum)
    StDev, StDevLog = MakeStDevImage(Scan)
    # NOTE(review): StDev is computed but never written back to the file —
    # presumably it should also be stored via create_dataset; confirm.
    f.close()
| epl-1.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/numpy/lib/polynomial.py | 32 | 37972 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
    """
    Issued by `polyfit` when the Vandermonde matrix is rank deficient.
    For more information, a way to suppress the warning, and an example of
    `RankWarning` being issued, see `polyfit`.
    """
    # No body needed: the subclass exists only to give the warning a
    # distinct category that callers can filter on.
    pass
def poly(seq_of_zeros):
    """
    Find the coefficients of a polynomial with the given sequence of roots.

    Returns the monic polynomial (leading coefficient 1) whose roots are
    the entries of `seq_of_zeros`, with multiple roots repeated according
    to their multiplicity.  A square array is interpreted as a matrix, in
    which case the coefficients of its characteristic polynomial are
    returned.

    Parameters
    ----------
    seq_of_zeros : array_like, shape (N,) or (N, N)
        Polynomial roots, or a square array/matrix whose eigenvalues
        supply the roots.

    Returns
    -------
    c : ndarray
        1-D coefficient array from highest to lowest degree; ``c[0]`` is
        always 1.

    Raises
    ------
    ValueError
        If the input is neither 1-D nor a non-empty square 2-D array.

    See Also
    --------
    polyval, roots, polyfit, poly1d
    """
    zeros = atleast_1d(seq_of_zeros)
    shape = zeros.shape
    if len(shape) == 2 and shape[0] == shape[1] and shape[0] != 0:
        # Square matrix: its eigenvalues are the roots of the
        # characteristic polynomial.
        zeros = eigvals(zeros)
    elif len(shape) == 1:
        dt = zeros.dtype
        # Object arrays pass through untouched (arbitrary precision).
        if dt != object:
            zeros = zeros.astype(mintypecode(dt.char))
    else:
        raise ValueError("input must be 1d or non-empty square 2d array.")
    if len(zeros) == 0:
        return 1.0
    dt = zeros.dtype
    # Multiply out (x - r) factors one at a time via convolution.
    coeffs = ones((1,), dtype=dt)
    for root in zeros:
        coeffs = NX.convolve(coeffs, array([1, -root], dtype=dt),
                             mode='full')
    if issubclass(coeffs.dtype.type, NX.complexfloating):
        # Roots occurring purely in conjugate pairs yield a real polynomial.
        candidates = NX.asarray(zeros, complex)
        if NX.all(NX.sort(candidates) == NX.sort(candidates.conjugate())):
            coeffs = coeffs.real.copy()
    return coeffs
def roots(p):
    """
    Return the roots of a polynomial with coefficients given in p.

    The values in `p` describe ``p[0] * x**n + p[1] * x**(n-1) + ... +
    p[n-1]*x + p[n]``.

    Parameters
    ----------
    p : array_like
        Rank-1 array of polynomial coefficients.

    Returns
    -------
    out : ndarray
        The roots of the polynomial.

    Raises
    ------
    ValueError
        When `p` cannot be converted to a rank-1 array.

    Notes
    -----
    The roots are computed as the eigenvalues of the polynomial's
    companion matrix.
    """
    coeffs = atleast_1d(p)  # scalars become length-1 arrays
    if len(coeffs.shape) != 1:
        raise ValueError("Input must be a rank-1 array.")
    nonzero_idx = NX.nonzero(NX.ravel(coeffs))[0]
    if len(nonzero_idx) == 0:
        # The zero polynomial has no roots.
        return NX.array([])
    # Each trailing zero coefficient contributes a root at the origin.
    num_zero_roots = len(coeffs) - nonzero_idx[-1] - 1
    # Drop leading and trailing zero coefficients.
    coeffs = coeffs[int(nonzero_idx[0]):int(nonzero_idx[-1]) + 1]
    # Promote non-float coefficient arrays to floating point.
    if not issubclass(coeffs.dtype.type, (NX.floating, NX.complexfloating)):
        coeffs = coeffs.astype(float)
    degree_plus_one = len(coeffs)
    if degree_plus_one > 1:
        # Companion matrix: subdiagonal of ones, first row from -p[1:]/p[0].
        companion = diag(NX.ones((degree_plus_one - 2,), coeffs.dtype), -1)
        companion[0, :] = -coeffs[1:] / coeffs[0]
        found = eigvals(companion)
    else:
        found = NX.array([])
    # Append the roots at zero that were stripped off above.
    return hstack((found, NX.zeros(num_zero_roots, found.dtype)))
def polyint(p, m=1, k=None):
    """
    Return an antiderivative (indefinite integral) of a polynomial.
    The returned order `m` antiderivative `P` of polynomial `p` satisfies
    :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
    integration constants `k`. The constants determine the low-order
    polynomial part
    .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
    of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
    Parameters
    ----------
    p : array_like or poly1d
        Polynomial to differentiate.
        A sequence is interpreted as polynomial coefficients, see `poly1d`.
    m : int, optional
        Order of the antiderivative. (Default: 1)
    k : list of `m` scalars or scalar, optional
        Integration constants. They are given in the order of integration:
        those corresponding to highest-order terms come first.
        If ``None`` (default), all constants are assumed to be zero.
        If `m = 1`, a single scalar can be given instead of a list.
    Returns
    -------
    polyint : ndarray or poly1d
        A new polynomial representing the antiderivative; a `poly1d`
        input yields a `poly1d` result.
    See Also
    --------
    polyder : derivative of a polynomial
    poly1d.integ : equivalent method
    Examples
    --------
    The defining property of the antiderivative:
    >>> p = np.poly1d([1,1,1])
    >>> P = np.polyint(p)
    >>> P
    poly1d([ 0.33333333, 0.5 , 1. , 0. ])
    >>> np.polyder(P) == p
    True
    The integration constants default to zero, but can be specified:
    >>> P = np.polyint(p, 3)
    >>> P(0)
    0.0
    >>> np.polyder(P)(0)
    0.0
    >>> np.polyder(P, 2)(0)
    0.0
    >>> P = np.polyint(p, 3, k=[6,5,3])
    >>> P
    poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
    Note that 3 = 6 / 2!, and that the constants are given in the order of
    integrations. Constant of the highest-order polynomial term comes first:
    >>> np.polyder(P, 2)(0)
    6.0
    >>> np.polyder(P, 1)(0)
    5.0
    >>> P(0)
    3.0
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of integral must be positive (see polyder)")
    if k is None:
        # No constants supplied: integrate with all constants zero.
        k = NX.zeros(m, float)
    k = atleast_1d(k)
    if len(k) == 1 and m > 1:
        # A single scalar constant is broadcast to all m integrations.
        k = k[0]*NX.ones(m, float)
    if len(k) < m:
        raise ValueError(
            "k must be a scalar or a rank-1 array of length 1 or >m.")
    truepoly = isinstance(p, poly1d)
    p = NX.asarray(p)
    if m == 0:
        # Zeroth antiderivative is the polynomial itself.
        if truepoly:
            return poly1d(p)
        return p
    else:
        # Note: __truediv__ (rather than /) keeps this working for object
        # and integer coefficient arrays; then recurse for higher orders,
        # consuming one integration constant per level.
        y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
        val = polyint(y, m - 1, k=k[1:])
        if truepoly:
            return poly1d(val)
        return val
def polyder(p, m=1):
    """
    Return the derivative of the specified order of a polynomial.

    Parameters
    ----------
    p : poly1d or sequence
        Polynomial to differentiate; a sequence is read as coefficients
        from highest to lowest degree.
    m : int, optional
        Order of differentiation (default: 1).

    Returns
    -------
    der : poly1d or ndarray
        The `m`-th derivative.  A poly1d input yields a poly1d output.

    See Also
    --------
    polyint : Anti-derivative of a polynomial.
    poly1d : Class for one-dimensional polynomials.
    """
    m = int(m)
    if m < 0:
        raise ValueError("Order of derivative must be positive (see polyint)")
    was_poly1d = isinstance(p, poly1d)
    coeffs = NX.asarray(p)
    degree = len(coeffs) - 1
    # First derivative: drop the constant term, scale by the old exponents.
    first_deriv = coeffs[:-1] * NX.arange(degree, 0, -1)
    # Recurse down to order m; order 0 returns the coefficients unchanged.
    result = coeffs if m == 0 else polyder(first_deriv, m - 1)
    if was_poly1d:
        result = poly1d(result)
    return result
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least squares polynomial fit.
    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
    to points `(x, y)`. Returns a vector of coefficients `p` that minimises
    the squared error.
    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting polynomial
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (M,), optional
        Weights to apply to the y-coordinates of the sample points. For
        gaussian uncertainties, use 1/sigma (not 1/sigma**2).
    cov : bool, optional
        Return the estimate and the covariance matrix of the estimate
        If full is True, then cov is not returned.
    Returns
    -------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
        Polynomial coefficients, highest power first. If `y` was 2-D, the
        coefficients for `k`-th data set are in ``p[:,k]``.
    residuals, rank, singular_values, rcond
        Present only if `full` = True. Residuals of the least-squares fit,
        the effective rank of the scaled Vandermonde coefficient matrix,
        its singular values, and the specified value of `rcond`. For more
        details, see `linalg.lstsq`.
    V : ndarray, shape (M,M) or (M,M,K)
        Present only if `full` = False and `cov`=True. The covariance
        matrix of the polynomial coefficient estimates. The diagonal of
        this matrix are the variance estimates for each coefficient. If y
        is a 2-D array, then the covariance matrix for the `k`-th data set
        are in ``V[:,:,k]``
    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.
        The warnings can be turned off by
        >>> import warnings
        >>> warnings.simplefilter('ignore', np.RankWarning)
    See Also
    --------
    polyval : Compute polynomial values.
    linalg.lstsq : Computes a least-squares fit.
    scipy.interpolate.UnivariateSpline : Computes spline fits.
    Notes
    -----
    The solution minimizes the squared error
    .. math ::
        E = \\sum_{j=0}^k |p(x_j) - y_j|^2
    in the equations::
        x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
        x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
        ...
        x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
    The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
    `polyfit` issues a `RankWarning` when the least-squares fit is badly
    conditioned. This implies that the best fit is not well-defined due
    to numerical error. The results may be improved by lowering the polynomial
    degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
    can also be set to a value smaller than its default, but the resulting
    fit may be spurious: including contributions from the small singular
    values can add numerical noise to the result.
    Note that fitting polynomial coefficients is inherently badly conditioned
    when the degree of the polynomial is large or the interval of sample points
    is badly centered. The quality of the fit should always be checked in these
    cases. When polynomial fits are not satisfactory, splines may be a good
    alternative.
    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting
    .. [2] Wikipedia, "Polynomial interpolation",
           http://en.wikipedia.org/wiki/Polynomial_interpolation
    Examples
    --------
    >>> x = np.array([0.0, 1.0, 2.0, 3.0,  4.0,  5.0])
    >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
    >>> z = np.polyfit(x, y, 3)
    >>> z
    array([ 0.08703704, -0.81349206,  1.69312169, -0.03968254])
    It is convenient to use `poly1d` objects for dealing with polynomials:
    >>> p = np.poly1d(z)
    >>> p(0.5)
    0.6143849206349179
    >>> p(3.5)
    -0.34732142857143039
    >>> p(10)
    22.579365079365115
    High-order polynomials may oscillate wildly:
    >>> p30 = np.poly1d(np.polyfit(x, y, 30))
    /... RankWarning: Polyfit may be poorly conditioned...
    >>> p30(4)
    -0.80000000000000204
    >>> p30(5)
    -0.99999999999999445
    >>> p30(4.5)
    -0.10547061179440398
    Illustration:
    >>> import matplotlib.pyplot as plt
    >>> xp = np.linspace(-2, 6, 100)
    >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
    >>> plt.ylim(-2,2)
    (-2, 2)
    >>> plt.show()
    """
    order = int(deg) + 1
    # "+ 0.0" promotes integer inputs to floating point.
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0
    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")
    # set rcond
    if rcond is None:
        rcond = len(x)*finfo(x.dtype).eps
    # set up least squares equation for powers of x
    lhs = vander(x, order)
    rhs = y
    # apply weighting
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        # Weight each equation (row) of the linear system.
        lhs *= w[:, NX.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, NX.newaxis]
        else:
            rhs *= w
    # scale lhs to improve condition number and solve
    scale = NX.sqrt((lhs*lhs).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = lstsq(lhs, rhs, rcond)
    c = (c.T/scale).T  # broadcast scale coefficients
    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning, stacklevel=2)
    if full:
        return c, resids, rank, s, rcond
    elif cov:
        # Covariance of the coefficients: inv(A^T A), undoing the column
        # scaling applied above.
        Vbase = inv(dot(lhs.T, lhs))
        Vbase /= NX.outer(scale, scale)
        # Some literature ignores the extra -2.0 factor in the denominator, but
        #  it is included here because the covariance of Multivariate Student-T
        # (which is implied by a Bayesian uncertainty analysis) includes it.
        # Plus, it gives a slightly more conservative estimate of uncertainty.
        if len(x) <= order + 2:
            raise ValueError("the number of data points must exceed order + 2 "
                             "for Bayesian estimate the covariance matrix")
        fac = resids / (len(x) - order - 2.0)
        if y.ndim == 1:
            return c, Vbase * fac
        else:
            return c, Vbase[:,:, NX.newaxis] * fac
    else:
        return c
def polyval(p, x):
    """
    Evaluate a polynomial at specific values.

    For coefficients `p` of length N this computes
    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``.
    A sequence `x` is evaluated elementwise; a poly1d `x` yields the
    composite polynomial ``p(x(t))``.

    Parameters
    ----------
    p : array_like or poly1d object
        1D array of polynomial coefficients from highest degree to the
        constant term, or a poly1d instance.
    x : array_like or poly1d object
        A number, an array of numbers, or a poly1d, at which to
        evaluate `p`.

    Returns
    -------
    values : ndarray or poly1d
        Same kind as `x`: array_like input gives array_like output, a
        poly1d input gives a poly1d (the composition of the two).

    Notes
    -----
    Uses Horner's scheme; high-degree polynomials may lose accuracy to
    rounding.
    """
    coeffs = NX.asarray(p)
    if isinstance(x, poly1d):
        # Start from scalar zero so the loop builds up a poly1d.
        acc = 0
    else:
        x = NX.asarray(x)
        acc = NX.zeros_like(x)
    # Horner's method: fold in one coefficient per step.
    for c in coeffs:
        acc = acc * x + c
    return acc
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    Each input is either a poly1d object or a 1D sequence of polynomial
    coefficients from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The sum.  If either input is a poly1d, so is the output;
        otherwise a 1D coefficient array.

    See Also
    --------
    poly1d, poly, polyder, polydiv, polyfit, polyint, polysub, polyval
    """
    truepoly = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    c1 = atleast_1d(a1)
    c2 = atleast_1d(a2)
    # Left-pad the shorter coefficient array with zeros so degrees align.
    pad = len(c2) - len(c1)
    if pad > 0:
        c1 = NX.concatenate((NX.zeros(pad, c1.dtype), c1))
    elif pad < 0:
        c2 = NX.concatenate((NX.zeros(-pad, c2.dtype), c2))
    total = c1 + c2
    return poly1d(total) if truepoly else total
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.

    Given two polynomials `a1` and `a2`, returns ``a1 - a2``.  Inputs are
    coefficient sequences (highest degree first) or poly1d objects.

    Parameters
    ----------
    a1, a2 : array_like or poly1d
        Minuend and subtrahend polynomials, respectively.

    Returns
    -------
    out : ndarray or poly1d
        Coefficients of the difference; poly1d if either input was.

    See Also
    --------
    polyval, polydiv, polymul, polyadd
    """
    truepoly = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    c1 = atleast_1d(a1)
    c2 = atleast_1d(a2)
    # Left-pad the shorter coefficient array with zeros so degrees align.
    pad = len(c2) - len(c1)
    if pad > 0:
        c1 = NX.concatenate((NX.zeros(pad, c1.dtype), c1))
    elif pad < 0:
        c2 = NX.concatenate((NX.zeros(-pad, c2.dtype), c2))
    difference = c1 - c2
    return poly1d(difference) if truepoly else difference
def polymul(a1, a2):
    """
    Find the product of two polynomials.

    Each input is either a poly1d object or a 1D sequence of polynomial
    coefficients from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The product.  If either input is a poly1d, so is the output;
        otherwise a 1D coefficient array.

    See Also
    --------
    poly1d, convolve
    """
    truepoly = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    p1, p2 = poly1d(a1), poly1d(a2)
    # Polynomial multiplication is convolution of the coefficient arrays.
    product = NX.convolve(p1, p2)
    return poly1d(product) if truepoly else product
def polydiv(u, v):
    """
    Returns the quotient and remainder of polynomial division.

    The input arrays are the coefficients (including any coefficients
    equal to zero) of the "numerator" (dividend) and "denominator"
    (divisor) polynomials, respectively.

    Parameters
    ----------
    u : array_like or poly1d
        Dividend polynomial's coefficients.
    v : array_like or poly1d
        Divisor polynomial's coefficients.

    Returns
    -------
    q : ndarray or poly1d
        Coefficients, including those equal to zero, of the quotient.
    r : ndarray or poly1d
        Coefficients, including those equal to zero, of the remainder.

    See Also
    --------
    poly, polyadd, polyder, polyfit, polyint, polymul, polysub, polyval

    Notes
    -----
    Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
    not equal `v.ndim`.
    """
    # BUG FIX: the original tested `isinstance(u, poly1d)` twice, so a
    # poly1d divisor was never detected and the result came back as plain
    # ndarrays instead of poly1d objects.
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
    u = atleast_1d(u) + 0.0
    v = atleast_1d(v) + 0.0
    # w carries the common result dtype of the two inputs.
    w = u[0] + v[0]
    m = len(u) - 1
    n = len(v) - 1
    scale = 1. / v[0]
    q = NX.zeros((max(m - n + 1, 1),), w.dtype)
    r = u.copy()
    # Schoolbook long division: eliminate the leading remainder term at
    # each step, recording the factor as a quotient coefficient.
    for k in range(0, m-n+1):
        d = scale * r[k]
        q[k] = d
        r[k:k+n+1] -= d*v
    # Strip leading (near-)zero coefficients from the remainder.
    while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
        r = r[1:]
    if truepoly:
        return poly1d(q), poly1d(r)
    return q, r
# Matches "**<digits>" so exponents can be lifted into a superscript line.
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
    # Pretty-printer helper (used by poly1d.__str__): rewrites "x**N"
    # power notation into a two-line layout where the exponents sit on the
    # line above the polynomial body, wrapping at `wrap` columns.
    n = 0          # scan position in astr
    line1 = ''     # upper line: accumulated exponents
    line2 = ''     # lower line: accumulated polynomial text
    output = ' '
    while True:
        mat = _poly_mat.search(astr, n)
        if mat is None:
            break
        span = mat.span()
        power = mat.groups()[0]
        # Text between the previous match and this "**".
        partstr = astr[n:span[0]]
        n = span[1]
        # Pad each line so the exponent lands just past its base term.
        toadd2 = partstr + ' '*(len(power)-1)
        toadd1 = ' '*(len(partstr)-1) + power
        if ((len(line2) + len(toadd2) > wrap) or
            (len(line1) + len(toadd1) > wrap)):
            # Current pair of lines is full: flush and start fresh ones.
            output += line1 + "\n" + line2 + "\n "
            line1 = toadd1
            line2 = toadd2
        else:
            line2 += partstr + ' '*(len(power)-1)
            line1 += ' '*(len(partstr)-1) + power
    output += line1 + "\n" + line2
    # Append any trailing text after the last exponent.
    return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
__hash__ = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
    def __call__(self, val):
        """Evaluate the polynomial at *val* (scalar or array) via polyval."""
        return polyval(self.coeffs, val)

    def __neg__(self):
        """Unary minus: a new polynomial with every coefficient negated."""
        return poly1d(-self.coeffs)

    def __pos__(self):
        """Unary plus is the identity (returns self, not a copy)."""
        return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
return not self.__eq__(other)
    def __setattr__(self, key, val):
        # poly1d instances are deliberately read-only through normal attribute
        # assignment; internal code mutates state via self.__dict__ directly.
        raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c', 'coef', 'coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(
"'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
    def __iter__(self):
        # Iterate over the coefficients, highest power first.
        return iter(self.coeffs)
    def integ(self, m=1, k=0):
        """
        Return an antiderivative (indefinite integral) of this polynomial,
        integrated *m* times with integration constant(s) *k*.

        Refer to `polyint` for full documentation.

        See Also
        --------
        polyint : equivalent function

        """
        return poly1d(polyint(self.coeffs, m=m, k=k))

    def deriv(self, m=1):
        """
        Return the *m*-th derivative of this polynomial.

        Refer to `polyder` for full documentation.

        See Also
        --------
        polyder : equivalent function

        """
        return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import

# Always surface RankWarning (e.g. from ill-conditioned polyfit calls)
# instead of the default once-per-location warning filter.
warnings.simplefilter('always', RankWarning)
| apache-2.0 |
UNR-AERIAL/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
    """Exercise the BaseEnsemble plumbing through BaggingClassifier."""
    clf = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
    data = load_iris()
    clf.fit(data.data, data.target)

    # Rebuild the estimator list manually: three appended, one detached.
    clf.estimators_ = []
    for _ in range(3):
        clf._make_estimator()
    clf._make_estimator(append=False)

    assert_equal(3, len(clf))
    assert_equal(3, len(clf.estimators_))
    assert_true(isinstance(clf[0], Perceptron))
def test_base_zero_n_estimators():
    """BaseEnsemble must refuse to fit when n_estimators <= 0."""
    clf = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
    data = load_iris()
    assert_raise_message(ValueError,
                         "n_estimators must be greater than zero, got 0.",
                         clf.fit, data.data, data.target)
| bsd-3-clause |
Dapid/GPy | GPy/models/bayesian_gplvm.py | 8 | 10126 | # Copyright (c) 2012 - 2014 the GPy Austhors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .. import kern
from ..core.sparse_gp_mpi import SparseGP_MPI
from ..likelihoods import Gaussian
from ..core.parameterization.variational import NormalPosterior, NormalPrior
from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
import logging
class BayesianGPLVM(SparseGP_MPI):
    """
    Bayesian Gaussian Process Latent Variable Model

    Learns a latent space X (with a variational Gaussian posterior) jointly
    with a sparse GP mapping from X to the observed data Y.

    :param Y: observed data (np.ndarray) or GPy.likelihood
    :type Y: np.ndarray| GPy.likelihood instance
    :param input_dim: latent dimensionality
    :type input_dim: int
    :param init: initialisation method for the latent space
    :type init: 'PCA'|'random'
    """
    def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10,
                 Z=None, kernel=None, inference_method=None, likelihood=None,
                 name='bayesian gplvm', mpi_comm=None, normalizer=None,
                 missing_data=False, stochastic=False, batchsize=1, Y_metadata=None):
        self.logger = logging.getLogger(self.__class__.__name__)
        if X is None:
            from ..util.initialization import initialize_latent
            self.logger.info("initializing latent space X with method {}".format(init))
            # fracs: per-dimension variance fractions used to scale the kernel.
            X, fracs = initialize_latent(init, input_dim, Y)
        else:
            fracs = np.ones(input_dim)
        self.init = init

        if X_variance is None:
            # Small positive variances for the initial variational posterior.
            self.logger.info("initializing latent space variance ~ uniform(0,.1)")
            X_variance = np.random.uniform(0,.1,X.shape)

        if Z is None:
            # Inducing inputs: a random subset of the latent means.
            self.logger.info("initializing inducing inputs")
            Z = np.random.permutation(X.copy())[:num_inducing]
        assert Z.shape[1] == X.shape[1]

        if kernel is None:
            self.logger.info("initializing kernel RBF")
            # ARD lengthscales: inverse of the variance fraction per dimension.
            kernel = kern.RBF(input_dim, lengthscale=1./fracs, ARD=True) #+ kern.Bias(input_dim) + kern.White(input_dim)

        if likelihood is None:
            likelihood = Gaussian()

        self.variational_prior = NormalPrior()
        # Wrap mean and variance into a variational posterior object.
        X = NormalPosterior(X, X_variance)

        if inference_method is None:
            if mpi_comm is not None:
                inference_method = VarDTC_minibatch(mpi_comm=mpi_comm)
            else:
                from ..inference.latent_function_inference.var_dtc import VarDTC
                self.logger.debug("creating inference_method var_dtc")
                inference_method = VarDTC(limit=1 if not missing_data else Y.shape[1])
        if isinstance(inference_method,VarDTC_minibatch):
            inference_method.mpi_comm = mpi_comm

        super(BayesianGPLVM,self).__init__(X, Y, Z, kernel, likelihood=likelihood,
                                           name=name, inference_method=inference_method,
                                           normalizer=normalizer, mpi_comm=mpi_comm,
                                           variational_prior=self.variational_prior,
                                           Y_metadata=Y_metadata
                                           )
        # Register X so its variational parameters are optimized too.
        self.link_parameter(self.X, index=0)

    def set_X_gradients(self, X, X_grad):
        """Set the gradients of the posterior distribution of X in its specific form."""
        X.mean.gradient, X.variance.gradient = X_grad

    def get_X_gradients(self, X):
        """Get the gradients of the posterior distribution of X in its specific form."""
        return X.mean.gradient, X.variance.gradient

    def parameters_changed(self):
        # Called by the parameterization framework after any parameter update.
        super(BayesianGPLVM,self).parameters_changed()
        if isinstance(self.inference_method, VarDTC_minibatch):
            # The minibatch inference handles the KL term and X gradients itself.
            return

        kl_fctr = 1.
        # Subtract the KL(q(X) || p(X)) term from the marginal likelihood.
        self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)

        self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(
                                            variational_posterior=self.X,
                                            Z=self.Z,
                                            dL_dpsi0=self.grad_dict['dL_dpsi0'],
                                            dL_dpsi1=self.grad_dict['dL_dpsi1'],
                                            dL_dpsi2=self.grad_dict['dL_dpsi2'])

        self.variational_prior.update_gradients_KL(self.X)

        #super(BayesianGPLVM, self).parameters_changed()
        #self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)

        #self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2'])

        # This is testing code -------------------------
        # i = np.random.randint(self.X.shape[0])
        # X_ = self.X.mean
        # which = np.sqrt(((X_ - X_[i:i+1])**2).sum(1)).argsort()>(max(0, self.X.shape[0]-51))
        # _, _, grad_dict = self.inference_method.inference(self.kern, self.X[which], self.Z, self.likelihood, self.Y[which], self.Y_metadata)
        # grad = self.kern.gradients_qX_expectations(variational_posterior=self.X[which], Z=self.Z, dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2'])
        #
        # self.X.mean.gradient[:] = 0
        # self.X.variance.gradient[:] = 0
        # self.X.mean.gradient[which] = grad[0]
        # self.X.variance.gradient[which] = grad[1]

        # update for the KL divergence
        # self.variational_prior.update_gradients_KL(self.X, which)
        # -----------------------------------------------

        # update for the KL divergence
        #self.variational_prior.update_gradients_KL(self.X)

    def plot_latent(self, labels=None, which_indices=None,
                    resolution=50, ax=None, marker='o', s=40,
                    fignum=None, plot_inducing=True, legend=True,
                    plot_limits=None,
                    aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
        # Thin wrapper around the matplotlib-based latent-space plot.
        import sys
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ..plotting.matplot_dep import dim_reduction_plots

        return dim_reduction_plots.plot_latent(self, labels, which_indices,
                                               resolution, ax, marker, s,
                                               fignum, plot_inducing, legend,
                                               plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)

    def do_test_latents(self, Y):
        """
        Compute the latent representation for a set of new points Y

        Notes:
        This will only work with a univariate Gaussian likelihood (for now)
        """
        N_test = Y.shape[0]
        input_dim = self.Z.shape[1]
        means = np.zeros((N_test, input_dim))
        covars = np.zeros((N_test, input_dim))

        dpsi0 = -0.5 * self.input_dim / self.likelihood.variance
        dpsi2 = self.grad_dict['dL_dpsi2'][0][None, :, :] # TODO: this may change if we ignore het. likelihoods
        V = Y/self.likelihood.variance

        #compute CPsi1V
        #if self.Cpsi1V is None:
        #    psi1V = np.dot(self.psi1.T, self.likelihood.V)
        #    tmp, _ = linalg.dtrtrs(self._Lm, np.asfortranarray(psi1V), lower=1, trans=0)
        #    tmp, _ = linalg.dpotrs(self.LB, tmp, lower=1)
        #    self.Cpsi1V, _ = linalg.dtrtrs(self._Lm, tmp, lower=1, trans=1)

        dpsi1 = np.dot(self.posterior.woodbury_vector, V.T)

        #start = np.zeros(self.input_dim * 2)

        from scipy.optimize import minimize

        # Optimize each test point's latent mean and (log-)variance
        # independently with BFGS on the negative log-likelihood.
        for n, dpsi1_n in enumerate(dpsi1.T[:, :, None]):
            args = (input_dim, self.kern.copy(), self.Z, dpsi0, dpsi1_n.T, dpsi2)
            res = minimize(latent_cost_and_grad, jac=True, x0=np.hstack((means[n], covars[n])), args=args, method='BFGS')
            xopt = res.x
            mu, log_S = xopt.reshape(2, 1, -1)
            means[n] = mu[0].copy()
            covars[n] = np.exp(log_S[0]).copy()  # back from log space

        X = NormalPosterior(means, covars)

        return X

    def dmu_dX(self, Xnew):
        """
        Calculate the gradient of the prediction at Xnew w.r.t Xnew.
        """
        dmu_dX = np.zeros_like(Xnew)
        for i in range(self.Z.shape[0]):
            dmu_dX += self.kern.gradients_X(self.grad_dict['dL_dpsi1'][i:i + 1, :], Xnew, self.Z[i:i + 1, :])
        return dmu_dX

    def dmu_dXnew(self, Xnew):
        """
        Individual gradient of prediction at Xnew w.r.t. each sample in Xnew
        """
        gradients_X = np.zeros((Xnew.shape[0], self.num_inducing))
        ones = np.ones((1, 1))
        for i in range(self.Z.shape[0]):
            gradients_X[:, i] = self.kern.gradients_X(ones, Xnew, self.Z[i:i + 1, :]).sum(-1)
        return np.dot(gradients_X, self.grad_dict['dL_dpsi1'])

    def plot_steepest_gradient_map(self, *args, ** kwargs):
        """
        See GPy.plotting.matplot_dep.dim_reduction_plots.plot_steepest_gradient_map
        """
        import sys
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ..plotting.matplot_dep import dim_reduction_plots

        return dim_reduction_plots.plot_steepest_gradient_map(self,*args,**kwargs)
def latent_cost_and_grad(mu_S, input_dim, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
    """
    objective function for fitting the latent variables for test points
    (negative log-likelihood: should be minimised!)

    mu_S packs the latent mean and log-variance into a single flat vector
    [mu, log_S]; returns (cost, gradient) for use with a jac=True optimizer.
    """
    mu = mu_S[:input_dim][None]
    log_S = mu_S[input_dim:][None]
    # Optimize in log space so the variance stays positive.
    S = np.exp(log_S)

    X = NormalPosterior(mu, S)

    psi0 = kern.psi0(Z, X)
    psi1 = kern.psi1(Z, X)
    psi2 = kern.psi2(Z, X)

    # Likelihood contribution plus the KL-style penalty on (mu, S).
    lik = dL_dpsi0 * psi0.sum() + np.einsum('ij,kj->...', dL_dpsi1, psi1) + np.einsum('ijk,lkj->...', dL_dpsi2, psi2) - 0.5 * np.sum(np.square(mu) + S) + 0.5 * np.sum(log_S)

    dLdmu, dLdS = kern.gradients_qX_expectations(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, X)
    dmu = dLdmu - mu
    # dS = S0 + S1 + S2 -0.5 + .5/S
    # Chain rule through S = exp(log_S): d/dlog_S = S * d/dS.
    dlnS = S * (dLdS - 0.5) + .5
    return -lik, -np.hstack((dmu.flatten(), dlnS.flatten()))
| bsd-3-clause |
marcusrehm/serenata-de-amor | research/src/fetch_yelp_info.py | 2 | 5299 | import json
import requests
import re
import os.path
import datetime
from unicodedata import normalize
from decouple import config
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
"""
Get your API access token
1. Create an Yelp account.
2. Create an app (https://www.yelp.com/developers/v3/manage_app).
3. Run this command in your terminal to get yout access_token:
curl -X POST -F 'client_id=YOUR_CLIENT_ID' -F 'client_secret=YOUR_CLIENT_SECRET' https://api.yelp.com/oauth2/token
4. Get your 'access_token' from the response and add it as an environment variable.
"""
def companies():
    """Return the companies (with a trade name) that appear in meal expenses.

    Reads the reimbursements and companies datasets from disk and keeps only
    companies whose cleaned CNPJ shows up in 'Congressperson meal' records.
    """
    # Loading reimbursements
    docs = pd.read_csv(REIMBURSEMENTS_DATASET_PATH,
                       low_memory=False,
                       dtype={'cnpj_cpf': np.str})
    # Filtering only congressperson meals
    meal_docs = docs[docs.subquota_description == 'Congressperson meal']
    # Storing only unique CNPJs
    meal_cnpjs = meal_docs['cnpj_cpf'].unique()
    # Loading companies
    all_companies = pd.read_csv(COMPANIES_DATASET_PATH,
                                low_memory=False,
                                dtype={'trade_name': np.str})
    all_companies = all_companies[all_companies['trade_name'].notnull()]
    # Cleaning up companies CNPJs: strip '.', '/' and '-' so they match the
    # digits-only cnpj_cpf column of the reimbursements dataset.
    all_companies['clean_cnpj'] = \
        all_companies['cnpj'].map(lambda cnpj: cnpj.replace(
            '.', '').replace('/', '').replace('-', ''))
    # Filtering only companies that are in meal reimbursements
    return all_companies[all_companies['clean_cnpj'].isin(meal_cnpjs)]
def find_newest_file(name):
    """Return the path of the newest ``YYYY-MM-DD-<name>.xz`` file in DATA_DIR.

    Scans DATA_DIR for dated filenames and returns the most recent matching
    path, or None when no such file exists.
    """
    # Fix: use a raw string — '\d' in a plain string is an invalid escape
    # sequence (DeprecationWarning since Python 3.6).
    date_regex = re.compile(r'\d{4}-\d{2}-\d{2}')

    matches = (date_regex.findall(f) for f in os.listdir(DATA_DIR))
    # Unique dates, newest first (ISO dates sort lexicographically).
    dates = sorted({match[0] for match in matches if match}, reverse=True)
    for date in dates:
        filename = '{}-{}.xz'.format(date, name)
        filepath = os.path.join(DATA_DIR, filename)
        if os.path.isfile(filepath):
            return filepath
    return None  # explicit: no dated file of this name was found
def remaining_companies(fetched_companies, companies):
    """Return the rows of *companies* whose CNPJ has not been fetched yet."""
    already_fetched = companies['cnpj'].isin(fetched_companies['cnpj'])
    return companies[~already_fetched]
def load_companies_dataset():
    """Load previously fetched Yelp results from YELP_DATASET_PATH.

    Returns an empty DataFrame with only a 'cnpj' column when no dataset
    file exists yet (first run).
    """
    if os.path.exists(YELP_DATASET_PATH):
        return pd.read_csv(YELP_DATASET_PATH)
    else:
        return pd.DataFrame(columns=['cnpj'])
def parse_fetch_info(response):
    """Return the first business of a Yelp search response, or None.

    Non-200 responses print their status code and yield None; a 200
    response with no matches also yields None.
    """
    if response.status_code != 200:
        print('Response ==>', response.status_code)
        return None
    businesses = response.json()['businesses']
    return businesses[0] if businesses else None
def write_fetched_companies(companies):
    # Persist progress as an xz-compressed CSV so interrupted runs can resume.
    companies.to_csv(YELP_DATASET_PATH,
                     compression='xz',
                     index=False)
# ----------------------------
# Request to yelp API getting by trade name and address
# https://www.yelp.com/developers/documentation/v3/business_search
def fetch_yelp_info(**params):
    """Query the Yelp business-search API and return the best match (a dict).

    Keyword arguments are forwarded as query-string parameters (term,
    location, latitude, longitude, ...). Returns None on error or no match.
    Requires the YELP_ACCESS_TOKEN environment variable (see module docstring).
    """
    url = 'https://api.yelp.com/v3/businesses/search'
    authorization = "Bearer {}".format(config('YELP_ACCESS_TOKEN'))
    headers = {"Authorization": authorization}
    response = requests.get(url, headers=headers, params=params)
    return parse_fetch_info(response)
def standardize_name(name):
    """Lower-case *name*, strip accents, and return its words as a set."""
    ascii_only = normalize('NFKD', name).encode('ASCII', 'ignore')
    lowered = ascii_only.decode('utf-8').lower()
    return set(lowered.split(' '))
DATA_DIR = 'data'
# Newest dated snapshots of the two input datasets (None when absent).
REIMBURSEMENTS_DATASET_PATH = find_newest_file('reimbursements')
COMPANIES_DATASET_PATH = find_newest_file('companies')
# Output file accumulating the fetched Yelp matches.
YELP_DATASET_PATH = os.path.join('data', 'yelp-companies.xz')
if __name__ == '__main__':
    companies_w_meal_expense = companies()
    fetched_companies = load_companies_dataset()
    # Resume support: only query companies missing from the saved dataset.
    companies_to_fetch = remaining_companies(
        fetched_companies, companies_w_meal_expense).reset_index()
    for index, company in companies_to_fetch.iterrows():
        print('%s: Fetching %s - City: %s' %
              (index, company['trade_name'], company['city']))
        fetched_company = fetch_yelp_info(term=company['trade_name'],
                                          location='BR',
                                          latitude=company['latitude'],
                                          longitude=company['longitude'])
        is_good_result = False
        if fetched_company:
            # Accept the hit only when fewer than 30% of the expected
            # name's words are missing from the returned business name.
            expected_name = standardize_name(company['trade_name'])
            result_name = standardize_name(fetched_company['name'])
            is_good_result = len(
                expected_name - result_name) / len(expected_name) < .3
        if is_good_result:
            print('Successfuly matched %s' % fetched_company['name'])
        else:
            print('Not found')
            fetched_company = {}
        # Flatten the (possibly empty) JSON result into one DataFrame row,
        # keeping the join keys so the record can be matched back.
        record = json_normalize(fetched_company)
        record['scraped_at'] = datetime.datetime.utcnow().isoformat()
        record['trade_name'] = company['trade_name']
        record['cnpj'] = company['cnpj']
        fetched_companies = pd.concat([fetched_companies, record])
        # Checkpoint every 100 requests so progress survives interruption.
        if (index % 100) == 0 and index > 0:
            print('###########################################')
            print("%s requests made. Stopping to save." % index)
            write_fetched_companies(fetched_companies)
            print('###########################################')
    write_fetched_companies(fetched_companies)
| mit |
ManuSchmi88/landlab | setup.py | 1 | 5818 | #! /usr/bin/env python
#from ez_setup import use_setuptools
#use_setuptools()
from setuptools import setup, find_packages, Extension
from setuptools.command.install import install
from setuptools.command.develop import develop
from distutils.extension import Extension
import sys
# Cython extension modules; the .pyx sources are cythonized at build time
# (cython is listed in setup_requires below).
ext_modules = [
    Extension('landlab.ca.cfuncs',
              ['landlab/ca/cfuncs.pyx']),
    Extension('landlab.grid.cfuncs',
              ['landlab/grid/cfuncs.pyx']),
    Extension('landlab.components.flexure.cfuncs',
              ['landlab/components/flexure/cfuncs.pyx']),
    Extension('landlab.components.flow_accum.cfuncs',
              ['landlab/components/flow_accum/cfuncs.pyx']),
    Extension('landlab.components.flow_director.cfuncs',
              ['landlab/components/flow_director/cfuncs.pyx']),
    Extension('landlab.components.stream_power.cfuncs',
              ['landlab/components/stream_power/cfuncs.pyx']),
    Extension('landlab.components.drainage_density.cfuncs',
              ['landlab/components/drainage_density/cfuncs.pyx']),
    Extension('landlab.utils.ext.jaggedarray',
              ['landlab/utils/ext/jaggedarray.pyx']),
    Extension('landlab.graph.structured_quad.ext.at_node',
              ['landlab/graph/structured_quad/ext/at_node.pyx']),
    Extension('landlab.graph.structured_quad.ext.at_link',
              ['landlab/graph/structured_quad/ext/at_link.pyx']),
    Extension('landlab.graph.structured_quad.ext.at_patch',
              ['landlab/graph/structured_quad/ext/at_patch.pyx']),
    Extension('landlab.graph.structured_quad.ext.at_cell',
              ['landlab/graph/structured_quad/ext/at_cell.pyx']),
    Extension('landlab.graph.structured_quad.ext.at_face',
              ['landlab/graph/structured_quad/ext/at_face.pyx']),
    Extension('landlab.graph.hex.ext.hex',
              ['landlab/graph/hex/ext/hex.pyx']),
    Extension('landlab.graph.sort.ext.remap_element',
              ['landlab/graph/sort/ext/remap_element.pyx']),
    Extension('landlab.graph.sort.ext.argsort',
              ['landlab/graph/sort/ext/argsort.pyx']),
    Extension('landlab.graph.sort.ext.spoke_sort',
              ['landlab/graph/sort/ext/spoke_sort.pyx']),
    Extension('landlab.graph.voronoi.ext.voronoi',
              ['landlab/graph/voronoi/ext/voronoi.pyx']),
    Extension('landlab.graph.voronoi.ext.delaunay',
              ['landlab/graph/voronoi/ext/delaunay.pyx']),
    Extension('landlab.graph.object.ext.at_node',
              ['landlab/graph/object/ext/at_node.pyx']),
    Extension('landlab.graph.object.ext.at_patch',
              ['landlab/graph/object/ext/at_patch.pyx']),
    Extension('landlab.graph.quantity.ext.of_link',
              ['landlab/graph/quantity/ext/of_link.pyx']),
    Extension('landlab.graph.quantity.ext.of_patch',
              ['landlab/graph/quantity/ext/of_patch.pyx']),
    Extension('landlab.graph.matrix.ext.matrix',
              ['landlab/graph/matrix/ext/matrix.pyx']),
    Extension('landlab.grid.structured_quad.cfuncs',
              ['landlab/grid/structured_quad/cfuncs.pyx']),
    Extension('landlab.grid.structured_quad.c_faces',
              ['landlab/grid/structured_quad/c_faces.pyx']),
]
import numpy as np
from landlab import __version__
def register(**kwds):
    """POST the given fields to csdms.colorado.edu/register/ (fire-and-forget).

    The response is intentionally ignored; callers wrap this in a broad
    try/except (see register_landlab).
    """
    # httplib / urllib.urlencode exist only on Python 2; fall back to the
    # Python 3 locations so this file imports on both interpreters.
    try:
        import httplib
        from urllib import urlencode
    except ImportError:  # Python 3
        import http.client as httplib
        from urllib.parse import urlencode
    data = urlencode(kwds)
    header = {"Content-type": "application/x-www-form-urlencoded",
              "Accept": "text/plain"}
    conn = httplib.HTTPConnection('csdms.colorado.edu')
    conn.request('POST', '/register/', data, header)
def register_landlab():
    """Best-effort usage registration; never let failures break installs."""
    try:
        import platform
        from sys import argv
        register(name='landlab',
                 version=__version__,
                 platform=platform.platform(),
                 desc=';'.join(argv))
    except Exception:
        # Registration is optional telemetry — swallow every error.
        pass
class install_and_register(install):
    """``setup.py install`` command that also registers usage (best effort)."""
    def run(self):
        install.run(self)
        register_landlab()
class develop_and_register(develop):
    """``setup.py develop`` command that also registers usage (best effort)."""
    def run(self):
        develop.run(self)
        register_landlab()
import os

#cython_pathspec = os.path.join('landlab', 'components','**','*.pyx')
#ext_modules = cythonize(cython_pathspec)

# Package metadata and build configuration.
setup(name='landlab',
      version=__version__,
      author='Eric Hutton',
      author_email='eric.hutton@colorado.edu',
      url='https://github.com/landlab',
      description='Plugin-based component modeling tool.',
      long_description=open('README.rst').read(),
      install_requires=['scipy>=0.12',
                        'nose>=1.3',
                        'matplotlib',
                        'sympy',
                        'pandas',
                        'six',
                        'pyyaml',
                        'netCDF4',
                        ],
#                        'Cython>=0.22'],
      setup_requires=['cython'],
      classifiers=[
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Cython',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: Implementation :: CPython',
          'Topic :: Scientific/Engineering :: Physics'
      ],
      packages=find_packages(),
      package_data={'': ['tests/*txt', 'data/*asc', 'data/*nc',
                         'preciptest.in']},
      test_suite='nose.collector',
      # Custom commands phone home after install/develop (see classes above).
      cmdclass={
          'install': install_and_register,
          'develop': develop_and_register,
      },
      entry_points={
          'console_scripts': [
              'landlab=landlab.cmd.landlab:main',
          ]
      },
      include_dirs = [np.get_include()],
      ext_modules = ext_modules,
      )
| mit |
alvarofierroclavero/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
                                              ClassifierMixin)):
    """Base class for label propagation module.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported..

    gamma : float
        Parameter for rbf kernel

    alpha : float
        Clamping factor

    max_iter : float
        Change maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    n_neighbors : integer > 0
        Parameter for knn kernel
    """

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
                 alpha=1, max_iter=30, tol=1e-3):
        self.max_iter = max_iter
        self.tol = tol

        # kernel parameters
        self.kernel = kernel
        self.gamma = gamma
        self.n_neighbors = n_neighbors

        # clamping factor
        self.alpha = alpha

    def _get_kernel(self, X, y=None):
        # With y=None: affinity matrix over X itself (training graph).
        # With y given: affinities (rbf) or neighbor indices (knn) between
        # the training points X and the new points y.
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            if self.nn_fit is None:
                self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
            if y is None:
                # Sparse 0/1 connectivity graph between training points.
                return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                    self.n_neighbors,
                                                    mode='connectivity')
            else:
                return self.nn_fit.kneighbors(y, return_distance=False)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " are supported at this time" % self.kernel)

    @abstractmethod
    def _build_graph(self):
        # Subclasses construct the propagation matrix from self.X_.
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")

    def predict(self, X):
        """Performs inductive inference across the model.

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        y : array_like, shape = [n_samples]
            Predictions for input data
        """
        probas = self.predict_proba(X)
        return self.classes_[np.argmax(probas, axis=1)].ravel()

    def predict_proba(self, X):
        """Predict probability for each possible outcome.

        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        check_is_fitted(self, 'X_')

        if sparse.isspmatrix(X):
            X_2d = X
        else:
            X_2d = np.atleast_2d(X)
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            # knn: average the label distributions of the k nearest
            # training points for each new sample.
            probabilities = []
            for weight_matrix in weight_matrices:
                ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
                probabilities.append(ine)
            probabilities = np.array(probabilities)
        else:
            # rbf: affinity-weighted combination of label distributions.
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities

    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based

        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this

        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels

        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y)
        self.X_ = X

        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()

        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        classes = (classes[classes != -1])  # -1 marks unlabeled samples
        self.classes_ = classes

        n_samples, n_classes = len(y), len(classes)

        y = np.asarray(y)
        unlabeled = y == -1
        # Labeled points are clamped toward their known label with weight
        # alpha; alpha=1 means hard clamping.
        clamp_weights = np.ones((n_samples, 1))
        clamp_weights[unlabeled, 0] = self.alpha

        # initialize distributions: one-hot rows for labeled samples.
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1

        y_static = np.copy(self.label_distributions_)
        if self.alpha > 0.:
            y_static *= 1 - self.alpha
        y_static[unlabeled] = 0

        l_previous = np.zeros((self.X_.shape[0], n_classes))

        remaining_iter = self.max_iter
        if sparse.isspmatrix(graph_matrix):
            graph_matrix = graph_matrix.tocsr()
        # Iterate propagate -> clamp until converged or out of iterations.
        while (_not_converged(self.label_distributions_, l_previous, self.tol)
                and remaining_iter > 1):
            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)
            # clamp
            self.label_distributions_ = np.multiply(
                clamp_weights, self.label_distributions_) + y_static
            remaining_iter -= 1

        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer
        # set the transduction item: hard label per training sample.
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        self.n_iter_ = self.max_iter - remaining_iter
        return self
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported..

    gamma : float
        Parameter for rbf kernel

    n_neighbors : integer > 0
        Parameter for knn kernel

    alpha : float
        Clamping factor

    max_iter : float
        Change maximum number of iterations allowed

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelPropagation(...)

    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf

    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise
    """
    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample

        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            # Force a (re)fit of the nearest-neighbors index on self.X_.
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        # Normalize by column sums of the affinity matrix.
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            affinity_matrix.data /= np.diag(np.array(normalizer))
        else:
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning

    This model is similar to the basic Label Propagation algorithm,
    but uses an affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.

    Read more in the :ref:`User Guide <label_propagation>`.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.

    gamma : float
        Parameter for rbf kernel.

    n_neighbors : integer > 0
        Parameter for knn kernel.

    alpha : float
        Clamping factor.

    max_iter : float
        Maximum number of iterations allowed.

    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state.

    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.

    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.

    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.

    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.

    n_iter_ : int
        Number of iterations run.

    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelSpreading(...)

    References
    ----------
    Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219

    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning
    """

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
                 max_iter=30, tol=1e-3):
        # This subclass only supplies different default base parameters;
        # the algorithmic difference lives in _build_graph below.
        super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
                                             n_neighbors=n_neighbors,
                                             alpha=alpha, max_iter=max_iter,
                                             tol=tol)

    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # Affinity (gram) matrix; reset the KNN fit so the kernel call
        # refits lazily when needed.
        if self.kernel == 'knn':
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        laplacian = -graph_laplacian(affinity_matrix, normed=True)
        if sparse.isspmatrix(laplacian):
            # COO layout: zero the entries stored on the diagonal.
            on_diagonal = (laplacian.row == laplacian.col)
            laplacian.data[on_diagonal] = 0.0
        else:
            laplacian.flat[::n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| bsd-3-clause |
faner-father/tushare | tushare/stock/classifying.py | 11 | 8914 | # -*- coding:utf-8 -*-
"""
获取股票分类数据接口
Created on 2015/02/01
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
import pandas as pd
from tushare.stock import cons as ct
from tushare.stock import ref_vars as rv
import json
import re
from pandas.util.testing import _network_error_classes
import time
import tushare.stock.fundamental as fd
from tushare.util.netbase import Client
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_industry_classified():
    """Fetch Sina's industry classification for all stocks.

    Return
    --------
    DataFrame
        code   : stock code
        name   : stock name
        c_name : industry name
    """
    # First pull the (tag, industry-name) index, then download the member
    # stocks of every industry tag and label them with the industry name.
    type_df = _get_type_data(ct.SINA_INDUSTRY_INDEX_URL%(ct.P_TYPE['http'],
                             ct.DOMAINS['vsf'], ct.PAGES['ids']))
    frames = []
    ct._write_head()
    for tag, industry in type_df.values:
        members = _get_detail(tag)
        members['c_name'] = industry
        frames.append(members)
    return pd.concat(frames, ignore_index=True)
def get_concept_classified():
    """Fetch Sina's concept (theme) classification for all stocks.

    Return
    --------
    DataFrame
        code   : stock code
        name   : stock name
        c_name : concept name
    """
    ct._write_head()
    # (tag, concept-name) index, then the member stocks for each tag.
    type_df = _get_type_data(ct.SINA_CONCEPTS_INDEX_URL%(ct.P_TYPE['http'],
                             ct.DOMAINS['sf'], ct.PAGES['cpt']))
    frames = []
    for tag, concept in type_df.values:
        members = _get_detail(tag)
        members['c_name'] = concept
        frames.append(members)
    return pd.concat(frames, ignore_index=True)
def get_area_classified():
    """Fetch all stocks grouped by geographic area.

    Return
    --------
    DataFrame
        code : stock code
        name : stock name
        area : area name
    """
    df = fd.get_stock_basics()
    df = df[['name', 'area']]
    # get_stock_basics indexes by code; expose it as a regular column.
    df.reset_index(level=0, inplace=True)
    # FIX: DataFrame.sort() was deprecated in pandas 0.17 and removed in
    # 0.20; sort_values() is the equivalent modern API.
    df = df.sort_values('area').reset_index(drop=True)
    return df
def get_gem_classified():
    """Fetch ChiNext (GEM) board stocks — codes starting with '3'.

    Return
    --------
    DataFrame
        code : stock code
        name : stock name
    """
    df = fd.get_stock_basics()
    df.reset_index(level=0, inplace=True)
    df = df[ct.FOR_CLASSIFY_B_COLS]
    # FIX: .ix was deprecated in pandas 0.20 and removed in 1.0; .loc with
    # a boolean mask is the equivalent selection.
    df = df.loc[df.code.str[0] == '3']
    # FIX: DataFrame.sort() was removed in pandas 0.20 -> sort_values().
    df = df.sort_values('code').reset_index(drop=True)
    return df
def get_sme_classified():
    """Fetch SME (small & medium enterprise) board stocks — codes '002xxx'.

    Return
    --------
    DataFrame
        code : stock code
        name : stock name
    """
    df = fd.get_stock_basics()
    df.reset_index(level=0, inplace=True)
    df = df[ct.FOR_CLASSIFY_B_COLS]
    # FIX: .ix was deprecated in pandas 0.20 and removed in 1.0; .loc with
    # a boolean mask is the equivalent selection.
    df = df.loc[df.code.str[0:3] == '002']
    # FIX: DataFrame.sort() was removed in pandas 0.20 -> sort_values().
    df = df.sort_values('code').reset_index(drop=True)
    return df
def get_st_classified():
    """Fetch special-treatment (ST, risk-warning) board stocks.

    Return
    --------
    DataFrame
        code : stock code
        name : stock name
    """
    df = fd.get_stock_basics()
    df.reset_index(level=0, inplace=True)
    df = df[ct.FOR_CLASSIFY_B_COLS]
    # FIX: .ix was deprecated in pandas 0.20 and removed in 1.0; .loc with
    # a boolean mask is the equivalent selection.
    df = df.loc[df.name.str.contains('ST')]
    # FIX: DataFrame.sort() was removed in pandas 0.20 -> sort_values().
    df = df.sort_values('code').reset_index(drop=True)
    return df
def _get_detail(tag, retry_count=3, pause=0.001):
    """Download the member stocks of one Sina classification tag.

    Parameters
    ----------
    tag : str
        Sina classification tag identifier, interpolated into the URL.
    retry_count : int, default 3
        Number of download attempts before giving up.
    pause : float, default 0.001
        Seconds to sleep before each attempt.

    Return
    --------
    DataFrame restricted to ct.FOR_CLASSIFY_B_COLS columns.

    Raises
    ------
    IOError
        If every attempt fails with a network error.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            ct._write_console()
            request = Request(ct.SINA_DATA_DETAIL_URL%(ct.P_TYPE['http'],
                                                       ct.DOMAINS['vsf'], ct.PAGES['jv'],
                                                       tag))
            text = urlopen(request, timeout=10).read()
            text = text.decode('gbk')
        except _network_error_classes:
            # Network hiccup: fall through and retry the loop.
            pass
        else:
            # The endpoint returns JavaScript-style data with unquoted
            # keys; quote them so the payload becomes valid JSON.
            reg = re.compile(r'\,(.*?)\:')
            text = reg.sub(r',"\1":', text)
            text = text.replace('"{symbol', '{"symbol')
            text = text.replace('{symbol', '{"symbol"')
            # Round-trip through dumps/loads to escape the string before
            # handing it to pandas' JSON reader.
            jstr = json.dumps(text)
            js = json.loads(jstr)
            df = pd.DataFrame(pd.read_json(js, dtype={'code':object}), columns=ct.THE_FIELDS)
            df = df[ct.FOR_CLASSIFY_B_COLS]
            return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _get_type_data(url):
    """Download a Sina classification index and return a DataFrame with
    'tag' and 'name' columns; prints the error and returns None on failure.
    """
    try:
        raw = urlopen(Request(url), timeout=10).read()
        # Payload looks like "var x = {...}"; keep only the JSON part.
        payload = raw.decode('GBK').split('=')[1]
        mapping = json.loads(payload)
        rows = [[entry.split(',')[0], entry.split(',')[1]]
                for entry in mapping.values()]
        return pd.DataFrame(rows, columns=['tag', 'name'])
    except Exception as er:
        print(str(er))
def get_hs300s():
    """Fetch current HS300 constituents together with their index weights.

    Return
    --------
    DataFrame
        code   : stock code
        name   : stock name
        date   : date
        weight : index weight
    """
    try:
        base = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
                             ct.INDEX_C_COMM, ct.PAGES['hs300b']), parse_cols=[0,1])
        base.columns = ct.FOR_CLASSIFY_B_COLS
        # Codes come back as integers; left-pad to the 6-digit convention.
        base['code'] = base['code'].map(lambda x :str(x).zfill(6))
        weights = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
                                ct.INDEX_C_COMM, ct.PAGES['hs300w']), parse_cols=[0,4,8])
        weights.columns = ct.FOR_CLASSIFY_W_COLS
        weights['code'] = weights['code'].map(lambda x :str(x).zfill(6))
        return pd.merge(base, weights)
    except Exception as er:
        print(str(er))
def get_sz50s():
    """Fetch the SSE 50 index constituents.

    Return
    --------
    DataFrame
        code : stock code
        name : stock name
    """
    try:
        frame = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
                              ct.INDEX_C_COMM, ct.PAGES['sz50b']), parse_cols=[0,1])
        frame.columns = ct.FOR_CLASSIFY_B_COLS
        # Left-pad codes to the 6-digit convention.
        frame['code'] = frame['code'].map(lambda x :str(x).zfill(6))
        return frame
    except Exception as er:
        print(str(er))
def get_zz500s():
    """Fetch the CSI 500 index constituents.

    Return
    --------
    DataFrame
        code : stock code
        name : stock name
    """
    try:
        frame = pd.read_excel(ct.HS300_CLASSIFY_URL%(ct.P_TYPE['http'], ct.DOMAINS['idx'],
                              ct.INDEX_C_COMM, ct.PAGES['zz500b']), parse_cols=[0,1])
        frame.columns = ct.FOR_CLASSIFY_B_COLS
        # Left-pad codes to the 6-digit convention.
        frame['code'] = frame['code'].map(lambda x :str(x).zfill(6))
        return frame
    except Exception as er:
        print(str(er))
def get_terminated():
    """Fetch the list of delisted (terminated) stocks from SSE.

    Return
    --------
    DataFrame
        code  : stock code
        name  : stock name
        oDate : listing date
        tDate : delisting date
    """
    try:
        referer = ct.SSEQ_CQ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
        client = Client(rv.TERMINATED_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
                        ct.PAGES['ssecq'], _random(5),
                        _random()), ref=referer, cookie=rv.MAR_SH_COOKIESTR)
        payload = client.gvalue()
        payload = payload.decode('utf-8') if ct.PY3 else payload
        # Strip the JSONP wrapper (callback prefix and trailing paren).
        body = json.loads(payload[19:-1])
        frame = pd.DataFrame(body['result'], columns=rv.TERMINATED_T_COLS)
        frame.columns = rv.TERMINATED_COLS
        return frame
    except Exception as er:
        print(str(er))
def get_suspended():
    """Fetch the list of suspended stocks from SSE.

    Return
    --------
    DataFrame
        code  : stock code
        name  : stock name
        oDate : listing date
        tDate : termination date
    """
    try:
        referer = ct.SSEQ_CQ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
        client = Client(rv.SUSPENDED_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
                        ct.PAGES['ssecq'], _random(5),
                        _random()), ref=referer, cookie=rv.MAR_SH_COOKIESTR)
        payload = client.gvalue()
        payload = payload.decode('utf-8') if ct.PY3 else payload
        # Strip the JSONP wrapper (callback prefix and trailing paren).
        body = json.loads(payload[19:-1])
        # NOTE(review): reuses the TERMINATED column constants, presumably
        # because both endpoints share a schema -- confirm against ref_vars.
        frame = pd.DataFrame(body['result'], columns=rv.TERMINATED_T_COLS)
        frame.columns = rv.TERMINATED_COLS
        return frame
    except Exception as er:
        print(str(er))
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
| bsd-3-clause |
tkoziara/parmec | tests/spring_contact_moving_plane.py | 1 | 1441 | # PARMEC test --> SPRING contact moving plane test
# PARMEC scene: a cube dropped and tilted onto a fixed plate, held off the
# plate by four contact springs; records the cube's vertical trajectory.
# NOTE(review): MATERIAL args are presumably (density, Young's modulus,
# Poisson's ratio) -- confirm against the PARMEC manual.
matnum = MATERIAL (1E3, 1E9, 0.25)
# Lower body: a 2 x 2 x 0.2 plate whose top face is the plane z = 0.
nodes0 = [-0.5, -0.5, -0.2,
           1.5, -0.5, -0.2,
           1.5, 1.5, -0.2,
          -0.5, 1.5, -0.2,
          -0.5, -0.5, 0,
           1.5, -0.5, 0,
           1.5, 1.5, 0,
          -0.5, 1.5, 0]
# Upper body: a unit cube spanning z in [1, 2].
nodes1 = [0, 0, 1,
          1, 0, 1,
          1, 1, 1,
          0, 1, 1,
          0, 0, 2,
          1, 0, 2,
          1, 1, 2,
          0, 1, 2]
# Single 8-node hexahedral element referencing the 8 nodes above.
elements = [8, 0, 1, 2, 3, 4, 5, 6, 7, matnum]
colors = [1, 4, 0, 1, 2, 3, 2, 4, 4, 5, 6, 7, 3]
part0 = MESH (nodes0, elements, matnum, colors)
part1 = MESH (nodes1, elements, matnum, colors)
# Contact spring/dashpot curves (piecewise-linear lookup tables).
spring = [-1, 1E7, 0, 0, 1, 0]
dashpot = [-1, -8E5, 1, 8E5]
# One contact spring per bottom corner of the cube, acting along +z
# against the corresponding point of the plate.
SPRING (part1, (0, 0, 1), part0, [(0, 0, 0), (0, 0, 1)], spring, dashpot)
SPRING (part1, (1, 0, 1), part0, [(1, 0, 0), (0, 0, 1)], spring, dashpot)
SPRING (part1, (1, 1, 1), part0, [(1, 1, 0), (0, 0, 1)], spring, dashpot)
SPRING (part1, (0, 1, 1), part0, [(0, 1, 0), (0, 0, 1)], spring, dashpot)
# Launch the cube downwards with a small spin.
VELOCITY (part1, linear=(0, 0, -1), angular=(0.25, 0.5, 0))
# Record time and the cube's center z-coordinate.
t = HISTORY ('TIME')
z = HISTORY ('PZ', part1)
# Time step: 10% of the critical (stability) step.
h = 0.1 * CRITICAL()
print 'Time step:', h
# Run the explicit DEM simulation for 5 s, sampling every 0.05 s.
DEM (5.0, h, (0.05, h))
print 'Generating time-z(center) plot ...'
try:
  import matplotlib.pyplot as plt
  plt.clf ()
  plt.plot (t, z)
  plt.xlim ((0, t[-1]))
  plt.xlabel ('time $(s)$')
  plt.ylabel ('z(center) $(m)$')
  plt.title ('spring_contact_moving_plane')
  plt.savefig ('tests/spring_contact_moving_plane_z.png')
except:
  # Headless fallback: dump the raw series if matplotlib is unavailable.
  print 't = ', t
  print 'z = ', z
| mit |
meduz/scikit-learn | sklearn/preprocessing/tests/test_data.py | 30 | 61609 |
# Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn import datasets
# Shared fixtures reused by many tests in this module.
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
# NOTE: the rng draws below must stay in this order -- the fixture values
# (and any per-test expectations derived from them) depend on the stream.
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
# 2-D data with per-feature scale and offset, plus 1-row / 1-column views
# (both as arrays and as plain Python lists).
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
    """Densify ``a`` via its ``toarray`` method when present, else return
    it unchanged."""
    densify = getattr(a, "toarray", None)
    return a if densify is None else densify()
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
                        n_samples_seen):
    """Check ``n_samples_seen`` after the (i+1)-th partial_fit batch: full
    chunks contribute ``chunk_size`` samples, the final (possibly short)
    batch contributes its actual length."""
    if batch_stop == n:
        expected = i * chunk_size + (batch_stop - batch_start)
    else:
        expected = (i + 1) * chunk_size
    assert_equal(expected, n_samples_seen)
def test_polynomial_features():
    # Test Polynomial Features
    """PolynomialFeatures: degree expansion (with/without bias) against
    hand-built expectations, and interaction-only mode."""
    # Univariate case: columns [1, x, x^2, x^3].
    X1 = np.arange(6)[:, np.newaxis]
    P1 = np.hstack([np.ones_like(X1),
                    X1, X1 ** 2, X1 ** 3])
    deg1 = 3
    # Bivariate case: all monomials x1^a * x2^b with a + b <= 2.
    X2 = np.arange(6).reshape((3, 2))
    x1 = X2[:, :1]
    x2 = X2[:, 1:]
    P2 = np.hstack([x1 ** 0 * x2 ** 0,
                    x1 ** 1 * x2 ** 0,
                    x1 ** 0 * x2 ** 1,
                    x1 ** 2 * x2 ** 0,
                    x1 ** 1 * x2 ** 1,
                    x1 ** 0 * x2 ** 2])
    deg2 = 2
    for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
        P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
        assert_array_almost_equal(P_test, P)
        # Without bias the constant column is dropped.
        P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
        assert_array_almost_equal(P_test, P[:, 1:])
    # interaction_only keeps only 1, x1, x2, x1*x2 (no squares).
    # NOTE: relies on the loop variable X leaking (X is X2 here).
    interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
    X_poly = interact.fit_transform(X)
    assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
    assert_equal(interact.powers_.shape, (interact.n_output_features_,
                                          interact.n_input_features_))
def test_polynomial_feature_names():
    """get_feature_names(): default x0/x1/... names, custom names, and
    unicode names."""
    X = np.arange(30).reshape(10, 3)
    poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
    feature_names = poly.get_feature_names()
    assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
                        'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
                       feature_names)
    poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
    feature_names = poly.get_feature_names(["a", "b", "c"])
    assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
                        'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
                        'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
                        'b c^2', 'c^3'], feature_names)
    # test some unicode
    # NOTE(review): u"\u0001F40D" is control char U+0001 followed by the
    # literal text "F40D"; the snake emoji would be u"\U0001F40D". Harmless
    # here since the same literal appears on both sides of the assertion,
    # but worth confirming the intent.
    poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
    feature_names = poly.get_feature_names([u"\u0001F40D", u"\u262E", u"\u05D0"])
    assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
                       feature_names)
def test_standard_scaler_1d():
    """StandardScaler on single-row, single-column and list inputs, plus
    the constant-feature edge case."""
    # FIX: the input list previously contained X_list_1row twice and never
    # exercised X_list_1col (which was defined but unused at module level).
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
        scaler = StandardScaler()
        X_scaled = scaler.fit(X).transform(X, copy=True)
        if isinstance(X, list):
            X = np.array(X)  # cast only after scaling done
        if _check_dim_1axis(X) == 1:
            # Single sample: the mean is the sample itself, the scale
            # collapses to ones, so the scaled output is all zeros.
            assert_almost_equal(scaler.mean_, X.ravel())
            assert_almost_equal(scaler.scale_, np.ones(n_features))
            assert_array_almost_equal(X_scaled.mean(axis=0),
                                      np.zeros_like(n_features))
            assert_array_almost_equal(X_scaled.std(axis=0),
                                      np.zeros_like(n_features))
        else:
            assert_almost_equal(scaler.mean_, X.mean())
            assert_almost_equal(scaler.scale_, X.std())
            assert_array_almost_equal(X_scaled.mean(axis=0),
                                      np.zeros_like(n_features))
            assert_array_almost_equal(X_scaled.mean(axis=0), .0)
            assert_array_almost_equal(X_scaled.std(axis=0), 1.)
        assert_equal(scaler.n_samples_seen_, X.shape[0])
        # check inverse transform
        X_scaled_back = scaler.inverse_transform(X_scaled)
        assert_array_almost_equal(X_scaled_back, X)
    # Constant feature: scale_ falls back to 1 so output is all zeros.
    X = np.ones(5).reshape(5, 1)
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_almost_equal(scaler.mean_, 1.)
    assert_almost_equal(scaler.scale_, 1.)
    assert_array_almost_equal(X_scaled.mean(axis=0), .0)
    assert_array_almost_equal(X_scaled.std(axis=0), .0)
    assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_scale_1d():
    """scale() on 1-d input (list or array) standardizes to mean 0, std 1."""
    values = [1., 3., 5., 0.]
    for X in (values, np.array(values)):
        standardized = scale(X)
        assert_array_almost_equal(standardized.mean(), 0.0)
        assert_array_almost_equal(standardized.std(), 1.0)
    # With centering and scaling both disabled, scale() is the identity.
    # (Relies on the loop variable X leaking, as in the original.)
    assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
    # Test numerical stability of scaling
    """Near-constant and extreme-magnitude data: scale() must stay finite
    and warn (or not) depending on the numpy version and sample count."""
    # np.log(1e-5) is taken because of its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
    x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    if LooseVersion(np.__version__) >= LooseVersion('1.9'):
        # This does not raise a warning as the number of samples is too low
        # to trigger the problem in recent numpy
        x_scaled = assert_no_warnings(scale, x)
        assert_array_almost_equal(scale(x), np.zeros(8))
    else:
        w = "standard deviation of the data is probably very close to 0"
        x_scaled = assert_warns_message(UserWarning, w, scale, x)
        assert_array_almost_equal(x_scaled, np.zeros(8))
    # with 2 more samples, the std computation run into numerical issues:
    x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
    w = "standard deviation of the data is probably very close to 0"
    x_scaled = assert_warns_message(UserWarning, w, scale, x)
    assert_array_almost_equal(x_scaled, np.zeros(10))
    # Tiny but non-degenerate values scale cleanly without warnings.
    x = np.ones(10, dtype=np.float64) * 1e-100
    x_small_scaled = assert_no_warnings(scale, x)
    assert_array_almost_equal(x_small_scaled, np.zeros(10))
    # Large values can cause (often recoverable) numerical stability issues:
    x_big = np.ones(10, dtype=np.float64) * 1e100
    w = "Dataset may contain too large values"
    x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
    assert_array_almost_equal(x_big_scaled, np.zeros(10))
    assert_array_almost_equal(x_big_scaled, x_small_scaled)
    x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
                                          with_std=False)
    assert_array_almost_equal(x_big_centered, np.zeros(10))
    assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
    # Test scaling of 2d array along first axis
    """StandardScaler / scale() on 2-d arrays: zero-variance columns,
    copy vs in-place semantics, axis=1 scaling, and inverse transform."""
    rng = np.random.RandomState(0)
    n_features = 5
    n_samples = 4
    X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0  # first feature is always of zero
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_equal(scaler.n_samples_seen_, n_samples)
    # The constant column keeps std 0 after scaling; others become 1.
    assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has been copied
    assert_true(X_scaled is not X)
    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)
    # Row-wise (axis=1) scaling via the functional interface.
    X_scaled = scale(X, axis=1, with_std=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
    X_scaled = scale(X, axis=1, with_std=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
    # Check that the data hasn't been modified
    assert_true(X_scaled is not X)
    # copy=False must return the very same array object.
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is X)
    X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non zero feature
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
    """_handle_zeros_in_scale(copy=True) replaces zero entries with one
    and leaves the input array untouched."""
    original = np.array([0, 1, 2, 3])
    patched = _handle_zeros_in_scale(original, copy=True)
    # The zero entry differs between input and output...
    assert_false(original[0] == patched[0])
    # ...the input is unchanged, and zeros became ones in the copy.
    assert_array_equal(original, np.array([0, 1, 2, 3]))
    assert_array_equal(patched, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
    # Test if partial_fit run over many batches of size 1 and 50
    # gives the same results as fit
    """MinMaxScaler.partial_fit over chunked batches must match a single
    fit(), both after one batch and after all batches."""
    X = X_2d
    n = X.shape[0]
    # Chunk sizes include 1 (per-sample), a partial final batch, exactly n,
    # and larger than n.
    for chunk_size in [1, 2, 50, n, n + 42]:
        # Test mean at the end of the process
        scaler_batch = MinMaxScaler().fit(X)
        scaler_incr = MinMaxScaler()
        for batch in gen_batches(n_samples, chunk_size):
            scaler_incr = scaler_incr.partial_fit(X[batch])
        assert_array_almost_equal(scaler_batch.data_min_,
                                  scaler_incr.data_min_)
        assert_array_almost_equal(scaler_batch.data_max_,
                                  scaler_incr.data_max_)
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
        assert_array_almost_equal(scaler_batch.data_range_,
                                  scaler_incr.data_range_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
        assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
        # Test std after 1 step
        batch0 = slice(0, chunk_size)
        scaler_batch = MinMaxScaler().fit(X[batch0])
        scaler_incr = MinMaxScaler().partial_fit(X[batch0])
        assert_array_almost_equal(scaler_batch.data_min_,
                                  scaler_incr.data_min_)
        assert_array_almost_equal(scaler_batch.data_max_,
                                  scaler_incr.data_max_)
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
        assert_array_almost_equal(scaler_batch.data_range_,
                                  scaler_incr.data_range_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
        assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
        # Test std until the end of partial fits, and
        scaler_batch = MinMaxScaler().fit(X)
        scaler_incr = MinMaxScaler()  # Clean estimator
        for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
            scaler_incr = scaler_incr.partial_fit(X[batch])
            # Verify the running sample counter after every batch.
            assert_correct_incr(i, batch_start=batch.start,
                                batch_stop=batch.stop, n=n,
                                chunk_size=chunk_size,
                                n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
    # Test if partial_fit run over many batches of size 1 and 50
    # gives the same results as fit
    """StandardScaler.partial_fit over chunked batches must match a single
    fit(): mean-only mode, one-step variance, and the full incremental run.
    """
    X = X_2d
    n = X.shape[0]
    for chunk_size in [1, 2, 50, n, n + 42]:
        # Test mean at the end of the process
        scaler_batch = StandardScaler(with_std=False).fit(X)
        scaler_incr = StandardScaler(with_std=False)
        for batch in gen_batches(n_samples, chunk_size):
            scaler_incr = scaler_incr.partial_fit(X[batch])
        assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
        assert_equal(scaler_batch.var_, scaler_incr.var_)  # Nones
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
        # Test std after 1 step
        batch0 = slice(0, chunk_size)
        scaler_incr = StandardScaler().partial_fit(X[batch0])
        if chunk_size == 1:
            # A single sample has zero variance, and scale_ falls back to 1.
            assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
                                      scaler_incr.var_)
            assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
                                      scaler_incr.scale_)
        else:
            assert_array_almost_equal(np.var(X[batch0], axis=0),
                                      scaler_incr.var_)
            assert_array_almost_equal(np.std(X[batch0], axis=0),
                                      scaler_incr.scale_)  # no constants
        # Test std until the end of partial fits, and
        scaler_batch = StandardScaler().fit(X)
        scaler_incr = StandardScaler()  # Clean estimator
        for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
            scaler_incr = scaler_incr.partial_fit(X[batch])
            # Verify the running sample counter after every batch.
            assert_correct_incr(i, batch_start=batch.start,
                                batch_stop=batch.stop, n=n,
                                chunk_size=chunk_size,
                                n_samples_seen=scaler_incr.n_samples_seen_)
        assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significative errors
    # for large datasets with values of large magniture
    """Incremental (partial_fit) statistics must agree with batch fit()
    to ~6 significant digits even for huge offsets/scales, dense and
    sparse."""
    rng = np.random.RandomState(0)
    n_features = 2
    n_samples = 100
    offsets = rng.uniform(-1e15, 1e15, size=n_features)
    scales = rng.uniform(1e3, 1e6, size=n_features)
    X = rng.randn(n_samples, n_features) * scales + offsets
    scaler_batch = StandardScaler().fit(X)
    scaler_incr = StandardScaler()
    for chunk in X:
        # Feed one sample at a time (worst case for incremental updates).
        scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of abs values, they must not be more diff 6 significant digits
    tol = 10 ** (-6)
    assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
    assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
    assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
    # NOTE Be aware that for much larger offsets std is very unstable (last
    # assert) while mean is OK.
    # Sparse input
    size = (100, 3)
    # NOTE(review): this local `scale` shadows the `scale` function
    # imported at module level; harmless inside this test but confusing.
    scale = 1e20
    X = rng.randint(0, 2, size).astype(np.float64) * scale
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)
    for X in [X_csr, X_csc]:
        # with_mean=False is required with sparse input
        scaler = StandardScaler(with_mean=False).fit(X)
        scaler_incr = StandardScaler(with_mean=False)
        for chunk in X:
            # chunk = sparse.csr_matrix(data_chunks)
            scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must not differ more than of 6 digits
        tol = 10 ** (-6)
        assert_true(scaler.mean_ is not None)
        assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
        assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
    """A no-op StandardScaler (no centering, no scaling) fed via
    partial_fit must preserve the sparsity structure of CSR/CSC input,
    and its inverse transform must round-trip the data."""
    dense = np.array([[1.], [0.], [0.], [5.]])
    identity = StandardScaler(with_mean=False, with_std=False, copy=True)
    for X in (sparse.csr_matrix(dense), sparse.csc_matrix(dense)):
        transformed = identity.partial_fit(X).transform(X)
        assert_array_equal(transformed.data, X.data)
        restored = identity.inverse_transform(transformed)
        assert_array_equal(restored.data, transformed.data)
        assert_array_equal(restored.data, X.data)
def test_standard_scaler_trasform_with_partial_fit():
    # NOTE(review): "trasform" in the name is a typo for "transform"; left
    # as-is because pytest discovers tests by name.
    # Check some postconditions after applying partial_fit and transform
    """After each single-sample partial_fit, transform on the data seen so
    far must equal a fresh fit_transform on that prefix, round-trip via
    inverse_transform, and keep var_/scale_ strictly tracked."""
    X = X_2d[:100, :]
    scaler_incr = StandardScaler()
    for i, batch in enumerate(gen_batches(X.shape[0], 1)):
        X_sofar = X[:(i + 1), :]
        chunks_copy = X_sofar.copy()
        scaled_batch = StandardScaler().fit_transform(X_sofar)
        scaler_incr = scaler_incr.partial_fit(X[batch])
        scaled_incr = scaler_incr.transform(X_sofar)
        assert_array_almost_equal(scaled_batch, scaled_incr)
        assert_array_almost_equal(X_sofar, chunks_copy)  # No change
        right_input = scaler_incr.inverse_transform(scaled_incr)
        assert_array_almost_equal(X_sofar, right_input)
        # var_ and scale_ must be non-negative; add the smallest positive
        # float so the strict assert_array_less acts as "less or equal".
        zero = np.zeros(X.shape[1])
        epsilon = np.nextafter(0, 1)
        assert_array_less(zero, scaler_incr.var_ + epsilon)  # as less or equal
        assert_array_less(zero, scaler_incr.scale_ + epsilon)
        # (i+1) because the Scaler has been already fitted
        assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
    """MinMaxScaler on iris: default and custom feature ranges must hit
    the requested bounds and round-trip via inverse_transform; an inverted
    range must raise."""
    X = iris.data
    scaler = MinMaxScaler()
    # default params
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 0)
    assert_array_almost_equal(X_trans.max(axis=0), 1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # not default params: min=1, max=2
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), 1)
    assert_array_almost_equal(X_trans.max(axis=0), 2)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # min=-.5, max=.6
    scaler = MinMaxScaler(feature_range=(-.5, .6))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(X_trans.min(axis=0), -.5)
    assert_array_almost_equal(X_trans.max(axis=0), .6)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    # raises on invalid range
    scaler = MinMaxScaler(feature_range=(2, 1))
    assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
    # Check min max scaler on toy data with zero variance features
    """Zero-variance columns must map to a constant (no division blow-up),
    for both the estimator and the minmax_scale function interface."""
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]
    # New data outside the fitted range: column 3 extrapolates linearly.
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    # default params
    scaler = MinMaxScaler()
    X_trans = scaler.fit_transform(X)
    X_expected_0_1 = [[0., 0., 0.5],
                      [0., 0., 0.0],
                      [0., 0., 1.0]]
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    X_trans_new = scaler.transform(X_new)
    X_expected_0_1_new = [[+0., 1., 0.500],
                          [-1., 0., 0.083],
                          [+0., 0., 1.333]]
    assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
    # not default params
    scaler = MinMaxScaler(feature_range=(1, 2))
    X_trans = scaler.fit_transform(X)
    X_expected_1_2 = [[1., 1., 1.5],
                      [1., 1., 1.0],
                      [1., 1., 2.0]]
    assert_array_almost_equal(X_trans, X_expected_1_2)
    # function interface
    X_trans = minmax_scale(X)
    assert_array_almost_equal(X_trans, X_expected_0_1)
    X_trans = minmax_scale(X, feature_range=(1, 2))
    assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
    """minmax_scale along axis=1 rescales every row to the [0, 1] range."""
    row_scaled = minmax_scale(iris.data, axis=1)
    assert_array_almost_equal(row_scaled.min(axis=1), 0)
    assert_array_almost_equal(row_scaled.max(axis=1), 1)
def test_min_max_scaler_1d():
    """MinMaxScaler along a single axis: single-row, single-column and
    list inputs, constant features, and the functional interface."""
    # FIX: the input list previously contained X_list_1row twice and never
    # exercised X_list_1col (which was defined but unused at module level).
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
        scaler = MinMaxScaler(copy=True)
        X_scaled = scaler.fit(X).transform(X)
        if isinstance(X, list):
            X = np.array(X)  # cast only after scaling done
        if _check_dim_1axis(X) == 1:
            # Single sample: the degenerate range maps it to all zeros.
            assert_array_almost_equal(X_scaled.min(axis=0),
                                      np.zeros(n_features))
            assert_array_almost_equal(X_scaled.max(axis=0),
                                      np.zeros(n_features))
        else:
            assert_array_almost_equal(X_scaled.min(axis=0), .0)
            assert_array_almost_equal(X_scaled.max(axis=0), 1.)
        assert_equal(scaler.n_samples_seen_, X.shape[0])
        # check inverse transform
        X_scaled_back = scaler.inverse_transform(X_scaled)
        assert_array_almost_equal(X_scaled_back, X)
    # Constant feature: output must still stay inside [0, 1].
    X = np.ones(5).reshape(5, 1)
    scaler = MinMaxScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_greater_equal(X_scaled.min(), 0.)
    assert_less_equal(X_scaled.max(), 1.)
    assert_equal(scaler.n_samples_seen_, X.shape[0])
    # Function interface
    X_1d = X_1row.ravel()
    min_ = X_1d.min()
    max_ = X_1d.max()
    assert_array_almost_equal((X_1d - min_) / (max_ - min_),
                              minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
    """StandardScaler(with_mean=False) on dense vs CSR/CSC input:
    centering a sparse matrix must raise, statistics must agree across
    formats, and inverse_transform must round-trip."""
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)
    # Centering sparse input would densify it -> must raise.
    assert_raises(ValueError, StandardScaler().fit, X_csr)
    assert_raises(ValueError, StandardScaler().fit, X_csc)
    # A fully disabled scaler acts as the identity and preserves sparsity.
    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)
    scaler = StandardScaler(with_mean=False).fit(X)
    X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))
    scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
    X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))
    scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
    X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))
    # Fitted statistics must be format-independent.
    assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.var_, scaler_csr.var_)
    assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
    assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.var_, scaler_csc.var_)
    assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
    assert_array_almost_equal(
        X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)
    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
    X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
    """Check that StandardScaler converts integer input to floating point
    and gives consistent results for dense, CSR and CSC inputs.
    """
    # test that scaler converts integer input to floating
    # for both sparse and dense matrices
    rng = np.random.RandomState(42)
    X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler = StandardScaler(with_mean=False).fit(X)
        X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
        X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
        X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))

    # All three fitted scalers must agree on the learned statistics.
    assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.var_, scaler_csr.var_)
    assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)

    assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.var_, scaler_csc.var_)
    assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)

    assert_array_almost_equal(
        X_scaled.mean(axis=0),
        [0., 1.109, 1.856, 21., 1.559], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])

    # np.float64 instead of the removed np.float alias (NumPy >= 1.24).
    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
        X_csr_scaled.astype(np.float64), 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)

    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)

    # Use the CSC-fitted scaler for the CSC round-trip (was scaler_csr;
    # equivalent statistics, but the matching scaler is what is under test).
    X_csc_scaled_back = scaler_csc.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
    """StandardScaler.fit must leave its input untouched even with
    copy=False, for dense, CSR and CSC matrices."""
    # Check that StandardScaler.fit does not change input
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    X_copy = X.copy()
    StandardScaler(copy=False).fit(X)
    assert_array_equal(X, X_copy)

    X_csr_copy = X_csr.copy()
    StandardScaler(with_mean=False, copy=False).fit(X_csr)
    assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())

    X_csc_copy = X_csc.copy()
    StandardScaler(with_mean=False, copy=False).fit(X_csc)
    assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
    """Centering (with_mean=True) is impossible on sparse input and must
    raise ValueError in scale / fit / transform / inverse_transform."""
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    # check scaling and fit with direct calls on sparse data
    assert_raises(ValueError, scale, X_csr, with_mean=True)
    assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)

    assert_raises(ValueError, scale, X_csc, with_mean=True)
    assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)

    # check transform and inverse_transform after a fit on a dense array
    scaler = StandardScaler(with_mean=True).fit(X)
    assert_raises(ValueError, scaler.transform, X_csr)
    assert_raises(ValueError, scaler.transform, X_csc)

    X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
    assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)

    X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
    assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
    """scale() must reject NaN and infinite values with a ValueError."""
    # Check if non finite inputs raise ValueError
    X = [[np.nan, 5, 6, 7, 8]]
    assert_raises_regex(ValueError,
                        "Input contains NaN, infinity or a value too large",
                        scale, X)

    X = [[np.inf, 5, 6, 7, 8]]
    assert_raises_regex(ValueError,
                        "Input contains NaN, infinity or a value too large",
                        scale, X)
def test_robust_scaler_2d_arrays():
    """RobustScaler on a 2d array: per-column median becomes 0 and a
    constant column keeps zero spread."""
    # Test robust scaling of 2d array along first axis
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero

    scaler = RobustScaler()
    X_scaled = scaler.fit(X).transform(X)

    assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
    assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_transform_one_row_csr():
    """RobustScaler (no centering) must transform and round-trip a
    single-row CSR matrix correctly."""
    # Check RobustScaler on transforming csr matrix with one row
    rng = np.random.RandomState(0)
    X = rng.randn(4, 5)
    single_row = np.array([[0.1, 1., 2., 0., -1.]])
    scaler = RobustScaler(with_centering=False)
    scaler = scaler.fit(X)
    row_trans = scaler.transform(sparse.csr_matrix(single_row))
    # Without centering the transform reduces to dividing by scale_.
    row_expected = single_row / scaler.scale_
    assert_array_almost_equal(row_trans.toarray(), row_expected)
    row_scaled_back = scaler.inverse_transform(row_trans)
    assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
    """On iris, RobustScaler's output has zero median and unit IQR per
    feature, and inverse_transform recovers the data."""
    X = iris.data
    scaler = RobustScaler()
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(np.median(X_trans, axis=0), 0)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    q = np.percentile(X_trans, q=(25, 75), axis=0)
    iqr = q[1] - q[0]
    assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
    """With a custom quantile_range=(10, 90), that quantile spread (not
    the IQR) must be scaled to 1."""
    X = iris.data
    scaler = RobustScaler(quantile_range=(10, 90))
    X_trans = scaler.fit_transform(X)
    assert_array_almost_equal(np.median(X_trans, axis=0), 0)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)
    q = np.percentile(X_trans, q=(10, 90), axis=0)
    q_range = q[1] - q[0]
    assert_array_almost_equal(q_range, 1)
def test_robust_scaler_invalid_range():
    """RobustScaler must reject quantile ranges outside [0, 100] or with
    the lower bound not below the upper bound."""
    for range_ in [
        (-1, 90),
        (-2, -3),
        (10, 101),
        (100.5, 101),
        (90, 50),
    ]:
        scaler = RobustScaler(quantile_range=range_)

        # Raw string: '\(' is a regex escape, not a string escape.
        assert_raises_regex(ValueError, r'Invalid quantile range: \(',
                            scaler.fit, iris.data)
def test_scale_function_without_centering():
    """The scale() function with with_mean=False must work on dense, CSR
    and CSC input, agree between formats, and not modify its input."""
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always of zero
    X_csr = sparse.csr_matrix(X)

    X_scaled = scale(X, with_mean=False)
    assert_false(np.any(np.isnan(X_scaled)))

    X_csr_scaled = scale(X_csr, with_mean=False)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    # test csc has same outcome
    X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
    assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())

    # raises value error on axis != 0
    assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)

    assert_array_almost_equal(X_scaled.mean(axis=0),
                              [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # Check that X has not been copied
    assert_true(X_scaled is not X)

    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # null scale
    X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
    assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
    """robust_scale with axis=1 must scale rows: zero median and unit IQR
    per sample."""
    X = iris.data
    X_trans = robust_scale(X, axis=1)
    assert_array_almost_equal(np.median(X_trans, axis=1), 0)
    q = np.percentile(X_trans, q=(25, 75), axis=1)
    iqr = q[1] - q[0]
    assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
    """RobustScaler on toy data with zero-variance features: constant
    columns map to zero, round-trip recovers the data, and previously
    unseen data is transformed with the fitted statistics."""
    # Check RobustScaler on toy data with zero variance features
    X = [[0., 1., +0.5],
         [0., 1., -0.1],
         [0., 1., +1.1]]

    scaler = RobustScaler()
    X_trans = scaler.fit_transform(X)

    # NOTE: for such a small sample size, what we expect in the third column
    # depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produces by np.percentile
    # using numpy 1.9 Calculating quantiles with
    # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
    # would yield very different results!
    X_expected = [[0., 0., +0.0],
                  [0., 0., -1.0],
                  [0., 0., +1.0]]
    assert_array_almost_equal(X_trans, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    X_expected_new = [[+0., 1., +0.],
                      [-1., 0., -0.83333],
                      [+0., 0., +1.66667]]
    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
    """MaxAbsScaler on toy data with zero-variance features, via the
    estimator, the maxabs_scale function, and sparse CSR/CSC inputs."""
    # Check MaxAbsScaler on toy data with zero variance features
    X = [[0., 1., +0.5],
         [0., 1., -0.3],
         [0., 1., +1.5],
         [0., 0., +0.0]]

    scaler = MaxAbsScaler()
    X_trans = scaler.fit_transform(X)
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]
    assert_array_almost_equal(X_trans, X_expected)
    X_trans_inv = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_trans_inv)

    # make sure new data gets transformed correctly
    X_new = [[+0., 2., 0.5],
             [-1., 1., 0.0],
             [+0., 1., 1.5]]
    X_trans_new = scaler.transform(X_new)
    X_expected_new = [[+0., 2.0, 1.0 / 3.0],
                      [-1., 1.0, 0.0],
                      [+0., 1.0, 1.0]]
    assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)

    # function interface
    X_trans = maxabs_scale(X)
    assert_array_almost_equal(X_trans, X_expected)

    # sparse data
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)
    X_trans_csr = scaler.fit_transform(X_csr)
    X_trans_csc = scaler.fit_transform(X_csc)
    X_expected = [[0., 1., 1.0 / 3.0],
                  [0., 1., -0.2],
                  [0., 1., 1.0],
                  [0., 0., 0.0]]
    assert_array_almost_equal(X_trans_csr.A, X_expected)
    assert_array_almost_equal(X_trans_csc.A, X_expected)
    X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
    X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
    assert_array_almost_equal(X, X_trans_csr_inv.A)
    assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
    """MaxAbsScaler must scale by the maximum *absolute* value, so a
    large negative entry dominates its column."""
    # Check MaxAbsScaler on toy data with a large negative value
    X = [[0., 1., +0.5, -1.0],
         [0., 1., -0.3, -0.5],
         [0., 1., -100.0, 0.0],
         [0., 0., +0.0, -2.0]]

    scaler = MaxAbsScaler()
    X_trans = scaler.fit_transform(X)
    X_expected = [[0., 1., 0.005, -0.5],
                  [0., 1., -0.003, -0.25],
                  [0., 1., -1.0, 0.0],
                  [0., 0., 0.0, -1.0]]
    assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
    """MaxAbsScaler must transform and round-trip a single-row CSR
    matrix."""
    # Check MaxAbsScaler on transforming csr matrix with one row
    X = sparse.csr_matrix([[0.5, 1., 1.]])
    scaler = MaxAbsScaler()
    scaler = scaler.fit(X)
    X_trans = scaler.transform(X)
    X_expected = sparse.csr_matrix([[1., 1., 1.]])
    assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
    X_scaled_back = scaler.inverse_transform(X_trans)
    assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_warning_scaling_integers():
    """Scaling integer data must emit a DataConversionWarning mentioning
    the dtype conversion to float64."""
    # Check warning when scaling integer data
    X = np.array([[1, 2, 0],
                  [0, 0, 0]], dtype=np.uint8)

    w = "Data with input dtype uint8 was converted to float64"

    clean_warning_registry()
    assert_warns_message(DataConversionWarning, w, scale, X)
    assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
    assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
    """MaxAbsScaler on one-row / one-column inputs, a constant feature,
    and the maxabs_scale function interface."""
    # Test scaling of dataset along single axis
    # NOTE(review): X_list_1row appears twice in this list; the second
    # entry was probably meant to be X_list_1col -- confirm against the
    # module-level fixtures before changing.
    for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
        scaler = MaxAbsScaler(copy=True)
        X_scaled = scaler.fit(X).transform(X)

        if isinstance(X, list):
            X = np.array(X)  # cast only after scaling done

        if _check_dim_1axis(X) == 1:
            assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
                                      np.ones(n_features))
        else:
            assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
        assert_equal(scaler.n_samples_seen_, X.shape[0])

        # check inverse transform
        X_scaled_back = scaler.inverse_transform(X_scaled)
        assert_array_almost_equal(X_scaled_back, X)

    # Constant feature
    X = np.ones(5).reshape(5, 1)
    scaler = MaxAbsScaler()
    X_scaled = scaler.fit(X).transform(X)
    assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
    assert_equal(scaler.n_samples_seen_, X.shape[0])

    # function interface
    X_1d = X_1row.ravel()
    max_abs = np.abs(X_1d).max()
    assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
    """partial_fit over batches of several sizes must reproduce the
    statistics and transform of a single full fit, for dense, CSR and
    CSC inputs."""
    # Test if partial_fit run over many batches of size 1 and 50
    # gives the same results as fit
    X = X_2d[:100, :]
    n = X.shape[0]

    for chunk_size in [1, 2, 50, n, n + 42]:
        # Test mean at the end of the process
        scaler_batch = MaxAbsScaler().fit(X)

        scaler_incr = MaxAbsScaler()
        scaler_incr_csr = MaxAbsScaler()
        scaler_incr_csc = MaxAbsScaler()
        for batch in gen_batches(n, chunk_size):
            scaler_incr = scaler_incr.partial_fit(X[batch])
            X_csr = sparse.csr_matrix(X[batch])
            scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
            X_csc = sparse.csc_matrix(X[batch])
            scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)

        assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
        assert_array_almost_equal(scaler_batch.max_abs_,
                                  scaler_incr_csr.max_abs_)
        assert_array_almost_equal(scaler_batch.max_abs_,
                                  scaler_incr_csc.max_abs_)
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
        assert_equal(scaler_batch.n_samples_seen_,
                     scaler_incr_csr.n_samples_seen_)
        assert_equal(scaler_batch.n_samples_seen_,
                     scaler_incr_csc.n_samples_seen_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
        assert_array_almost_equal(scaler_batch.transform(X),
                                  scaler_incr.transform(X))

        # Test std after 1 step
        batch0 = slice(0, chunk_size)
        scaler_batch = MaxAbsScaler().fit(X[batch0])
        scaler_incr = MaxAbsScaler().partial_fit(X[batch0])

        assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
        assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
        assert_array_almost_equal(scaler_batch.transform(X),
                                  scaler_incr.transform(X))

        # Test std until the end of partial fits, and
        scaler_batch = MaxAbsScaler().fit(X)
        scaler_incr = MaxAbsScaler()  # Clean estimator
        for i, batch in enumerate(gen_batches(n, chunk_size)):
            scaler_incr = scaler_incr.partial_fit(X[batch])
            assert_correct_incr(i, batch_start=batch.start,
                                batch_stop=batch.stop, n=n,
                                chunk_size=chunk_size,
                                n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
    """Normalizer(norm='l1') must produce unit absolute row sums (zero
    for an all-zero row), honour the copy flag for CSR input, and copy
    when given formats that cannot be normalized in place."""
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='l1', copy=True)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is not X)
        X_norm1 = toarray(X_norm)

        normalizer = Normalizer(norm='l1', copy=False)
        X_norm = normalizer.transform(X)
        assert_true(X_norm is X)
        X_norm2 = toarray(X_norm)

        for X_norm in (X_norm1, X_norm2):
            row_sums = np.abs(X_norm).sum(axis=1)
            for i in range(3):
                assert_almost_equal(row_sums[i], 1.0)
            assert_almost_equal(row_sums[3], 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # Use the l1 norm here (was 'l2' -- this is the l1 test) and
        # recompute row_sums for *this* result instead of asserting on
        # the stale value from the loop above.
        X_norm = Normalizer(norm='l1', copy=False).transform(X)

        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
    """Normalizer(norm='l2') must produce unit Euclidean row norms (zero
    for an all-zero row), honour the copy flag for CSR input, and copy
    when given formats that cannot be normalized in place."""
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='l2', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)

        normalizer = Normalizer(norm='l2', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)

        for X_norm in (X_norm1, X_norm2):
            for i in range(3):
                assert_almost_equal(la.norm(X_norm[i]), 1.0)
            assert_almost_equal(la.norm(X_norm[3]), 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)

        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        for i in range(3):
            assert_almost_equal(la.norm(X_norm[i]), 1.0)
        assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
    """Normalizer(norm='max') must produce rows whose maximum is 1 (zero
    for an all-zero row), honour the copy flag for CSR input, and copy
    when given formats that cannot be normalized in place."""
    rng = np.random.RandomState(0)
    X_dense = rng.randn(4, 5)
    X_sparse_unpruned = sparse.csr_matrix(X_dense)

    # set the row number 3 to zero
    X_dense[3, :] = 0.0

    # set the row number 3 to zero without pruning (can happen in real life)
    indptr_3 = X_sparse_unpruned.indptr[3]
    indptr_4 = X_sparse_unpruned.indptr[4]
    X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0

    # build the pruned variant using the regular constructor
    X_sparse_pruned = sparse.csr_matrix(X_dense)

    # check inputs that support the no-copy optim
    for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
        normalizer = Normalizer(norm='max', copy=True)
        X_norm1 = normalizer.transform(X)
        assert_true(X_norm1 is not X)
        X_norm1 = toarray(X_norm1)

        normalizer = Normalizer(norm='max', copy=False)
        X_norm2 = normalizer.transform(X)
        assert_true(X_norm2 is X)
        X_norm2 = toarray(X_norm2)

        for X_norm in (X_norm1, X_norm2):
            row_maxs = X_norm.max(axis=1)
            for i in range(3):
                assert_almost_equal(row_maxs[i], 1.0)
            assert_almost_equal(row_maxs[3], 0.0)

    # check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        # Use the max norm here (was 'l2' -- this is the max-norm test)
        # and recompute row_maxs for *this* result instead of asserting
        # on the stale value from the loop above.
        X_norm = Normalizer(norm='max', copy=False).transform(X)

        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))

        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(row_maxs[3], 0.0)
def test_normalize():
    """Exercise the normalize() function: axis handling, invalid-argument
    errors, dtype preservation, and the return_norm option."""
    # Test normalize function
    # Only tests functionality not used by the tests for Normalizer.
    X = np.random.RandomState(37).randn(3, 2)
    assert_array_equal(normalize(X, copy=False),
                       normalize(X.T, axis=0, copy=False).T)
    assert_raises(ValueError, normalize, [[0]], axis=2)
    assert_raises(ValueError, normalize, [[0]], norm='l3')

    rs = np.random.RandomState(0)
    X_dense = rs.randn(10, 5)
    X_sparse = sparse.csr_matrix(X_dense)
    ones = np.ones((10))
    for X in (X_dense, X_sparse):
        for dtype in (np.float32, np.float64):
            for norm in ('l1', 'l2'):
                X = X.astype(dtype)
                X_norm = normalize(X, norm=norm)
                # normalize must not upcast/downcast the input dtype
                assert_equal(X_norm.dtype, dtype)

                X_norm = toarray(X_norm)
                if norm == 'l1':
                    row_sums = np.abs(X_norm).sum(axis=1)
                else:
                    X_norm_squared = X_norm**2
                    row_sums = X_norm_squared.sum(axis=1)

                assert_array_almost_equal(row_sums, ones)

    # Test return_norm
    X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
    for norm in ('l1', 'l2', 'max'):
        _, norms = normalize(X_dense, norm=norm, return_norm=True)
        if norm == 'l1':
            assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
        elif norm == 'l2':
            assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
        else:
            assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))

    # return_norm is only implemented for 'max' on sparse input
    X_sparse = sparse.csr_matrix(X_dense)
    for norm in ('l1', 'l2'):
        assert_raises(NotImplementedError, normalize, X_sparse,
                      norm=norm, return_norm=True)
    _, norms = normalize(X_sparse, norm='max', return_norm=True)
    assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
def test_binarizer():
    """Binarizer thresholding across array/list/CSR/CSC inputs, the copy
    flag, a negative threshold on dense data, and the ValueError for a
    negative threshold on sparse data."""
    X_ = np.array([[1, 0, 5], [2, 3, -1]])

    for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):

        X = init(X_.copy())

        binarizer = Binarizer(threshold=2.0, copy=True)
        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 4)
        assert_equal(np.sum(X_bin == 1), 2)
        X_bin = binarizer.transform(X)
        # output sparseness must follow input sparseness
        assert_equal(sparse.issparse(X), sparse.issparse(X_bin))

        binarizer = Binarizer(copy=True).fit(X)
        X_bin = toarray(binarizer.transform(X))
        assert_true(X_bin is not X)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

        binarizer = Binarizer(copy=True)
        X_bin = binarizer.transform(X)
        assert_true(X_bin is not X)
        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

        binarizer = Binarizer(copy=False)
        X_bin = binarizer.transform(X)
        if init is not list:
            # in-place only possible for array-like inputs, not lists
            assert_true(X_bin is X)

        binarizer = Binarizer(copy=False)
        X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
        X_bin = binarizer.transform(X_float)
        if init is not list:
            assert_true(X_bin is X_float)

        X_bin = toarray(X_bin)
        assert_equal(np.sum(X_bin == 0), 2)
        assert_equal(np.sum(X_bin == 1), 4)

    binarizer = Binarizer(threshold=-0.5, copy=True)
    for init in (np.array, list):
        X = init(X_.copy())

        X_bin = toarray(binarizer.transform(X))
        assert_equal(np.sum(X_bin == 0), 1)
        assert_equal(np.sum(X_bin == 1), 5)
        X_bin = binarizer.transform(X)

    # Cannot use threshold < 0 for sparse
    assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
    """KernelCenterer on a linear kernel must match centering the data
    with StandardScaler(with_std=False) in feature space, at fit and
    predict time."""
    # Test that KernelCenterer is equivalent to StandardScaler
    # in feature space
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    scaler = StandardScaler(with_std=False)
    scaler.fit(X_fit)
    X_fit_centered = scaler.transform(X_fit)
    K_fit = np.dot(X_fit, X_fit.T)

    # center fit time matrix
    centerer = KernelCenterer()
    K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
    K_fit_centered2 = centerer.fit_transform(K_fit)
    assert_array_almost_equal(K_fit_centered, K_fit_centered2)

    # center predict time matrix
    X_pred = rng.random_sample((2, 4))
    K_pred = np.dot(X_pred, X_fit.T)
    X_pred_centered = scaler.transform(X_pred)
    K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
    K_pred_centered2 = centerer.transform(K_pred)
    assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_cv_pipeline_precomputed():
    """A Pipeline containing a KernelCenterer must expose the _pairwise
    attribute so cross-validation splits a precomputed kernel correctly."""
    # Cross-validate a regression on four coplanar points with the same
    # value. Use precomputed kernel to ensure Pipeline with KernelCenterer
    # is treated as a _pairwise operation.
    X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
    y_true = np.ones((4,))
    K = X.dot(X.T)
    kcent = KernelCenterer()
    pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())])

    # did the pipeline set the _pairwise attribute?
    assert_true(pipeline._pairwise)

    # test cross-validation, score should be almost perfect
    # NB: this test is pretty vacuous -- it's mainly to test integration
    # of Pipeline and KernelCenterer
    y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
    assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
    """fit_transform must match fit(...).transform(...) for several
    transformers."""
    rng = np.random.RandomState(0)
    data = rng.random_sample((5, 4))
    for transformer in (StandardScaler(), Normalizer(), Binarizer()):
        via_fit_then_transform = transformer.fit(data).transform(data)
        via_fit_transform = transformer.fit_transform(data)
        assert_array_equal(via_fit_then_transform, via_fit_transform)
def test_add_dummy_feature():
    """add_dummy_feature must prepend a column of ones to a dense input."""
    X = [[1, 0], [0, 1], [0, 1]]
    X = add_dummy_feature(X)
    assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
    """add_dummy_feature must preserve the COO format while prepending
    the ones column."""
    X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
    X = add_dummy_feature(X)
    assert_true(sparse.isspmatrix_coo(X), X)
    assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
    """add_dummy_feature must preserve the CSC format while prepending
    the ones column."""
    X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
    X = add_dummy_feature(X)
    assert_true(sparse.isspmatrix_csc(X), X)
    assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
    """add_dummy_feature must preserve the CSR format while prepending
    the ones column."""
    X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
    X = add_dummy_feature(X)
    assert_true(sparse.isspmatrix_csr(X), X)
    assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
    """OneHotEncoder fit/transform in the default sparse mode: automatic
    and explicit n_values, out-of-bounds and negative inputs, and wrong
    feature counts."""
    # Test OneHotEncoder's fit and transform.
    X = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder()
    # discover max values automatically
    X_trans = enc.fit_transform(X).toarray()
    assert_equal(X_trans.shape, (2, 5))
    assert_array_equal(enc.active_features_,
                       np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
    assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])

    # check outcome
    assert_array_equal(X_trans,
                       [[0., 1., 0., 1., 1.],
                        [1., 0., 1., 0., 1.]])

    # max value given as 3
    enc = OneHotEncoder(n_values=4)
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 4 * 3))
    assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])

    # max value given per feature
    enc = OneHotEncoder(n_values=[3, 2, 2])
    X = [[1, 0, 1], [0, 1, 1]]
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 3 + 2 + 2))
    assert_array_equal(enc.n_values_, [3, 2, 2])
    # check that testing with larger feature works:
    X = np.array([[2, 0, 1], [0, 1, 1]])
    enc.transform(X)

    # test that an error is raised when out of bounds:
    X_too_large = [[0, 2, 1], [0, 1, 1]]
    assert_raises(ValueError, enc.transform, X_too_large)
    # Raw string: '\[' / '\]' are regex escapes, not string escapes.
    error_msg = r"unknown categorical feature present \[2\] during transform."
    assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)
    assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)

    # test that error is raised when wrong number of features
    assert_raises(ValueError, enc.transform, X[:, :-1])
    # test that error is raised when wrong number of features in fit
    # with prespecified n_values
    assert_raises(ValueError, enc.fit, X[:, :-1])
    # test exception on wrong init param: a type object is not a valid
    # n_values (uses builtin int -- the np.int alias was removed in
    # NumPy 1.24 and was the same object anyway).
    assert_raises(TypeError, OneHotEncoder(n_values=int).fit, X)

    enc = OneHotEncoder()
    # test negative input to fit
    assert_raises(ValueError, enc.fit, [[0], [-1]])

    # test negative input to transform
    enc.fit([[0], [1]])
    assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
    """OneHotEncoder with sparse=False must return a dense array with
    the same encoding as the sparse path."""
    # check for sparse=False
    X = [[3, 2, 1], [0, 1, 1]]
    enc = OneHotEncoder(sparse=False)
    # discover max values automatically
    X_trans = enc.fit_transform(X)
    assert_equal(X_trans.shape, (2, 5))
    assert_array_equal(enc.active_features_,
                       np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
    assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])

    # check outcome
    assert_array_equal(X_trans,
                       np.array([[0., 1., 0., 1., 1.],
                                 [1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
    """Apply Binarizer.transform to the columns selected by `sel` (for
    both dense and CSR versions of X) and compare with X_expected."""
    for M in (X, sparse.csr_matrix(X)):
        Xtr = _transform_selected(M, Binarizer().transform, sel)
        assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
    """_transform_selected with index lists, boolean masks, 'all', and
    empty selections."""
    X = [[3, 2, 1], [0, 1, 1]]

    X_expected = [[1, 2, 1], [0, 1, 1]]
    _check_transform_selected(X, X_expected, [0])
    _check_transform_selected(X, X_expected, [True, False, False])

    X_expected = [[1, 1, 1], [0, 1, 1]]
    _check_transform_selected(X, X_expected, [0, 1, 2])
    _check_transform_selected(X, X_expected, [True, True, True])
    _check_transform_selected(X, X_expected, "all")

    # empty selection: input passes through unchanged
    _check_transform_selected(X, X, [])
    _check_transform_selected(X, X, [False, False, False])
def test_transform_selected_copy_arg():
    """_transform_selected(copy=True) must protect the caller's array
    even when the transformer mutates its input in place."""
    # transformer that alters X
    def _mutating_transformer(X):
        X[0, 0] = X[0, 0] + 1
        return X

    original_X = np.asarray([[1, 2], [3, 4]])
    expected_Xtr = [[2, 2], [3, 4]]

    X = original_X.copy()
    Xtr = _transform_selected(X, _mutating_transformer, copy=True,
                              selected='all')

    assert_array_equal(toarray(X), toarray(original_X))
    assert_array_equal(toarray(Xtr), expected_Xtr)
def _run_one_hot(X, X2, cat):
    """Fit a OneHotEncoder restricted to `cat` on X; return the encoded
    X and the encoding of X2 under the fitted encoder."""
    encoder = OneHotEncoder(categorical_features=cat)
    encoded_fit = encoder.fit_transform(X)
    encoded_new = encoder.transform(X2)
    return encoded_fit, encoded_new
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
    """categorical_features masks: a single categorical column, none,
    and all columns categorical."""
    X = np.array([[3, 2, 1], [0, 1, 1]])
    X2 = np.array([[1, 1, 1]])

    cat = [True, False, False]
    _check_one_hot(X, X2, cat, 4)

    # Edge case: all non-categorical
    cat = [False, False, False]
    _check_one_hot(X, X2, cat, 3)

    # Edge case: all categorical
    cat = [True, True, True]
    _check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
    """handle_unknown behaviour: 'error' raises, 'ignore' zeroes unknown
    categories, and an invalid option raises at transform time."""
    X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
    y = np.array([[4, 1, 1]])

    # Test that one hot encoder raises error for unknown features
    # present during transform.
    oh = OneHotEncoder(handle_unknown='error')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)

    # Test the ignore option, ignores unknown features.
    oh = OneHotEncoder(handle_unknown='ignore')
    oh.fit(X)
    assert_array_equal(
        oh.transform(y).toarray(),
        np.array([[0., 0., 0., 0., 1., 0., 0.]]))

    # Raise error if handle_unknown is neither ignore or error.
    oh = OneHotEncoder(handle_unknown='42')
    oh.fit(X)
    assert_raises(ValueError, oh.transform, y)
def test_fit_cold_start():
    """Refitting partial_fit-capable scalers on data with a different
    number of features must work (internal state is reset by fit)."""
    X = iris.data
    X_2d = X[:, :2]

    # Scalers that have a partial_fit method
    scalers = [StandardScaler(with_mean=False, with_std=False),
               MinMaxScaler(),
               MaxAbsScaler()]

    for scaler in scalers:
        scaler.fit_transform(X)
        # with a different shape, this may break the scaler unless the internal
        # state is reset
        scaler.fit_transform(X_2d)
| bsd-3-clause |
antkillerfarm/antkillerfarm_crazy | python/ml/keras/hello_gan1.py | 1 | 8363 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
import keras.backend as K
from keras.datasets import mnist
from keras.layers import *
from keras.models import *
from keras.optimizers import *
from keras.initializers import *
from keras.utils.generic_utils import Progbar
# ---- experiment configuration -------------------------------------------
GPU = "0"  # CUDA device index exposed to TensorFlow
RUN = '10'  # run identifier, used to namespace output paths
OUT_DIR = 'out/' + RUN  # directory for generated sample images
TENSORBOARD_DIR = 'tensorboard/wgans/' + RUN  # TensorBoard log directory

# latent vector size
Z_SIZE = 100

# number of iterations D is trained for per each G iteration
D_ITERS = 5

EPOCHS = 100
BATCH_SIZE = 100

# use specific GPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = GPU

# make sure the sample-output directory exists
if not os.path.isdir(OUT_DIR): os.makedirs(OUT_DIR)

# fixed seed for reproducibility
np.random.seed(777)

# channels-last ('tf') image layout: tensors are (height, width, channels)
K.set_image_dim_ordering('tf')
# basically return mean(y_pred),
# but with ability to inverse it for minimization (when y_true == -1)
def wasserstein_dist(y_true, y_pred):
    """Wasserstein critic loss: mean(y_true * y_pred).

    With y_true == +1 this is mean(y_pred); with y_true == -1 the sign is
    flipped, which turns minimization into maximization of the critic score.
    """
    return K.mean(y_true * y_pred)
def create_D():
    """Build the discriminator/critic model D.

    Input: a (28, 28, 1) image.  Outputs: a linear 'is fake' score (for
    the Wasserstein loss) and a 10-way softmax over digit classes.
    """
    # weights are initialized from normal distribution with below params
    weight_init = RandomNormal(mean=0., stddev=0.02)

    input_image = Input(shape=(28, 28, 1), name='input_image')

    x = Conv2D(
        32, (3, 3),
        padding='same',
        name='conv_1',
        kernel_initializer=weight_init)(input_image)
    x = LeakyReLU()(x)
    x = MaxPool2D(pool_size=2)(x)
    x = Dropout(0.3)(x)

    x = Conv2D(
        64, (3, 3),
        padding='same',
        name='conv_2',
        kernel_initializer=weight_init)(x)
    # pool_size=1 keeps spatial dims; presumably intentional -- confirm
    x = MaxPool2D(pool_size=1)(x)
    x = LeakyReLU()(x)
    x = Dropout(0.3)(x)

    x = Conv2D(
        128, (3, 3),
        padding='same',
        name='conv_3',
        kernel_initializer=weight_init)(x)
    x = MaxPool2D(pool_size=2)(x)
    x = LeakyReLU()(x)
    x = Dropout(0.3)(x)

    # NOTE(review): layer name 'coonv_4' looks like a typo for 'conv_4';
    # renaming would change saved-weight keys, so it is only flagged here.
    x = Conv2D(
        256, (3, 3),
        padding='same',
        name='coonv_4',
        kernel_initializer=weight_init)(x)
    x = MaxPool2D(pool_size=1)(x)
    x = LeakyReLU()(x)
    x = Dropout(0.3)(x)

    features = Flatten()(x)

    # real/fake score is linear (no sigmoid) as required by the WGAN loss
    output_is_fake = Dense(
        1, activation='linear', name='output_is_fake')(features)
    output_class = Dense(
        10, activation='softmax', name='output_class')(features)

    return Model(
        inputs=[input_image], outputs=[output_is_fake, output_class], name='D')
def create_G(Z_SIZE=Z_SIZE):
    """Build the conditional generator.

    Inputs: a latent vector z of length Z_SIZE and an int32 class label.
    Output: a (28, 28, 1) image in [-1, 1] (tanh activation).
    """
    DICT_LEN = 10           # number of digit classes
    EMBEDDING_LEN = Z_SIZE  # embedding sized to z so they can be multiplied
    # weights are initialized from normal distribution with below params
    weight_init = RandomNormal(mean=0., stddev=0.02)
    # class#
    input_class = Input(shape=(1, ), dtype='int32', name='input_class')
    # encode class# to the same size as Z to use hadamard multiplication later on
    e = Embedding(
        DICT_LEN, EMBEDDING_LEN,
        embeddings_initializer='glorot_uniform')(input_class)
    embedded_class = Flatten(name='embedded_class')(e)
    # latent var
    input_z = Input(shape=(Z_SIZE, ), name='input_z')
    # hadamard product: conditions z on the class label
    h = multiply([input_z, embedded_class], name='h')
    # cnn part: project, reshape to 7x7 maps, upsample twice to reach 28x28
    x = Dense(1024)(h)
    x = LeakyReLU()(x)
    x = Dense(128 * 7 * 7)(x)
    x = LeakyReLU()(x)
    x = Reshape((7, 7, 128))(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(256, (5, 5), padding='same', kernel_initializer=weight_init)(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(128, (5, 5), padding='same', kernel_initializer=weight_init)(x)
    x = LeakyReLU()(x)
    x = Conv2D(
        1, (2, 2),
        padding='same',
        activation='tanh',
        name='output_generated_image',
        kernel_initializer=weight_init)(x)
    return Model(inputs=[input_z, input_class], outputs=x, name='G')
# ---- critic D: compiled standalone for the critic-training phase ----
# with tf.device('/gpu:0'):
D = create_D()
D.compile(
    optimizer=RMSprop(lr=0.00005),
    loss=[wasserstein_dist, 'sparse_categorical_crossentropy'])
input_z = Input(shape=(Z_SIZE, ), name='input_z_')
input_class = Input(shape=(1, ),name='input_class_', dtype='int32')
# ---- generator G and stacked model D(G) for the generator phase ----
# with tf.device('/gpu:0'):
G = create_G()
# create combined D(G) model
# D.trainable = False
output_is_fake, output_class = D(G(inputs=[input_z, input_class]))
DG = Model(inputs=[input_z, input_class], outputs=[output_is_fake, output_class])
DG.get_layer('D').trainable = False  # freeze D in generator training phase
DG.compile(
    optimizer=RMSprop(lr=0.00005),
    loss=[wasserstein_dist, 'sparse_categorical_crossentropy']
)
# get our mnist data, and force it to be of shape (..., 28, 28, 1) with
# range [-1., 1.]
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = np.expand_dims(X_train, axis=3)
X_test = (X_test.astype(np.float32) - 127.5) / 127.5
X_test = np.expand_dims(X_test, axis=3)
train_num_samples, test_num_samples = X_train.shape[0], X_test.shape[0]
# per-metric loss histories
# NOTE(review): `defaultdict` is not imported at the top of this file
# (needs `from collections import defaultdict`) -- confirm the import block.
train_history = defaultdict(list)
test_history = defaultdict(list)
def save_generated_samples(epoch):
    """Render a 10x10 grid of generated digits and save it as a PNG."""
    # one latent vector per grid cell
    latent = np.random.normal(0., 1., (100, Z_SIZE))
    # digit labels 0..9 repeated for each of the 10 rows
    labels = np.array(list(range(0, 10)) * 10, dtype=np.int32)
    samples = G.predict([latent, labels.reshape(-1, 1)])
    # stack every run of 10 images into one 280x28 column, then join columns
    columns = [
        np.concatenate(samples[col * 10:(1 + col) * 10]).reshape(280, 28)
        for col in range(10)
    ]
    grid = np.hstack(columns)
    plt.imsave(OUT_DIR + '/generated-e%d.png' % epoch, grid, cmap=plt.cm.gray)
# Training loop.
# Critic label convention: fake = 1, real = -1 (see wasserstein_dist).
for epoch in range(EPOCHS):
    print('\nEpoch {} / {}:'.format(epoch + 1, EPOCHS))
    batches_num = int(train_num_samples / BATCH_SIZE)
    progress_bar = Progbar(target=batches_num)
    epoch_DG_losses = []
    epoch_D1_losses = []
    epoch_D2_losses = []
    for batch_i in range(batches_num):
        # report running means of the last 5 loss entries per head
        if len(epoch_D1_losses) > 0:
            progress_bar.update(
                batch_i,
                values=[
                    ('D1_is_fake', np.mean(epoch_D1_losses[-5:], axis=0)[1]),
                    ('D1_class', np.mean(epoch_D1_losses[-5:], axis=0)[2]),
                    ('D2_is_fake', np.mean(epoch_D2_losses[-5:], axis=0)[1]),
                    ('D2_class', np.mean(epoch_D2_losses[-5:], axis=0)[2]),
                    ('D(G)_is_fake', np.mean(epoch_DG_losses[-5:],axis=0)[1]),
                    ('D(G)_class', np.mean(epoch_DG_losses[-5:],axis=0)[2])
                ]
            )
        else:
            progress_bar.update(batch_i)
        # 1: train D on real+generated images
        # warm-up: train the critic longer early on and every 500th batch
        if batch_i < 25 or batch_i % 500 == 0:
            d_iters = 100
        else:
            d_iters = D_ITERS
        for d_it in range(d_iters):
            # unfreeze D for the critic phase
            D.trainable = True
            for l in D.layers: l.trainable = True
            # clip D weights (weight clipping enforces the WGAN constraint)
            for l in D.layers:
                weights = l.get_weights()
                weights = [np.clip(w, -0.01, 0.01) for w in weights]
                l.set_weights(weights)
            # 1.1: maximize D output on reals === minimize -1*(D(real))
            real_images = X_train[batch_i * BATCH_SIZE:(batch_i + 1) * BATCH_SIZE]
            real_images_classes = y_train[batch_i * BATCH_SIZE:(batch_i + 1) * BATCH_SIZE]
            D_loss = D.train_on_batch(real_images, [-np.ones(BATCH_SIZE), real_images_classes])
            epoch_D1_losses.append(D_loss)
            # 1.2: minimize D output on fakes
            zz = np.random.normal(0., 1., (BATCH_SIZE, Z_SIZE))
            generated_classes = np.random.randint(0, 10, BATCH_SIZE)
            generated_images = G.predict([zz, generated_classes.reshape(-1, 1)])
            D_loss = D.train_on_batch(generated_images, [np.ones(BATCH_SIZE), generated_classes])
            epoch_D2_losses.append(D_loss)
        # 2: train D(G) (D is frozen)
        # minimize D output while supplying it with fakes, telling it that they are reals (-1)
        D.trainable = False
        for l in D.layers: l.trainable = False
        zz = np.random.normal(0., 1., (BATCH_SIZE, Z_SIZE))
        generated_classes = np.random.randint(0, 10, BATCH_SIZE)
        DG_loss = DG.train_on_batch(
            [zz, generated_classes.reshape((-1, 1))],
            [-np.ones(BATCH_SIZE), generated_classes])
        epoch_DG_losses.append(DG_loss)
    # dump a sample grid once per epoch
    save_generated_samples(epoch)
| gpl-3.0 |
achim1/ctplot | setup.py | 2 | 2879 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Have a look at https://pythonhosted.org/setuptools
# http://stackoverflow.com/questions/7522250/how-to-include-package-data-with-setuptools-distribute
# http://stackoverflow.com/questions/1231688/how-do-i-remove-packages-installed-with-pythons-easy-install
# http://stackoverflow.com/questions/6344076/differences-between-distribute-distutils-setuptools-and-distutils2?answertab=active#tab-top
from ez_setup import use_setuptools
use_setuptools()
import os
from datetime import datetime
from subprocess import check_output
from setuptools import setup, find_packages
# https://pythonhosted.org/setuptools/setuptools.html#non-package-data-files
# http://peak.telecommunity.com/DevCenter/PythonEggs#accessing-package-resources
from pkg_resources import resource_string, resource_filename, require
required_libs = ['matplotlib >=1.1', 'numpy >=0.9', 'scipy >=0.12', 'pytz', 'numexpr >=1.4',
'tables >=2.2', 'python-dateutil >=1.5', 'Pillow >=3.1.0', 'basemap >=1.0', 'locket']
def readme(name):
    """Utility function to read the README file.

    Used for the long_description. It's nice, because now
    1) we have a top level README file and
    2) it's easier to type in the README file than to put a raw string in below

    Parameters
    ----------
    name : str
        Resource path relative to this package, e.g. 'README.md'.
    """
    return resource_string(__name__, name)
def update_version():
cwd = os.path.dirname(__file__)
version = '1.3b0' # see PEP 440
revision = 'unknown'
try:
revision = check_output('git describe --dirty=+', shell = True, cwd = cwd).strip()
except:
pass
version_py = 'ctplot/__version__.py'
build_date = '{:%F %T}'.format(datetime.now())
with open(os.path.join(cwd, version_py), 'w') as f:
f.write("__version__ = '{}'\n".format(version))
f.write("__revision__ = '{}'\n".format(revision))
f.write("__build_date__ = '{}'\n".format(build_date))
print 'updated', version_py, 'to', version, revision, build_date
# regenerate the version module, then import it for the metadata below
update_version()
import ctplot

setup(
    name = ctplot.__name__,
    version = ctplot.__version__,
    author = ctplot.__author__,
    author_email = ctplot.__author_email__,
    description = ctplot.__description__,
    license = ctplot.__license__,
    url = ctplot.__url__,
    packages = find_packages(),
    long_description = readme('README.md'),
    install_requires = required_libs,
    # fixed: the setuptools keyword is 'extras_require'; the previous
    # misspelling 'extra_require' was silently ignored, so the optional
    # 'server' extra (tornado) was never installable
    extras_require = {
        'server': ['tornado']
    },
    entry_points = {'console_scripts':[
        'rawdata=ctplot.rawdata:main',
        'mergedata=ctplot.merge:main',
        'ctplot=ctplot.plot:main',
        'ctserver=ctplot.webserver:main'
    ]},
    package_data = {
        'ctplot':['web/*.*', 'web/*/*.*', 'web/*/*/*.*']
    },
    zip_safe = True
)
| gpl-3.0 |
zorroblue/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 17 | 21562 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_deprecation_of_n_components_in_linkage_tree():
    """linkage_tree must emit a DeprecationWarning when n_components is set."""
    rng = np.random.RandomState(0)
    X = rng.randn(50, 100)
    # deprecated call path: n_components is ignored but should warn
    deprecated_result = assert_warns(DeprecationWarning,
                                     linkage_tree,
                                     X.T,
                                     n_components=10)
    children, n_nodes, n_leaves, parent = deprecated_result
    # the result must be identical to a plain call without n_components
    children_ref, n_nodes_ref, n_leaves_ref, parent_ref = linkage_tree(X.T)
    assert_array_equal(children, children_ref)
    assert_equal(n_nodes, n_nodes_ref)
    assert_equal(n_leaves, n_leaves_ref)
    assert_equal(parent, parent_ref)
def test_linkage_misc():
    # Misc tests on linkage
    rng = np.random.RandomState(42)
    X = rng.normal(size=(5, 5))
    # unknown linkage names must be rejected
    assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
    assert_raises(ValueError, linkage_tree, X, linkage='foo')
    # connectivity matrix with a shape mismatching X must be rejected
    assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
    # Smoke test FeatureAgglomeration
    FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distances matrix
    dis = cosine_distances(X)
    res = linkage_tree(dis, affinity="precomputed")
    assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity
    res = linkage_tree(X, affinity=manhattan_distances)
    assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
    # Check that we obtain the correct solution for structured linkage trees.
    rng = np.random.RandomState(0)
    # `bool` instead of `np.bool`: the alias was deprecated in NumPy 1.20
    # and removed in 1.24
    mask = np.ones([10, 10], dtype=bool)
    # Avoiding a mask with only 'True' entries
    mask[4:7, 4:7] = 0
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for tree_builder in _TREE_BUILDERS.values():
        children, n_components, n_leaves, parent = \
            tree_builder(X.T, connectivity)
        n_nodes = 2 * X.shape[1] - 1
        assert_true(len(children) + n_leaves == n_nodes)
        # Check that ward_tree raises a ValueError with a connectivity matrix
        # of the wrong shape
        assert_raises(ValueError,
                      tree_builder, X.T, np.ones((4, 4)))
        # Check that fitting with no samples raises an error
        assert_raises(ValueError,
                      tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
    # Check that we obtain the correct solution for unstructured linkage trees.
    rng = np.random.RandomState(0)
    X = rng.randn(50, 100)
    for this_X in (X, X[0]):
        # With specified a number of clusters just for the sake of
        # raising a warning and testing the warning code
        with ignore_warnings():
            children, n_nodes, n_leaves, parent = assert_warns(
                UserWarning, ward_tree, this_X.T, n_clusters=10)
        n_nodes = 2 * X.shape[1] - 1
        assert_equal(len(children) + n_leaves, n_nodes)
    # same property must hold for every registered tree builder
    for tree_builder in _TREE_BUILDERS.values():
        for this_X in (X, X[0]):
            with ignore_warnings():
                children, n_nodes, n_leaves, parent = assert_warns(
                    UserWarning, tree_builder, this_X.T, n_clusters=10)
            n_nodes = 2 * X.shape[1] - 1
            assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
    # Check that the height of the results of linkage tree is sorted.
    rng = np.random.RandomState(0)
    # `bool` instead of the removed `np.bool` alias (deprecated NumPy 1.20)
    mask = np.ones([10, 10], dtype=bool)
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for linkage_func in _TREE_BUILDERS.values():
        children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
        n_nodes = 2 * X.shape[1] - 1
        assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering_wrong_arg_memory():
    """A `memory` that is neither a str nor a joblib.Memory raises ValueError."""
    rng = np.random.RandomState(0)
    n_samples = 100
    data = rng.randn(n_samples, 50)
    bad_memory = 5  # not a cache-dir path, not a Memory instance
    model = AgglomerativeClustering(memory=bad_memory)
    assert_raises(ValueError, model.fit, data)
def test_agglomerative_clustering():
    # Check that we obtain the correct number of clusters with
    # agglomerative clustering.
    rng = np.random.RandomState(0)
    # `bool` instead of the removed `np.bool` alias (deprecated NumPy 1.20)
    mask = np.ones([10, 10], dtype=bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    for linkage in ("ward", "complete", "average"):
        clustering = AgglomerativeClustering(n_clusters=10,
                                             connectivity=connectivity,
                                             linkage=linkage)
        clustering.fit(X)
        # test caching
        try:
            tempdir = mkdtemp()
            clustering = AgglomerativeClustering(
                n_clusters=10, connectivity=connectivity,
                memory=tempdir,
                linkage=linkage)
            clustering.fit(X)
            labels = clustering.labels_
            assert_true(np.size(np.unique(labels)) == 10)
        finally:
            shutil.rmtree(tempdir)
        # Turn caching off now
        clustering = AgglomerativeClustering(
            n_clusters=10, connectivity=connectivity, linkage=linkage)
        # Check that we obtain the same solution with early-stopping of the
        # tree building
        clustering.compute_full_tree = False
        clustering.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
                                                         labels), 1)
        clustering.connectivity = None
        clustering.fit(X)
        assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that we raise a TypeError on dense matrices
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=sparse.lil_matrix(
                connectivity.toarray()[:10, :10]),
            linkage=linkage)
        assert_raises(ValueError, clustering.fit, X)
    # Test that using ward with another metric than euclidean raises an
    # exception
    clustering = AgglomerativeClustering(
        n_clusters=10,
        connectivity=connectivity.toarray(),
        affinity="manhattan",
        linkage="ward")
    assert_raises(ValueError, clustering.fit, X)
    # Test using another metric than euclidean works with linkage complete
    for affinity in PAIRED_DISTANCES.keys():
        # Compare our (structured) implementation to scipy
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=np.ones((n_samples, n_samples)),
            affinity=affinity,
            linkage="complete")
        clustering.fit(X)
        clustering2 = AgglomerativeClustering(
            n_clusters=10,
            connectivity=None,
            affinity=affinity,
            linkage="complete")
        clustering2.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
                                                         clustering.labels_),
                            1)
    # Test that using a distance matrix (affinity = 'precomputed') has same
    # results (with connectivity constraints)
    clustering = AgglomerativeClustering(n_clusters=10,
                                         connectivity=connectivity,
                                         linkage="complete")
    clustering.fit(X)
    X_dist = pairwise_distances(X)
    clustering2 = AgglomerativeClustering(n_clusters=10,
                                          connectivity=connectivity,
                                          affinity='precomputed',
                                          linkage="complete")
    clustering2.fit(X_dist)
    assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
    # Check that we obtain the correct solution in a simplistic case
    rng = np.random.RandomState(0)
    # `bool` instead of the removed `np.bool` alias (deprecated NumPy 1.20)
    mask = np.ones([10, 10], dtype=bool)
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
    agglo.fit(X)
    assert_true(np.size(np.unique(agglo.labels_)) == 5)
    X_red = agglo.transform(X)
    assert_true(X_red.shape[1] == 5)
    X_full = agglo.inverse_transform(X_red)
    assert_true(np.unique(X_full[0]).size == 5)
    assert_array_almost_equal(agglo.transform(X_full), X_red)
    # Check that fitting with no samples raises a ValueError
    assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
    """Util for comparison with scipy: two cuts must induce the same partition."""
    co_clusterings = []
    for labels in (cut1, cut2):
        n_points = len(labels)
        n_clusters = labels.max() + 1
        # one-hot encode the labelling, then form the co-clustering matrix
        onehot = np.zeros((n_points, n_clusters))
        onehot[np.arange(n_points), labels] = 1
        co_clusterings.append(np.dot(onehot, onehot.T))
    assert_true((co_clusterings[0] == co_clusterings[1]).all())
def test_scikit_vs_scipy():
    # Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
    n, p, k = 10, 5, 3
    rng = np.random.RandomState(0)
    # Not using a lil_matrix here, just to check that non sparse
    # matrices are well handled
    connectivity = np.ones((n, n))
    for linkage in _TREE_BUILDERS.keys():
        for i in range(5):
            X = .1 * rng.normal(size=(n, p))
            X -= 4. * np.arange(n)[:, np.newaxis]
            X -= X.mean(axis=1)[:, np.newaxis]
            out = hierarchy.linkage(X, method=linkage)
            # `int` instead of the removed `np.int` alias (deprecated 1.20)
            children_ = out[:, :2].astype(int)
            children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
            cut = _hc_cut(k, children, n_leaves)
            cut_ = _hc_cut(k, children_, n_leaves)
            assess_same_labelling(cut, cut_)
    # Test error management in _hc_cut
    assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
    """Connectivity in the ward tree must be propagated correctly when merging.

    If the propagation is broken, fit crashes with an IndexError.
    """
    points = np.array([(.014, .120), (.014, .099), (.014, .097),
                       (.017, .153), (.017, .153), (.018, .153),
                       (.018, .153), (.018, .153), (.018, .153),
                       (.018, .153), (.018, .153), (.018, .153),
                       (.018, .152), (.018, .149), (.018, .144)])
    knn_graph = kneighbors_graph(points, 10, include_self=False)
    model = AgglomerativeClustering(
        n_clusters=4, connectivity=knn_graph, linkage='ward')
    model.fit(points)
def test_ward_tree_children_order():
    """Structured and unstructured ward_tree must order children identically."""
    # five random datasets of 10 samples x 5 features
    n_points, n_feats = 10, 5
    rng = np.random.RandomState(0)
    full_connectivity = np.ones((n_points, n_points))
    for _ in range(5):
        data = .1 * rng.normal(size=(n_points, n_feats))
        data -= 4. * np.arange(n_points)[:, np.newaxis]
        data -= data.mean(axis=1)[:, np.newaxis]
        unstructured = ward_tree(data)
        structured = ward_tree(data, connectivity=full_connectivity)
        assert_array_equal(unstructured[0], structured[0])
def test_ward_linkage_tree_return_distance():
    # Test return_distance option on linkage and ward trees
    # test that return_distance when set true, gives same
    # output on both structured and unstructured clustering.
    n, p = 10, 5
    rng = np.random.RandomState(0)
    connectivity = np.ones((n, n))
    for i in range(5):
        X = .1 * rng.normal(size=(n, p))
        X -= 4. * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]
        out_unstructured = ward_tree(X, return_distance=True)
        out_structured = ward_tree(X, connectivity=connectivity,
                                   return_distance=True)
        # get children
        children_unstructured = out_unstructured[0]
        children_structured = out_structured[0]
        # check if we got the same clusters
        assert_array_equal(children_unstructured, children_structured)
        # check if the distances are the same
        dist_unstructured = out_unstructured[-1]
        dist_structured = out_structured[-1]
        assert_array_almost_equal(dist_unstructured, dist_structured)
        # repeat the structured/unstructured comparison for the other linkages
        for linkage in ['average', 'complete']:
            structured_items = linkage_tree(
                X, connectivity=connectivity, linkage=linkage,
                return_distance=True)[-1]
            unstructured_items = linkage_tree(
                X, linkage=linkage, return_distance=True)[-1]
            structured_dist = structured_items[-1]
            unstructured_dist = unstructured_items[-1]
            structured_children = structured_items[0]
            unstructured_children = unstructured_items[0]
            assert_array_almost_equal(structured_dist, unstructured_dist)
            assert_array_almost_equal(
                structured_children, unstructured_children)
    # test on the following dataset where we know the truth
    # taken from scipy/cluster/tests/hierarchy_test_data.py
    X = np.array([[1.43054825, -7.5693489],
                  [6.95887839, 6.82293382],
                  [2.87137846, -9.68248579],
                  [7.87974764, -6.05485803],
                  [8.24018364, -6.09495602],
                  [7.39020262, 8.54004355]])
    # truth: columns are [child_a, child_b, distance, cluster size]
    linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
                               [1., 5., 1.77045373, 2.],
                               [0., 2., 2.55760419, 2.],
                               [6., 8., 9.10208346, 4.],
                               [7., 9., 24.7784379, 6.]])
    linkage_X_complete = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.96742194, 4.],
         [7., 9., 18.77445997, 6.]])
    linkage_X_average = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.55832839, 4.],
         [7., 9., 15.44089605, 6.]])
    n_samples, n_features = np.shape(X)
    connectivity_X = np.ones((n_samples, n_samples))
    out_X_unstructured = ward_tree(X, return_distance=True)
    out_X_structured = ward_tree(X, connectivity=connectivity_X,
                                 return_distance=True)
    # check that the labels are the same
    assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
    assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
    # check that the distances are correct
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
    linkage_options = ['complete', 'average']
    X_linkage_truth = [linkage_X_complete, linkage_X_average]
    for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
        out_X_unstructured = linkage_tree(
            X, return_distance=True, linkage=linkage)
        out_X_structured = linkage_tree(
            X, connectivity=connectivity_X, linkage=linkage,
            return_distance=True)
        # check that the labels are the same
        assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
        assert_array_equal(X_truth[:, :2], out_X_structured[0])
        # check that the distances are correct
        assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
        assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    """Non-item-assignable connectivity with several components must not crash.

    Non-regression test: such a connectivity is repaired internally and a
    UserWarning is expected instead of a failure.
    """
    # dummy two-point dataset
    points = np.array([[0, 0], [1, 1]])
    # a mask with several components forces the connectivity-fixing path
    component_mask = np.array([[True, False], [False, True]])
    conn = grid_to_graph(n_x=2, n_y=2, mask=component_mask)
    model = AgglomerativeClustering(connectivity=conn, linkage='ward')
    assert_warns(UserWarning, model.fit, points)
def test_int_float_dict():
    """Round-trip IntFloatDict lookups and smoke-test the merge helpers."""
    rng = np.random.RandomState(0)
    keys = np.unique(rng.randint(100, size=10).astype(np.intp))
    values = rng.rand(len(keys))
    mapping = IntFloatDict(keys, values)
    # every stored pair must be retrievable through __getitem__
    for k, v in zip(keys, values):
        assert mapping[k] == v
    extra_keys = np.arange(50).astype(np.intp)[::2]
    extra_values = 0.5 * np.ones(50)[::2]
    other = IntFloatDict(extra_keys, extra_values)
    # Complete smoke test of the C merge routines
    max_merge(mapping, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
    average_merge(mapping, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
    """A callable connectivity must match an equivalent precomputed graph."""
    rng = np.random.RandomState(0)
    data = rng.rand(20, 5)
    precomputed_graph = kneighbors_graph(data, 3, include_self=False)
    model_precomputed = AgglomerativeClustering(connectivity=precomputed_graph)
    model_callable = AgglomerativeClustering(
        connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
    model_precomputed.fit(data)
    model_callable.fit(data)
    assert_array_equal(model_precomputed.labels_, model_callable.labels_)
def test_connectivity_ignores_diagonal():
    """Self-edges (diagonal entries) in the connectivity must not change labels."""
    rng = np.random.RandomState(0)
    data = rng.rand(20, 5)
    graph_without_self = kneighbors_graph(data, 3, include_self=False)
    graph_with_self = kneighbors_graph(data, 3, include_self=True)
    model_without = AgglomerativeClustering(connectivity=graph_without_self)
    model_with = AgglomerativeClustering(connectivity=graph_with_self)
    model_without.fit(data)
    model_with.fit(data)
    assert_array_equal(model_without.labels_, model_with.labels_)
def test_compute_full_tree():
    # Test that the full tree is computed if n_clusters is small
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)
    connectivity = kneighbors_graph(X, 5, include_self=False)
    # When n_clusters is less, the full tree should be built
    # that is the number of merges should be n_samples - 1
    agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
    agc.fit(X)
    n_samples = X.shape[0]
    n_nodes = agc.children_.shape[0]
    assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
    # we should stop when there are n_clusters.
    n_clusters = 101
    X = rng.randn(200, 2)
    connectivity = kneighbors_graph(X, 10, include_self=False)
    agc = AgglomerativeClustering(n_clusters=n_clusters,
                                  connectivity=connectivity)
    agc.fit(X)
    n_samples = X.shape[0]
    n_nodes = agc.children_.shape[0]
    # early stopping: exactly n_samples - n_clusters merges were performed
    assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
    """Every tree builder must report 5 components for an identity connectivity."""
    rng = np.random.RandomState(0)
    data = rng.rand(5, 5)
    # np.eye(5) yields a connectivity matrix with five isolated components
    identity_connectivity = np.eye(5)
    for builder in _TREE_BUILDERS.values():
        n_components = ignore_warnings(builder)(data, identity_connectivity)[1]
        assert_equal(n_components, 5)
def test_agg_n_clusters():
    """n_clusters <= 0 must raise a ValueError with an explicit message."""
    rng = np.random.RandomState(0)
    data = rng.rand(20, 10)
    for bad_n_clusters in [-1, 0]:
        model = AgglomerativeClustering(n_clusters=bad_n_clusters)
        expected_msg = ("n_clusters should be an integer greater than 0."
                        " %s was provided." % str(model.n_clusters))
        assert_raise_message(ValueError, expected_msg, model.fit, data)
def test_affinity_passed_to_fix_connectivity():
    """The affinity callable must be forwarded to the connectivity-fixing step."""
    size = 2
    rng = np.random.RandomState(0)
    data = rng.randn(size, size)
    component_mask = np.array([True, False, False, True])
    connectivity = grid_to_graph(n_x=size, n_y=size,
                                 mask=component_mask, return_as=np.ndarray)

    class CountingAffinity:
        """Counts how many times it is invoked as an affinity function."""

        def __init__(self):
            self.counter = 0

        def increment(self, *args, **kwargs):
            self.counter += 1
            return self.counter

    counting = CountingAffinity()
    linkage_tree(data, connectivity=connectivity, affinity=counting.increment)
    # fixing the two disconnected components calls the affinity three times
    assert_equal(counting.counter, 3)
| bsd-3-clause |
equialgo/scikit-learn | sklearn/metrics/classification.py | 4 | 71965 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# Bernardo Stein <bernardovstein@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
    """Check that y_true and y_pred belong to the same classification task

    This converts multiclass or binary types to a common shape, and raises a
    ValueError for a mix of multilabel and multiclass targets, a mix of
    multilabel formats, for the presence of continuous-valued or multioutput
    targets, or for targets of different lengths.

    Column vectors are squeezed to 1d, while multilabel formats are returned
    as CSR sparse label indicators.

    Parameters
    ----------
    y_true : array-like

    y_pred : array-like

    Returns
    -------
    type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
        The type of the true target data, as output by
        ``utils.multiclass.type_of_target``

    y_true : array or indicator matrix

    y_pred : array or indicator matrix
    """
    check_consistent_length(y_true, y_pred)
    type_true = type_of_target(y_true)
    type_pred = type_of_target(y_pred)

    y_type = set([type_true, type_pred])
    # a binary target compared against multiclass predictions (or vice versa)
    # is handled as plain multiclass
    if y_type == set(["binary", "multiclass"]):
        y_type = set(["multiclass"])

    if len(y_type) > 1:
        raise ValueError("Can't handle mix of {0} and {1}"
                         "".format(type_true, type_pred))

    # We can't have more than one value on y_type => The set is no more needed
    y_type = y_type.pop()

    # No metrics support "multiclass-multioutput" format
    if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
        raise ValueError("{0} is not supported".format(y_type))

    if y_type in ["binary", "multiclass"]:
        # squeeze column vectors down to 1d
        y_true = column_or_1d(y_true)
        y_pred = column_or_1d(y_pred)

    if y_type.startswith('multilabel'):
        # normalize every multilabel format to CSR indicator matrices
        y_true = csr_matrix(y_true)
        y_pred = csr_matrix(y_pred)
        y_type = 'multilabel-indicator'

    return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
    """Accuracy classification score.

    In multilabel classification, this function computes subset accuracy:
    the set of labels predicted for a sample must *exactly* match the
    corresponding set of labels in y_true.

    Read more in the :ref:`User Guide <accuracy_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If ``False``, return the number of correctly classified samples.
        Otherwise, return the fraction of correctly classified samples.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        If ``normalize == True``, return the fraction of correctly
        classified samples (float), else return their count (int). The best
        performance is 1 with ``normalize == True`` and the number of
        samples with ``normalize == False``.

    See also
    --------
    jaccard_similarity_score, hamming_loss, zero_one_loss

    Notes
    -----
    In binary and multiclass classification, this function is equal
    to the ``jaccard_similarity_score`` function.
    """
    # Compute accuracy for each possible representation
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if not y_type.startswith('multilabel'):
        per_sample_hit = y_true == y_pred
    else:
        # a multilabel sample counts as correct only when every label matches
        per_sample_hit = count_nonzero(y_true - y_pred, axis=1) == 0
    return _weighted_sum(per_sample_hit, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
    """Compute confusion matrix to evaluate the accuracy of a classification

    By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
    is equal to the number of observations known to be in group :math:`i` but
    predicted to be in group :math:`j`.

    Thus in binary classification, the count of true negatives is
    :math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
    :math:`C_{1,1}` and false positives is :math:`C_{0,1}`.

    Read more in the :ref:`User Guide <confusion_matrix>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.

    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.

    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to reorder
        or select a subset of labels.
        If none is given, those that appear at least once
        in ``y_true`` or ``y_pred`` are used in sorted order.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    C : array, shape = [n_classes, n_classes]
        Confusion matrix

    References
    ----------
    .. [1] `Wikipedia entry for the Confusion matrix
           <https://en.wikipedia.org/wiki/Confusion_matrix>`_

    Examples
    --------
    >>> from sklearn.metrics import confusion_matrix
    >>> y_true = [2, 0, 2, 2, 0, 1]
    >>> y_pred = [0, 0, 2, 2, 0, 2]
    >>> confusion_matrix(y_true, y_pred)
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])

    >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
    >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
    >>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
    array([[2, 0, 0],
           [0, 0, 1],
           [1, 0, 2]])
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type not in ("binary", "multiclass"):
        raise ValueError("%s is not supported" % y_type)

    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)
        # Vectorized membership test: O(n + m) instead of the O(n * m)
        # cost of testing each requested label against y_true separately.
        if not np.in1d(labels, y_true).any():
            raise ValueError("At least one label specified must be in y_true")

    if sample_weight is None:
        # Unweighted: every sample counts once.  Use the builtin ``int``
        # dtype; the ``np.int`` alias is deprecated and removed in
        # NumPy >= 1.24.
        sample_weight = np.ones(y_true.shape[0], dtype=int)
    else:
        sample_weight = np.asarray(sample_weight)

    check_consistent_length(sample_weight, y_true, y_pred)

    n_labels = labels.size
    label_to_ind = dict((y, x) for x, y in enumerate(labels))
    # Convert y_true / y_pred to matrix indices; labels that were not
    # requested map to an out-of-range index so they can be dropped.
    y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
    y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])

    # Keep only samples whose true AND predicted labels are both in
    # ``labels``; drop the corresponding weights as well.
    ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    sample_weight = sample_weight[ind]

    # coo_matrix sums the weights of duplicate (true, pred) coordinates,
    # which performs the per-cell counting for us.
    CM = coo_matrix((sample_weight, (y_true, y_pred)),
                    shape=(n_labels, n_labels)
                    ).toarray()

    return CM
def cohen_kappa_score(y1, y2, labels=None, weights=None):
    r"""Cohen's kappa: a statistic that measures inter-annotator agreement.

    This function computes Cohen's kappa [1]_, a score that expresses the level
    of agreement between two annotators on a classification problem. It is
    defined as

    .. math::
        \kappa = (p_o - p_e) / (1 - p_e)

    where :math:`p_o` is the empirical probability of agreement on the label
    assigned to any sample (the observed agreement ratio), and :math:`p_e` is
    the expected agreement when both annotators assign labels randomly.
    :math:`p_e` is estimated using a per-annotator empirical prior over the
    class labels [2]_.

    Read more in the :ref:`User Guide <cohen_kappa>`.

    Parameters
    ----------
    y1 : array, shape = [n_samples]
        Labels assigned by the first annotator.

    y2 : array, shape = [n_samples]
        Labels assigned by the second annotator. The kappa statistic is
        symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.

    labels : array, shape = [n_classes], optional
        List of labels to index the matrix. This may be used to select a
        subset of labels. If None, all labels that appear at least once in
        ``y1`` or ``y2`` are used.

    weights : str, optional
        List of weighting type to calculate the score. None means no weighted;
        "linear" means linear weighted; "quadratic" means quadratic weighted.

    Returns
    -------
    kappa : float
        The kappa statistic, which is a number between -1 and 1. The maximum
        value means complete agreement; zero or lower means chance agreement.

    References
    ----------
    .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
           Educational and Psychological Measurement 20(1):37-46.
           doi:10.1177/001316446002000104.
    .. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
           computational linguistics". Computational Linguistics 34(4):555-596.
           <http://www.mitpressjournals.org/doi/abs/10.1162/coli.07-034-R2#.V0J1MJMrIWo>`_
    .. [3] `Wikipedia entry for the Cohen's kappa.
           <https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
    """
    # NOTE: the docstring is a raw string because it contains the LaTeX
    # command ``\kappa``; in a non-raw string ``\k`` is an invalid escape
    # sequence (DeprecationWarning since Python 3.6).
    confusion = confusion_matrix(y1, y2, labels=labels)
    n_classes = confusion.shape[0]
    sum0 = np.sum(confusion, axis=0)
    sum1 = np.sum(confusion, axis=1)
    # Expected agreement under two independent annotators that reproduce
    # the observed marginal label distributions.
    expected = np.outer(sum0, sum1) / np.sum(sum0)

    if weights is None:
        # Unweighted kappa: every off-diagonal (disagreement) cell costs 1.
        # Use the builtin ``int``; ``np.int`` is deprecated and removed in
        # NumPy >= 1.24.
        w_mat = np.ones([n_classes, n_classes], dtype=int)
        w_mat.flat[:: n_classes + 1] = 0
    elif weights == "linear" or weights == "quadratic":
        # Disagreement cost grows with the (squared) distance between the
        # two assigned class indices.
        w_mat = np.zeros([n_classes, n_classes], dtype=int)
        w_mat += np.arange(n_classes)
        if weights == "linear":
            w_mat = np.abs(w_mat - w_mat.T)
        else:
            w_mat = (w_mat - w_mat.T) ** 2
    else:
        raise ValueError("Unknown kappa weighting type.")

    k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
    return 1 - k
def jaccard_similarity_score(y_true, y_pred, normalize=True,
                             sample_weight=None):
    """Jaccard similarity coefficient score

    The Jaccard index [1], or Jaccard similarity coefficient, of a sample
    is the size of the intersection of its predicted and true label sets
    divided by the size of their union.

    Read more in the :ref:`User Guide <jaccard_similarity_score>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        When ``True``, return the average of the per-sample Jaccard
        similarity coefficients; otherwise return their sum.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    score : float
        Average (``normalize == True``) or sum (``normalize == False``)
        of the Jaccard similarity coefficients over the sample set.  Best
        performance is 1 when normalizing, the number of samples
        otherwise.

    See also
    --------
    accuracy_score, hamming_loss, zero_one_loss

    Notes
    -----
    In binary and multiclass classification, this function is equivalent
    to the ``accuracy_score``. It differs in the multilabel classification
    problem.

    References
    ----------
    .. [1] `Wikipedia entry for the Jaccard index
           <https://en.wikipedia.org/wiki/Jaccard_index>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import jaccard_similarity_score
    >>> y_pred = [0, 2, 1, 3]
    >>> y_true = [0, 1, 2, 3]
    >>> jaccard_similarity_score(y_true, y_pred)
    0.5
    >>> jaccard_similarity_score(y_true, y_pred, normalize=False)
    2

    In the multilabel case with binary label indicators:

    >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
        np.ones((2, 2)))
    0.75
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)

    if y_type.startswith('multilabel'):
        # Zero-size unions produce a 0/0 division; silence it and patch
        # the affected entries below (empty vs. empty counts as perfect).
        with np.errstate(divide='ignore', invalid='ignore'):
            union_size = count_nonzero(y_true + y_pred, axis=1)
            intersection_size = count_nonzero(y_true.multiply(y_pred),
                                              axis=1)
            score = intersection_size / union_size
            score[union_size == 0.0] = 1.0
    else:
        # For 1d targets, the Jaccard index degenerates to exact match.
        score = y_true == y_pred

    return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
    """Compute the Matthews correlation coefficient (MCC) for binary classes

    The Matthews correlation coefficient is a balanced quality measure for
    binary (two-class) classifications that accounts for true and false
    positives and negatives, even with very unbalanced class sizes. It is
    in essence a correlation coefficient between -1 and +1: +1 is a perfect
    prediction, 0 an average random prediction and -1 an inverse
    prediction. The statistic is also known as the phi coefficient.
    [source: Wikipedia]

    Only in the binary case does this relate to information about true and
    false positives and negatives. See references below.

    Read more in the :ref:`User Guide <matthews_corrcoef>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        Ground truth (correct) target values.

    y_pred : array, shape = [n_samples]
        Estimated targets as returned by a classifier.

    sample_weight : array-like of shape = [n_samples], default None
        Sample weights.

    Returns
    -------
    mcc : float
        The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 and inverse
        prediction).

    References
    ----------
    .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
       accuracy of prediction algorithms for classification: an overview
       <http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
    .. [2] `Wikipedia entry for the Matthews Correlation Coefficient
       <https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_

    Examples
    --------
    >>> from sklearn.metrics import matthews_corrcoef
    >>> y_true = [+1, +1, +1, -1]
    >>> y_pred = [+1, -1, +1, +1]
    >>> matthews_corrcoef(y_true, y_pred)  # doctest: +ELLIPSIS
    -0.33...
    """
    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    if y_type != "binary":
        raise ValueError("%s is not supported" % y_type)

    # Re-encode both label vectors with one shared 0/1 mapping so the
    # correlation below operates on numeric values.
    encoder = LabelEncoder()
    encoder.fit(np.hstack([y_true, y_pred]))
    y_true = encoder.transform(y_true)
    y_pred = encoder.transform(y_pred)

    # Weighted Pearson correlation between the two encoded vectors.
    true_centered = y_true - np.average(y_true, weights=sample_weight)
    pred_centered = y_pred - np.average(y_pred, weights=sample_weight)
    covariance = np.average(true_centered * pred_centered,
                            weights=sample_weight)
    true_variance = np.average(true_centered ** 2, weights=sample_weight)
    pred_variance = np.average(pred_centered ** 2, weights=sample_weight)

    mcc = covariance / np.sqrt(true_variance * pred_variance)
    # A constant y_true or y_pred gives a zero denominator and hence nan;
    # the score is defined as 0 in that degenerate case.
    return 0. if np.isnan(mcc) else mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
    """Zero-one classification loss.

    Returns the fraction of misclassified samples (float) when
    ``normalize`` is ``True``, otherwise their number (int).  The best
    performance is 0.

    Read more in the :ref:`User Guide <zero_one_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    normalize : bool, optional (default=True)
        If ``False``, return the number of misclassifications.
        Otherwise, return the fraction of misclassifications.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float or int,
        If ``normalize == True``, return the fraction of misclassifications
        (float), else it returns the number of misclassifications (int).

    Notes
    -----
    In multilabel classification, the zero_one_loss function corresponds to
    the subset zero-one loss: a sample counts as an error unless its entire
    label set is predicted correctly.

    See also
    --------
    accuracy_score, hamming_loss, jaccard_similarity_score

    Examples
    --------
    >>> from sklearn.metrics import zero_one_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> zero_one_loss(y_true, y_pred)
    0.25
    >>> zero_one_loss(y_true, y_pred, normalize=False)
    1

    In the multilabel case with binary label indicators:

    >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
    0.5
    """
    # The zero-one loss is the complement of the accuracy score.
    accuracy = accuracy_score(y_true, y_pred,
                              normalize=normalize,
                              sample_weight=sample_weight)

    if normalize:
        return 1 - accuracy

    # Un-normalized case: subtract the (possibly weighted) number of hits
    # from the total (possibly weighted) number of samples.
    if sample_weight is None:
        n_samples = _num_samples(y_true)
    else:
        n_samples = np.sum(sample_weight)
    return n_samples - accuracy
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
             sample_weight=None):
    """Compute the F1 score, also known as balanced F-score or F-measure

    The F1 score is the harmonic mean of precision and recall, with both
    contributing equally::

        F1 = 2 * (precision * recall) / (precision + recall)

    It reaches its best value at 1 and its worst at 0.  In the multi-class
    and multi-label case, this is the weighted average of the F1 score of
    each class.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``.  Labels present in the data can
        be excluded, while labels absent from the data contribute 0 to a
        macro average.  For multilabel targets, labels are column indices.
        By default, all labels in ``y_true`` and ``y_pred`` are used in
        sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        Ignored for multiclass/multilabel data; use ``labels=[pos_label]``
        with ``average != 'binary'`` to report a single label's score.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
                       'samples', 'weighted']
        Required for multiclass/multilabel targets.  ``None`` returns
        per-class scores; otherwise:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``
            (binary targets only).
        ``'micro'``:
            Count total true positives, false negatives and false
            positives globally.
        ``'macro'``:
            Unweighted mean over labels; ignores label imbalance.
        ``'weighted'``:
            Mean over labels weighted by support; can yield an F-score
            outside the precision-recall interval.
        ``'samples'``:
            Average over instances (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    f1_score : float or array of float, shape = [n_unique_labels]
        F1 score of the positive class in binary classification or weighted
        average of the F1 scores of each class for the multiclass task.

    References
    ----------
    .. [1] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_

    Examples
    --------
    >>> from sklearn.metrics import f1_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> f1_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.26...
    >>> f1_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> f1_score(y_true, y_pred, average='weighted')  # doctest: +ELLIPSIS
    0.26...
    >>> f1_score(y_true, y_pred, average=None)
    array([ 0.8,  0. ,  0. ])
    """
    # F1 is the F-beta score with beta == 1: precision and recall carry
    # equal weight in the harmonic mean.
    return fbeta_score(y_true, y_pred, 1,
                       labels=labels,
                       pos_label=pos_label,
                       average=average,
                       sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
                average='binary', sample_weight=None):
    """Compute the F-beta score

    The F-beta score is the weighted harmonic mean of precision and recall,
    reaching its optimal value at 1 and its worst value at 0.

    The `beta` parameter determines the weight of precision in the combined
    score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
    favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
    only recall).

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    beta : float
        Weight of precision in harmonic mean.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``.  Labels present in the data can
        be excluded, while labels absent from the data contribute 0 to a
        macro average.  For multilabel targets, labels are column indices.
        By default, all labels in ``y_true`` and ``y_pred`` are used in
        sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        Ignored for multiclass/multilabel data; use ``labels=[pos_label]``
        with ``average != 'binary'`` to report a single label's score.

    average : string, [None, 'binary' (default), 'micro', 'macro', \
                       'samples', 'weighted']
        Required for multiclass/multilabel targets.  ``None`` returns
        per-class scores; otherwise:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``
            (binary targets only).
        ``'micro'``:
            Count total true positives, false negatives and false
            positives globally.
        ``'macro'``:
            Unweighted mean over labels; ignores label imbalance.
        ``'weighted'``:
            Mean over labels weighted by support; can yield an F-score
            outside the precision-recall interval.
        ``'samples'``:
            Average over instances (multilabel only).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        F-beta score of the positive class in binary classification or
        weighted average of the F-beta score of each class for the
        multiclass task.

    References
    ----------
    .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
           Modern Information Retrieval. Addison Wesley, pp. 327-328.
    .. [2] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_

    Examples
    --------
    >>> from sklearn.metrics import fbeta_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.23...
    >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.33...
    >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
    ... # doctest: +ELLIPSIS
    0.23...
    >>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
    ... # doctest: +ELLIPSIS
    array([ 0.71...,  0.        ,  0.        ])
    """
    # Delegate to the shared work-horse, asking it to warn only about
    # ill-defined F-scores, and keep just the F component of its output.
    prfs = precision_recall_fscore_support(y_true, y_pred,
                                           beta=beta,
                                           labels=labels,
                                           pos_label=pos_label,
                                           average=average,
                                           warn_for=('f-score',),
                                           sample_weight=sample_weight)
    return prfs[2]
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
    """Performs division and handles divide-by-zero.

    Wherever the denominator is zero, the corresponding result element is
    set to zero and a single warning is emitted (the metric, modifier and
    average arguments only shape the warning text).
    """
    zero_mask = denominator == 0.0
    result = numerator / denominator
    if not np.any(zero_mask):
        return result

    # Replace the inf/nan entries produced by the zero denominators.
    result[zero_mask] = 0.0

    # Build an appropriate warning, e.g. "Precision and F-score are
    # ill-defined and being set to 0.0 in labels with no predicted samples".
    axis0, axis1 = 'sample', 'label'
    if average == 'samples':
        axis0, axis1 = axis1, axis0

    metric_affected = metric in warn_for
    fscore_affected = 'f-score' in warn_for
    if metric_affected and fscore_affected:
        msg_start = '{0} and F-score are'.format(metric.title())
    elif metric_affected:
        msg_start = '{0} is'.format(metric.title())
    elif fscore_affected:
        msg_start = 'F-score is'
    else:
        # Caller asked for no warnings about this metric.
        return result

    msg = ('{0} ill-defined and being set to 0.0 {{0}} '
           'no {1} {2}s.'.format(msg_start, modifier, axis0))
    if len(zero_mask) == 1:
        msg = msg.format('due to')
    else:
        msg = msg.format('in {0}s with'.format(axis1))
    warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
    return result
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
                                    pos_label=1, average=None,
                                    warn_for=('precision', 'recall',
                                              'f-score'),
                                    sample_weight=None):
    """Compute precision, recall, F-measure and support for each class

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.

    The F-beta score can be interpreted as a weighted harmonic mean of
    the precision and recall, where an F-beta score reaches its best
    value at 1 and worst score at 0.

    The F-beta score weights recall more than precision by a factor of
    ``beta``. ``beta == 1.0`` means recall and precision are equally important.

    The support is the number of occurrences of each class in ``y_true``.

    If ``pos_label is None`` and in binary classification, this function
    returns the average precision, recall and F-measure if ``average``
    is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    beta : float, 1.0 by default
        The strength of recall versus precision in the F-score.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is binary.
        If the data are multiclass or multilabel, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
                       'weighted']
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

    warn_for : tuple or set, for internal use
        This determines which warnings will be made in the case that this
        function is being used to return only one of its metrics.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]

    support : int (if average is not None) or array of int, shape =\
        [n_unique_labels]
        The number of occurrences of each label in ``y_true``.

    References
    ----------
    .. [1] `Wikipedia entry for the Precision and recall
           <https://en.wikipedia.org/wiki/Precision_and_recall>`_
    .. [2] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_
    .. [3] `Discriminative Methods for Multi-labeled Classification Advances
           in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
           Godbole, Sunita Sarawagi
           <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`

    Examples
    --------
    >>> from sklearn.metrics import precision_recall_fscore_support
    >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
    >>> precision_recall_fscore_support(y_true, y_pred, average='macro')
    ... # doctest: +ELLIPSIS
    (0.22..., 0.33..., 0.26..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='micro')
    ... # doctest: +ELLIPSIS
    (0.33..., 0.33..., 0.33..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
    ... # doctest: +ELLIPSIS
    (0.22..., 0.33..., 0.26..., None)

    It is possible to compute per-label precisions, recalls, F1-scores and
    supports instead of averaging:

    >>> precision_recall_fscore_support(y_true, y_pred, average=None,
    ... labels=['pig', 'dog', 'cat'])
    ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
    (array([ 0. ,  0. ,  0.66...]),
     array([ 0.,  0.,  1.]),
     array([ 0. ,  0. ,  0.8]),
     array([2, 2, 2]))
    """
    # --- Validate the averaging mode and beta before touching the data ---
    average_options = (None, 'micro', 'macro', 'weighted', 'samples')
    if average not in average_options and average != 'binary':
        raise ValueError('average has to be one of ' +
                         str(average_options))
    if beta <= 0:
        raise ValueError("beta should be >0 in the F-beta score")

    y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    present_labels = unique_labels(y_true, y_pred)

    # 'binary' averaging restricts the computation to the pos_label class.
    if average == 'binary':
        if y_type == 'binary':
            if pos_label not in present_labels:
                if len(present_labels) < 2:
                    # Only negative labels
                    return (0., 0., 0., 0)
                else:
                    raise ValueError("pos_label=%r is not a valid label: %r" %
                                     (pos_label, present_labels))
            labels = [pos_label]
        else:
            raise ValueError("Target is %s but average='binary'. Please "
                             "choose another average setting." % y_type)
    elif pos_label not in (None, 1):
        # pos_label has no effect outside 'binary' averaging; tell the user.
        warnings.warn("Note that pos_label (set to %r) is ignored when "
                      "average != 'binary' (got %r). You may use "
                      "labels=[pos_label] to specify a single positive class."
                      % (pos_label, average), UserWarning)

    if labels is None:
        labels = present_labels
        n_labels = None
    else:
        # Requested labels come first; labels present in the data but not
        # requested are appended so counts exist for every label, then the
        # results are trimmed back to the first n_labels entries below.
        n_labels = len(labels)
        labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
                                                 assume_unique=True)])

    # Calculate tp_sum, pred_sum, true_sum ###
    if y_type.startswith('multilabel'):
        # For 'samples' averaging the statistics are per-row (per sample),
        # otherwise per-column (per label).
        sum_axis = 1 if average == 'samples' else 0

        # All labels are index integers for multilabel.
        # Select labels:
        if not np.all(labels == present_labels):
            if np.max(labels) > np.max(present_labels):
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d > %d' %
                                 (np.max(labels), np.max(present_labels)))
            if np.min(labels) < 0:
                raise ValueError('All labels must be in [0, n labels). '
                                 'Got %d < 0' % np.min(labels))
            y_true = y_true[:, labels[:n_labels]]
            y_pred = y_pred[:, labels[:n_labels]]

        # calculate weighted counts; y_true/y_pred are sparse indicator
        # matrices here, so elementwise multiply gives the intersection.
        true_and_pred = y_true.multiply(y_pred)
        tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
                               sample_weight=sample_weight)
        pred_sum = count_nonzero(y_pred, axis=sum_axis,
                                 sample_weight=sample_weight)
        true_sum = count_nonzero(y_true, axis=sum_axis,
                                 sample_weight=sample_weight)

    elif average == 'samples':
        raise ValueError("Sample-based precision, recall, fscore is "
                         "not meaningful outside multilabel "
                         "classification. See the accuracy_score instead.")
    else:
        # Multiclass path: encode labels as 0..len(labels)-1 and count with
        # bincount instead of building a full confusion matrix.
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        y_pred = le.transform(y_pred)
        sorted_labels = le.classes_

        # labels are now from 0 to len(labels) - 1 -> use bincount
        tp = y_true == y_pred
        tp_bins = y_true[tp]
        if sample_weight is not None:
            tp_bins_weights = np.asarray(sample_weight)[tp]
        else:
            tp_bins_weights = None

        if len(tp_bins):
            tp_sum = bincount(tp_bins, weights=tp_bins_weights,
                              minlength=len(labels))
        else:
            # Pathological case: no true positives at all.  Zero all three
            # sums; pred_sum/true_sum are recomputed below when non-empty.
            true_sum = pred_sum = tp_sum = np.zeros(len(labels))
        if len(y_pred):
            pred_sum = bincount(y_pred, weights=sample_weight,
                                minlength=len(labels))
        if len(y_true):
            true_sum = bincount(y_true, weights=sample_weight,
                                minlength=len(labels))

        # Retain only selected labels (they were listed first in ``labels``;
        # map them back through the encoder's sorted class order).
        indices = np.searchsorted(sorted_labels, labels[:n_labels])
        tp_sum = tp_sum[indices]
        true_sum = true_sum[indices]
        pred_sum = pred_sum[indices]

    if average == 'micro':
        # Collapse per-label counts into single global totals.
        tp_sum = np.array([tp_sum.sum()])
        pred_sum = np.array([pred_sum.sum()])
        true_sum = np.array([true_sum.sum()])

    # Finally, we have all our sufficient statistics. Divide! #
    beta2 = beta ** 2
    with np.errstate(divide='ignore', invalid='ignore'):
        # Divide, and on zero-division, set scores to 0 and warn:
        # Oddly, we may get an "invalid" rather than a "divide" error
        # here.
        precision = _prf_divide(tp_sum, pred_sum,
                                'precision', 'predicted', average, warn_for)
        recall = _prf_divide(tp_sum, true_sum,
                             'recall', 'true', average, warn_for)

        # Don't need to warn for F: either P or R warned, or tp == 0 where pos
        # and true are nonzero, in which case, F is well-defined and zero
        f_score = ((1 + beta2) * precision * recall /
                   (beta2 * precision + recall))
        f_score[tp_sum == 0] = 0.0

    # Average the results
    if average == 'weighted':
        weights = true_sum
        if weights.sum() == 0:
            # No true samples for any requested label: everything is 0.
            return 0, 0, 0, None
    elif average == 'samples':
        weights = sample_weight
    else:
        weights = None

    if average is not None:
        assert average != 'binary' or len(precision) == 1
        precision = np.average(precision, weights=weights)
        recall = np.average(recall, weights=weights)
        f_score = np.average(f_score, weights=weights)
        true_sum = None  # return no support

    return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
                    average='binary', sample_weight=None):
    """Compute the precision.

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the
    number of true positives and ``fp`` the number of false positives,
    i.e. the ability of the classifier not to label as positive a sample
    that is negative.  The best value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``.  Labels present in the data
        can be excluded (e.g. to compute a multiclass average ignoring a
        majority negative class); labels absent from the data contribute
        0 components to a macro average.  For multilabel targets, labels
        are column indices.  By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is
        binary.  Ignored for multiclass or multilabel data; to report
        scores for a single label there, set ``labels=[pos_label]`` with
        ``average != 'binary'``.

    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        This parameter is required for multiclass/multilabel targets.
        If ``None``, the scores for each class are returned.  Otherwise:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average,
            weighted by support (the number of true instances for each
            label).  This alters 'macro' to account for label imbalance;
            it can result in an F-score that is not between precision
            and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average
            (only meaningful for multilabel classification where this
            differs from :func:`accuracy_score`).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Precision of the positive class in binary classification or
        weighted average of the precision of each class for the
        multiclass task.

    Examples
    --------
    >>> from sklearn.metrics import precision_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> precision_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.22...
    >>> precision_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> precision_score(y_true, y_pred, average='weighted')
    ... # doctest: +ELLIPSIS
    0.22...
    >>> precision_score(y_true, y_pred, average=None)  # doctest: +ELLIPSIS
    array([ 0.66...,  0.        ,  0.        ])
    """
    # Precision is the first element of the (precision, recall, fscore,
    # support) tuple; restrict ill-defined-metric warnings to precision.
    scores = precision_recall_fscore_support(y_true, y_pred,
                                             labels=labels,
                                             pos_label=pos_label,
                                             average=average,
                                             warn_for=('precision',),
                                             sample_weight=sample_weight)
    return scores[0]
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
                 sample_weight=None):
    """Compute the recall.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number
    of true positives and ``fn`` the number of false negatives, i.e. the
    ability of the classifier to find all the positive samples.  The best
    value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : list, optional
        The set of labels to include when ``average != 'binary'``, and
        their order if ``average is None``.  Labels present in the data
        can be excluded (e.g. to compute a multiclass average ignoring a
        majority negative class); labels absent from the data contribute
        0 components to a macro average.  For multilabel targets, labels
        are column indices.  By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.

        .. versionchanged:: 0.17
           parameter *labels* improved for multiclass problem.

    pos_label : str or int, 1 by default
        The class to report if ``average='binary'`` and the data is
        binary.  Ignored for multiclass or multilabel data; to report
        scores for a single label there, set ``labels=[pos_label]`` with
        ``average != 'binary'``.

    average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
                       'weighted']
        This parameter is required for multiclass/multilabel targets.
        If ``None``, the scores for each class are returned.  Otherwise:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average,
            weighted by support (the number of true instances for each
            label).  This alters 'macro' to account for label imbalance;
            it can result in an F-score that is not between precision
            and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average
            (only meaningful for multilabel classification where this
            differs from :func:`accuracy_score`).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Recall of the positive class in binary classification or weighted
        average of the recall of each class for the multiclass task.

    Examples
    --------
    >>> from sklearn.metrics import recall_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> recall_score(y_true, y_pred, average='macro')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average='micro')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average='weighted')  # doctest: +ELLIPSIS
    0.33...
    >>> recall_score(y_true, y_pred, average=None)
    array([ 1.,  0.,  0.])
    """
    # Recall is the second element of the (precision, recall, fscore,
    # support) tuple; restrict ill-defined-metric warnings to recall.
    scores = precision_recall_fscore_support(y_true, y_pred,
                                             labels=labels,
                                             pos_label=pos_label,
                                             average=average,
                                             warn_for=('recall',),
                                             sample_weight=sample_weight)
    return scores[1]
def classification_report(y_true, y_pred, labels=None, target_names=None,
                          sample_weight=None, digits=2):
    """Build a text report showing the main classification metrics.

    Read more in the :ref:`User Guide <classification_report>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.

    labels : array, shape = [n_labels]
        Optional list of label indices to include in the report.

    target_names : list of strings
        Optional display names matching the labels (same order).

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    digits : int
        Number of digits for formatting output floating point values.

    Returns
    -------
    report : string
        Text summary of the precision, recall, F1 score for each class.

        The reported averages are a prevalence-weighted macro-average
        across classes (equivalent to
        :func:`precision_recall_fscore_support` with
        ``average='weighted'``).

        Note that in binary classification, recall of the positive class
        is also known as "sensitivity"; recall of the negative class is
        "specificity".

    Examples
    --------
    >>> from sklearn.metrics import classification_report
    >>> y_true = [0, 1, 2, 2, 2]
    >>> y_pred = [0, 0, 2, 2, 1]
    >>> target_names = ['class 0', 'class 1', 'class 2']
    >>> print(classification_report(y_true, y_pred, target_names=target_names))
                 precision    recall  f1-score   support
    <BLANKLINE>
        class 0       0.50      1.00      0.67         1
        class 1       0.00      0.00      0.00         1
        class 2       1.00      0.67      0.80         3
    <BLANKLINE>
    avg / total       0.70      0.60      0.61         5
    <BLANKLINE>
    """
    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)

    footer_heading = 'avg / total'

    if target_names is None:
        target_names = [u'%s' % l for l in labels]
    # First column must fit the longest class name and the footer heading.
    widest_name = max(len(cn) for cn in target_names)
    width = max(widest_name, len(footer_heading), digits)

    headers = ["precision", "recall", "f1-score", "support"]
    head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
    parts = [head_fmt.format(u'', *headers, width=width), u'\n\n']

    # Per-class statistics (no averaging); the footer averages by support.
    p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
                                                  labels=labels,
                                                  average=None,
                                                  sample_weight=sample_weight)

    row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'
    for fields in zip(target_names, p, r, f1, s):
        parts.append(row_fmt.format(*fields, width=width, digits=digits))
    parts.append(u'\n')

    # Support-weighted averages of each metric plus the total support.
    parts.append(row_fmt.format(footer_heading,
                                np.average(p, weights=s),
                                np.average(r, weights=s),
                                np.average(f1, weights=s),
                                np.sum(s),
                                width=width, digits=digits))
    return u''.join(parts)
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
                 classes=None):
    """Compute the average Hamming loss.

    The Hamming loss is the fraction of labels that are incorrectly
    predicted.

    Read more in the :ref:`User Guide <hamming_loss>`.

    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) labels.

    y_pred : 1d array-like, or label indicator array / sparse matrix
        Predicted labels, as returned by a classifier.

    labels : array, shape = [n_labels], optional (default=None)
        Integer array of labels.  If not provided, labels will be
        inferred from y_true and y_pred.

        .. versionadded:: 0.18

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

        .. versionadded:: 0.18

    classes : array, shape = [n_labels], optional
        (deprecated) Integer array of labels.  This parameter has been
        renamed to ``labels`` in version 0.18 and will be removed in 0.20.

    Returns
    -------
    loss : float or int,
        Return the average Hamming loss between element of ``y_true`` and
        ``y_pred``.

    See Also
    --------
    accuracy_score, jaccard_similarity_score, zero_one_loss

    Notes
    -----
    In multiclass classification, the Hamming loss corresponds to the
    Hamming distance between ``y_true`` and ``y_pred``, which is
    equivalent to the subset ``zero_one_loss`` function.

    In multilabel classification the Hamming loss differs from the subset
    zero-one loss: zero-one counts an entire label set as wrong unless it
    matches exactly, while Hamming loss penalizes individual labels.  The
    Hamming loss is upperbounded by the subset zero-one loss and, when
    normalized over samples, always lies between 0 and 1.

    References
    ----------
    .. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
           An Overview. International Journal of Data Warehousing & Mining,
           3(3), 1-13, July-September 2007.

    .. [2] `Wikipedia entry on the Hamming distance
           <https://en.wikipedia.org/wiki/Hamming_distance>`_

    Examples
    --------
    >>> from sklearn.metrics import hamming_loss
    >>> y_pred = [1, 2, 3, 4]
    >>> y_true = [2, 2, 3, 4]
    >>> hamming_loss(y_true, y_pred)
    0.25

    In the multilabel case with binary label indicators:

    >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
    0.75
    """
    if classes is not None:
        # Backward-compatible alias for the renamed parameter.
        warnings.warn("'classes' was renamed to 'labels' in version 0.18 and "
                      "will be removed in 0.20.", DeprecationWarning)
        labels = classes

    y_type, y_true, y_pred = _check_targets(y_true, y_pred)

    if labels is None:
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)

    # Weighted normalization constant for the multilabel denominator.
    mean_weight = 1. if sample_weight is None else np.mean(sample_weight)

    if y_type.startswith('multilabel'):
        # Count (weighted) cell-wise disagreements and normalize by the
        # total (weighted) number of label cells.
        n_differences = count_nonzero(y_true - y_pred,
                                      sample_weight=sample_weight)
        return (n_differences /
                (y_true.shape[0] * len(labels) * mean_weight))

    if y_type in ["binary", "multiclass"]:
        # Single-output case: simply the (weighted) error rate.
        return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)

    raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
             labels=None):
    """Log loss, aka logistic loss or cross-entropy loss.

    This is the loss function used in (multinomial) logistic regression
    and extensions of it such as neural networks, defined as the negative
    log-likelihood of the true labels given a probabilistic classifier's
    predictions. The log loss is only defined for two or more labels.
    For a single sample with true label yt in {0,1} and
    estimated probability yp that yt = 1, the log loss is

        -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))

    Read more in the :ref:`User Guide <log_loss>`.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels for n_samples samples.

    y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
        Predicted probabilities, as returned by a classifier's
        predict_proba method. If ``y_pred.shape = (n_samples,)``
        the probabilities provided are assumed to be that of the
        positive class. The labels in ``y_pred`` are assumed to be
        ordered alphabetically, as done by
        :class:`preprocessing.LabelBinarizer`.

    eps : float
        Log loss is undefined for p=0 or p=1, so probabilities are
        clipped to max(eps, min(1 - eps, p)).

    normalize : bool, optional (default=True)
        If true, return the mean loss per sample.
        Otherwise, return the sum of the per-sample losses.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    labels : array-like, optional (default=None)
        If not provided, labels will be inferred from y_true. If ``labels``
        is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
        assumed to be binary and are inferred from ``y_true``.

        .. versionadded:: 0.18

    Returns
    -------
    loss : float

    Examples
    --------
    >>> log_loss(["spam", "ham", "ham", "spam"],  # doctest: +ELLIPSIS
    ...          [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
    0.21616...

    References
    ----------
    C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
    p. 209.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).
    """
    y_pred = check_array(y_pred, ensure_2d=False)
    check_consistent_length(y_pred, y_true)

    # Fit the binarizer on the explicit label set if supplied, otherwise
    # on the labels observed in y_true.
    lb = LabelBinarizer()

    if labels is not None:
        lb.fit(labels)
    else:
        lb.fit(y_true)

    # Log loss needs at least two classes to be meaningful.
    if len(lb.classes_) == 1:
        if labels is None:
            raise ValueError('y_true contains only one label ({0}). Please '
                             'provide the true labels explicitly through the '
                             'labels argument.'.format(lb.classes_[0]))
        else:
            raise ValueError('The labels array needs to contain at least two '
                             'labels for log_loss, '
                             'got {0}.'.format(lb.classes_))

    transformed_labels = lb.transform(y_true)

    # LabelBinarizer yields a single column for binary problems; expand to
    # an explicit two-column (negative, positive) indicator matrix.
    if transformed_labels.shape[1] == 1:
        transformed_labels = np.append(1 - transformed_labels,
                                       transformed_labels, axis=1)

    # Clipping
    y_pred = np.clip(y_pred, eps, 1 - eps)

    # If y_pred is of single dimension, assume y_true to be binary
    # and then check.
    if y_pred.ndim == 1:
        y_pred = y_pred[:, np.newaxis]
    if y_pred.shape[1] == 1:
        y_pred = np.append(1 - y_pred, y_pred, axis=1)

    # Check if dimensions are consistent.
    transformed_labels = check_array(transformed_labels)
    if len(lb.classes_) != y_pred.shape[1]:
        if labels is None:
            raise ValueError("y_true and y_pred contain different number of "
                             "classes {0}, {1}. Please provide the true "
                             "labels explicitly through the labels argument. "
                             "Classes found in "
                             "y_true: {2}".format(transformed_labels.shape[1],
                                                  y_pred.shape[1],
                                                  lb.classes_))
        else:
            raise ValueError('The number of classes in labels is different '
                             'from that in y_pred. Classes found in '
                             'labels: {0}'.format(lb.classes_))

    # Renormalize so each row of (clipped) probabilities sums to 1 before
    # taking the cross-entropy.
    y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
    loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)

    return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
    """Average hinge loss (non-regularized).

    In binary class case, assuming labels in y_true are encoded with +1 and -1,
    when a prediction mistake is made, ``margin = y_true * pred_decision`` is
    always negative (since the signs disagree), implying ``1 - margin`` is
    always greater than 1.  The cumulated hinge loss is therefore an upper
    bound of the number of mistakes made by the classifier.

    In multiclass case, the function expects that either all the labels are
    included in y_true or an optional labels argument is provided which
    contains all the labels.  The multilabel margin is calculated according
    to Crammer-Singer's method.  As in the binary case, the cumulated hinge loss
    is an upper bound of the number of mistakes made by the classifier.

    Read more in the :ref:`User Guide <hinge_loss>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True target, consisting of integers of two values. The positive label
        must be greater than the negative label.

    pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
        Predicted decisions, as output by decision_function (floats).

    labels : array, optional, default None
        Contains all the labels for the problem. Used in multiclass hinge loss.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] `Wikipedia entry on the Hinge loss
           <https://en.wikipedia.org/wiki/Hinge_loss>`_

    .. [2] Koby Crammer, Yoram Singer. On the Algorithmic
           Implementation of Multiclass Kernel-based Vector
           Machines. Journal of Machine Learning Research 2,
           (2001), 265-292

    .. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
           by Robert C. Moore, John DeNero.
           <http://www.ttic.edu/sigml/symposium2011/papers/
           Moore+DeNero_Regularization.pdf>`_

    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.metrics import hinge_loss
    >>> X = [[0], [1]]
    >>> y = [-1, 1]
    >>> est = svm.LinearSVC(random_state=0)
    >>> est.fit(X, y)
    LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
         intercept_scaling=1, loss='squared_hinge', max_iter=1000,
         multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
         verbose=0)
    >>> pred_decision = est.decision_function([[-2], [3], [0.5]])
    >>> pred_decision  # doctest: +ELLIPSIS
    array([-2.18...,  2.36...,  0.09...])
    >>> hinge_loss([-1, 1, 1], pred_decision)  # doctest: +ELLIPSIS
    0.30...

    In the multiclass case:

    >>> X = np.array([[0], [1], [2], [3]])
    >>> Y = np.array([0, 1, 2, 3])
    >>> labels = np.array([0, 1, 2, 3])
    >>> est = svm.LinearSVC()
    >>> est.fit(X, Y)
    LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
         intercept_scaling=1, loss='squared_hinge', max_iter=1000,
         multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
         verbose=0)
    >>> pred_decision = est.decision_function([[-1], [2], [3]])
    >>> y_true = [0, 2, 3]
    >>> hinge_loss(y_true, pred_decision, labels)  #doctest: +ELLIPSIS
    0.56...
    """
    check_consistent_length(y_true, pred_decision, sample_weight)
    pred_decision = check_array(pred_decision, ensure_2d=False)
    y_true = column_or_1d(y_true)
    y_true_unique = np.unique(y_true)

    if y_true_unique.size > 2:
        # Multiclass (Crammer-Singer) case: one decision column per class
        # is required, so every class must appear in y_true or be listed
        # explicitly in `labels`.
        if (labels is None and pred_decision.ndim > 1 and
                (np.size(y_true_unique) != pred_decision.shape[1])):
            raise ValueError("Please include all labels in y_true "
                             "or pass labels as third argument")
        if labels is None:
            labels = y_true_unique
        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)

        # `mask` is True everywhere except at each sample's true-class
        # column; the margin is the true-class score minus the largest
        # rival-class score (Crammer-Singer margin).
        mask = np.ones_like(pred_decision, dtype=bool)
        mask[np.arange(y_true.shape[0]), y_true] = False
        margin = pred_decision[~mask]
        margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
                         axis=1)
    else:
        # Handles binary class case
        # this code assumes that positive and negative labels
        # are encoded as +1 and -1 respectively
        pred_decision = column_or_1d(pred_decision)
        pred_decision = np.ravel(pred_decision)

        # Re-encode y_true to {-1, +1} regardless of the original labels.
        lbin = LabelBinarizer(neg_label=-1)
        y_true = lbin.fit_transform(y_true)[:, 0]

        try:
            margin = y_true * pred_decision
        except TypeError:
            raise TypeError("pred_decision should be an array of floats.")

    losses = 1 - margin
    # The hinge_loss doesn't penalize good enough predictions.
    losses[losses <= 0] = 0
    return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
    """Check that y_true is binary and y_prob contains valid probabilities.

    Returns the first column of the binarized ``y_true`` (a 0/1 indicator
    array); raises ``ValueError`` on more than two classes or on
    probabilities outside [0, 1].
    """
    check_consistent_length(y_true, y_prob)

    observed_labels = np.unique(y_true)
    if len(observed_labels) > 2:
        raise ValueError("Only binary classification is supported. "
                         "Provided labels %s." % observed_labels)

    if y_prob.max() > 1:
        raise ValueError("y_prob contains values greater than 1.")
    if y_prob.min() < 0:
        raise ValueError("y_prob contains values less than 0.")

    return label_binarize(y_true, observed_labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
    """Compute the Brier score.

    The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures
    the mean squared difference between (1) the predicted probability
    assigned to the possible outcomes for item i, and (2) the actual
    outcome.  The lower the Brier score is for a set of predictions, the
    better the predictions are calibrated.  The Brier score always takes
    a value between zero and one, since this is the largest possible
    difference between a predicted probability (between zero and one) and
    the actual outcome (0 or 1).

    The Brier score is appropriate for binary and categorical outcomes
    that can be structured as true or false, but is inappropriate for
    ordinal variables which can take on three or more values (the Brier
    score assumes all possible outcomes are equivalently "distant" from
    one another).  Which label is considered the positive label is
    controlled via the parameter ``pos_label``, which defaults to 1.

    Read more in the :ref:`User Guide <calibration>`.

    Parameters
    ----------
    y_true : array, shape (n_samples,)
        True targets.

    y_prob : array, shape (n_samples,)
        Probabilities of the positive class.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    pos_label : int or str, default=None
        Label of the positive class. If None, the maximum label is used as
        positive class.

    Returns
    -------
    score : float
        Brier score

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import brier_score_loss
    >>> y_true = np.array([0, 1, 1, 0])
    >>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
    >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    >>> brier_score_loss(y_true, y_prob)  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true, 1-y_prob, pos_label=0)  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true_categorical, y_prob, \
                         pos_label="ham")  # doctest: +ELLIPSIS
    0.037...
    >>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
    0.0

    References
    ----------
    .. [1] `Wikipedia entry for the Brier score.
           <https://en.wikipedia.org/wiki/Brier_score>`_
    """
    y_true = column_or_1d(y_true)
    y_prob = column_or_1d(y_prob)
    assert_all_finite(y_true)
    assert_all_finite(y_prob)

    if pos_label is None:
        pos_label = y_true.max()

    # Reduce y_true to a {0, 1} indicator for the positive class, then
    # validate it (and y_prob) as a binary probabilistic prediction.
    indicator = np.array(y_true == pos_label, int)
    indicator = _check_binary_probabilistic_predictions(indicator, y_prob)

    squared_diff = (indicator - y_prob) ** 2
    return np.average(squared_diff, weights=sample_weight)
| bsd-3-clause |
"""
================================
Gradient Boosting regularization
================================

Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.

The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).

.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
    Learning Ed. 2", Springer, 2009.
"""
print(__doc__)

# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import ensemble
from sklearn import datasets


X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)

# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)

# Train on the first 2000 samples; hold out the remaining 10000 for testing.
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]

# Shared settings for every model; each strategy below overrides a subset.
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4,
                   'max_depth': None, 'random_state': 2,
                   'min_samples_split': 5}

# (legend label, line color, parameter overrides) per regularization strategy.
strategies = [
    ('No shrinkage', 'orange',
     {'learning_rate': 1.0, 'subsample': 1.0}),
    ('learning_rate=0.1', 'turquoise',
     {'learning_rate': 0.1, 'subsample': 1.0}),
    ('subsample=0.5', 'blue',
     {'learning_rate': 1.0, 'subsample': 0.5}),
    ('learning_rate=0.1, subsample=0.5', 'gray',
     {'learning_rate': 0.1, 'subsample': 0.5}),
    ('learning_rate=0.1, max_features=2', 'magenta',
     {'learning_rate': 0.1, 'max_features': 2}),
]

plt.figure()

for label, color, setting in strategies:
    params = dict(original_params)
    params.update(setting)

    clf = ensemble.GradientBoostingClassifier(**params)
    clf.fit(X_train, y_train)

    # compute test set deviance
    test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
    for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
        # clf.loss_ assumes that y_test[i] in {0, 1}
        test_deviance[i] = clf.loss_(y_test, y_pred)

    # Plot every 5th boosting iteration to keep the figure readable.
    plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
             '-', color=color, label=label)

plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')

plt.show()
| bsd-3-clause |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib.colors import LogNorm

# If True, redo the (slow) solid-angle double integrals and cache the
# results in scatt_bg3_wpw.npz; if False, load the cached arrays.
RECALC = False

if RECALC:
    from scipy.integrate import dblquad
    import sys, os
    sys.path.append(os.path.dirname(os.path.realpath(__file__))+'/../../core/')
    from sim_calcspec import *

    # Sample matrix: a water/protein mixture (presumably mimicking tissue);
    # Zn is the trace element whose K-alpha XRF signal is of interest.
    protein = compound(CP = 'C30H50O10N9S')
    water = compound(CP = 'H2O')
    Zn = compound(CP = 'Zn')
    p1 = water
    p1 = mix_compound([water, protein],[.75,.15])  # overrides plain water

    ev = 1e4  # incident beam energy (eV)
    theta = np.radians(np.arange(1,181))
    beta = np.radians(np.arange(-90,91))
    theta, beta = np.meshgrid(theta,beta)
    # Matrix scattering: polarized Rayleigh + Compton differential terms.
    scatt = p1.dmac_rayleigh_pol(ev,theta,beta)+p1.dmac_compton_pol(ev,theta,beta)
    Z = symbol2number('Zn')
    xrf = mac_pe_line_kissel_cascade(ev, Z, KA_LINE)

    # Differential cross sections weighted by sin(theta) for integration
    # in spherical coordinates.
    _dcs_rayleigh_pol = lambda beta_rad, theta_rad:p1.dmac_rayleigh_pol(ev, theta_rad, beta_rad)*np.sin(theta_rad)
    _dcs_compton_pol = lambda beta_rad, theta_rad:p1.dmac_compton_pol(ev, theta_rad, beta_rad)*np.sin(theta_rad)

    ray_sum = np.array([])
    comp_sum = np.array([])
    subtend_arr = np.array([])
    # Integrate the scatter over square collection apertures of increasing
    # semi-angle (1-90 degrees) centered on theta = 90 deg.
    for x in range(1,91):
        omega=solid_angle(angle_range = [90-x, 90+x, -x, x], n_theta = 4*x, n_beta = 4*x)
        x = np.radians(x)
        a,b,gfun,hfun = [np.pi/2-x, np.pi/2+x, lambda y: -x, lambda y: x]
        ray = dblquad(_dcs_rayleigh_pol,a,b,gfun,hfun)[0]/omega.subtend
        comp = dblquad(_dcs_compton_pol,a,b,gfun,hfun)[0]/omega.subtend
        ray_sum = np.append(ray_sum,ray)
        comp_sum = np.append(comp_sum,comp)
        subtend_arr = np.append(subtend_arr,omega.subtend)

    # NOTE(review): this uses a scatter weight of 0.05, while the cached-data
    # path below uses 0.02 -- confirm which factor is intended.
    im = np.array([r*xrf/(4*np.pi)*np.sqrt(subtend_arr/(r*xrf/(4*np.pi)+0.05*2*(ray_sum+comp_sum))) for r in np.logspace(-9,-3,10)])
    np.savez("scatt_bg3_wpw.npz",ray=ray_sum,comp=comp_sum,xrf=xrf,subtend=subtend_arr)

# Load the cached integrals (also written by the RECALC branch above).
data = np.load("scatt_bg3_wpw.npz")
ray=data['ray']
comp=data['comp']
xrf=data['xrf']
subtend=data['subtend']

# Contrast parameter for Zn mass concentrations from 1e-9 to 1e-3
# (rows) versus collection semi-angle (columns).
c = np.logspace(-9,-3,10).reshape(-1,1)
im = c*xrf/(4*np.pi)*np.sqrt(subtend/(c*xrf/(4*np.pi)+0.02*2*(ray+comp)))

# # #fonts# # #
import matplotlib
from matplotlib import rc
matplotlib.rcParams['pdf.fonttype'] = 'truetype'
fontProperties = {'family':'serif','serif':['Arial'],
                  'weight' : 'normal', 'size' : '12'}
rc('font',**fontProperties)
# # #

# 2-D map of the contrast parameter.
plt.figure()
plt.imshow(im,extent=[1,90,1e-9,1e-3],origin = 'lower',norm=LogNorm())
plt.colorbar()
plt.yscale('log')
plt.xlabel('Collection semi-angle (deg)')
plt.ylabel('Zn mass concentration')

# 2-D map of 9/Theta^2 (presumably a required-dose-like quantity -- verify).
plt.figure()
plt.imshow(9/(im**2),extent=[1,90,1e-9,1e-3],origin = 'lower',norm=LogNorm(), vmin = 5e1, vmax = 5e14)
plt.colorbar()
plt.yscale('log')
plt.xlabel('Collection semi-angle (deg)')
plt.ylabel('Zn mass concentration')

# # contour plot
# from matplotlib.ticker import LogFormatterMathtext
# cplot=plt.contour(9/(im**2),extent=[1,90,1e-9,1e-3],colors='w',norm=LogNorm()) #contour plot, log scale
# plt.clabel(cplot,inline=True,fontsize=15,fmt=LogFormatterMathtext()) #label on the contour, also log scale
plt.show()

# 3-D surface of log10(contrast) vs concentration and semi-angle.
X, Y = np.meshgrid(range(10),range(90))
plt.figure()
ax = plt.gca(projection='3d')
# surf = ax.plot_surface(X, Y, im, cmap=cm.coolwarm)
surf = ax.plot_surface(X, Y, np.log10(im[::-1].T), rstride=1, cstride=1, cmap=cm.coolwarm,
                       linewidth=0, antialiased=True)
plt.yticks([0,45,90],[0,45,90])
plt.xticks([0,3,6,9],[r'10$^{-3}$',r'10$^{-5}$',r'10$^{-7}$',r'10$^{-9}$'])
plt.xlabel('Zn mass concentration')
plt.ylabel(r'Collection semi-angle $\phi$ (deg)')
ax.set_zlabel(r'Contrast parameter $\Theta$')

# Relabel the z axis ticks as powers of ten (data are log10 values).
zlabels = {}
# for z in np.arange(-7,1,1):
#     zlabels[z+7] = r''
for z in np.arange(-7,1,1):
    zlabels[z+7] = r'10$^{%d}$'%z
ax.set_zticklabels(zlabels)

# Colorbar with hand-placed power-of-ten labels.
cbar = plt.colorbar(surf, shrink=0.8, aspect=10)
cmax,cmin = (np.log10(im.max()),np.log10(im.min()))
cbar.ax.get_yaxis().set_ticks([])
for j, lab in enumerate([r'10$^{-6}$',r'10$^{-5}$',r'10$^{-4}$',r'10$^{-3}$',r'10$^{-2}$',r'10$^{-1}$']):
    cbar.ax.text(1.5, (j-6-cmin) / (cmax-cmin), lab, ha='center', va='center')
cbar.ax.get_yaxis().labelpad = 15
ax1 = cbar.ax.twiny()
ax1.set_xlabel(r'$\Theta$', rotation=0)
ax1.set_xticks([])
plt.show()
jbloomlab/dms_tools2 | dms_tools2/utils.py | 1 | 59560 | """
===================
utils
===================
Miscellaneous utilities for ``dms_tools2``.
"""
import os
import math
import sys
import time
import platform
import importlib
import logging
import tempfile
import textwrap
import itertools
import collections
import random
import re
import pysam
import numpy
import scipy.misc
import scipy.special
import pandas
import gzip
import dms_tools2
from dms_tools2 import CODONS, CODON_TO_AA, AAS_WITHSTOP, AA_TO_CODONS, NTS
import dms_tools2._cutils
def sessionInfo():
    """Returns string with information about session / packages."""
    # Header plus core time / platform / version details.
    lines = [
            'Version information:',
            '\tTime and date: {0}'.format(time.asctime()),
            '\tPlatform: {0}'.format(platform.platform()),
            '\tPython version: {0}'.format(
                    sys.version.replace('\n', ' ')),
            '\tdms_tools2 version: {0}'.format(dms_tools2.__version__),
            ]
    # One line per package of interest: its version when available, or a
    # note that the version is unknown / the package cannot be imported.
    for modname in ['Bio', 'pandas', 'numpy', 'IPython',
            'jupyter', 'matplotlib', 'plotnine', 'natsort', 'pystan',
            'scipy', 'seaborn', 'phydmslib', 'statsmodels', 'rpy2',
            'regex', 'umi_tools']:
        try:
            version = importlib.import_module(modname).__version__
        except AttributeError:
            lines.append('\t{0} version unknown'.format(modname))
        except ImportError:
            lines.append("\t{0} cannot be imported".format(modname))
        else:
            lines.append('\t{0} version: {1}'.format(modname, version))
    return '\n'.join(lines)
def initLogger(logfile, prog, args):
    """Initialize output logging for scripts.

    Args:
        `logfile` (str or `sys.stdout`)
            Name of file to which log is written, or
            `sys.stdout` if you just want to write information
            to standard output.
        `prog` (str)
            Name of program for which we are logging.
        `args` (dict)
            Program arguments as arg / value pairs.

    Returns:
        If `logfile` is a string giving a file name, returns
        an opened and initialized `logging.Logger`. If `logfile`
        is `sys.stdout`, then writes information to `sys.stdout`
        and returns `None`. In either case, basic information is
        written about the program and args.
    """
    if logfile == sys.stdout:
        # plain writes to stdout; no logging.Logger is created here
        logfile.write("Beginning execution of {0} in directory {1}\n\n".format(
                prog, os.getcwd()))
        logfile.write("{0}\n\n".format(sessionInfo()))
        logfile.write("Parsed the following arguments:\n\t{0}\n\n".format(
                '\n\t'.join(['{0} = {1}'.format(arg, val) for (arg, val)
                in args.items()])))
    else:
        # start each run with a fresh log file
        if os.path.isfile(logfile):
            os.remove(logfile)
        # NOTE: basicConfig also configures the root logger the first
        # time it is called in the process
        logging.basicConfig(level=logging.INFO,
                format='%(asctime)s - %(levelname)s - %(message)s')
        logger = logging.getLogger(prog)
        logfile_handler = logging.FileHandler(logfile)
        logger.addHandler(logfile_handler)
        formatter = logging.Formatter(
                '%(asctime)s - %(levelname)s - %(message)s')
        logfile_handler.setFormatter(formatter)
        try:
            logger.info("Beginning execution of {0} in directory {1}\n"
                    .format(prog, os.getcwd()))
            logger.info("Progress is being logged to {0}".format(logfile))
            logger.info("{0}\n".format(sessionInfo()))
            logger.info("Parsed the following arguments:\n\t{0}\n".format(
                    '\n\t'.join(['{0} = {1}'.format(arg, val) for (arg, val)
                    in args.items()])))
        except:
            # make sure setup failures are recorded before re-raising
            logger.exception("Error")
            raise
        return logger
def iteratePairedFASTQ(r1files, r2files, r1trim=None, r2trim=None):
    """Iterates over FASTQ files for single or paired-end sequencing.

    Args:
        `r1files` (list or str)
            Name of R1 FASTQ file or list of such files. Can optionally
            be gzipped.
        `r2files` (list or str or `None`)
            Like `r1files` but for R2 files, or `None` if no R2.
        `r1trim` (int or `None`)
            If not `None`, trim `r1` and `q1` to be no longer than this.
        `r2trim` (int or `None`)
            Like `r1trim` but for R2.

    Returns:
        Each iteration returns `(name, r1, r2, q1, q2, fail)` where:

            - `name` is a string giving the read name

            - `r1` and `r2` are strings giving the reads; `r2`
              is `None` if no R2.

            - `q1` and `q2` are strings giving the PHRED Q scores;
              `q2` is none if no R2.

            - `fail` is `True` if either read failed Illumina chastity
              filter, `False` if both passed, `None` if info not present.

    We run a simple test by first writing an example FASTQ file and
    then testing on it.

    >>> n1_1 = '@DH1DQQN1:933:HMLH5BCXY:1:1101:2165:1984 1:N:0:CGATGT'
    >>> r1_1 = 'ATGCAATTG'
    >>> q1_1 = 'GGGGGIIII'
    >>> n2_1 = '@DH1DQQN1:933:HMLH5BCXY:1:1101:2165:1984 2:N:0:CGATGT'
    >>> r2_1 = 'CATGCATA'
    >>> q2_1 = 'G<GGGIII'
    >>> tf = tempfile.NamedTemporaryFile
    >>> with tf(mode='w') as r1file, tf(mode='w') as r2file:
    ...     _ = r1file.write('\\n'.join([
    ...             n1_1, r1_1, '+', q1_1,
    ...             n1_1.replace(':N:', ':Y:'), r1_1, '+', q1_1,
    ...             n1_1.split()[0], r1_1, '+', q1_1,
    ...             ]))
    ...     r1file.flush()
    ...     _ = r2file.write('\\n'.join([
    ...             n2_1, r2_1, '+', q2_1,
    ...             n2_1, r2_1, '+', q2_1,
    ...             n2_1, r2_1, '+', q2_1,
    ...             ]))
    ...     r2file.flush()
    ...     itr = iteratePairedFASTQ(r1file.name, r2file.name, r1trim=4, r2trim=5)
    ...     next(itr) == (n1_1.split()[0][1 : ], r1_1[ : 4],
    ...             r2_1[ : 5], q1_1[ : 4], q2_1[ : 5], False)
    ...     next(itr) == (n1_1.split()[0][1 : ], r1_1[ : 4],
    ...             r2_1[ : 5], q1_1[ : 4], q2_1[ : 5], True)
    ...     next(itr) == (n1_1.split()[0][1 : ], r1_1[ : 4],
    ...             r2_1[ : 5], q1_1[ : 4], q2_1[ : 5], None)
    True
    True
    True

    Now do the same test but for just R1:

    >>> with tf(mode='w') as r1file:
    ...     _ = r1file.write('\\n'.join([
    ...             n1_1, r1_1, '+', q1_1,
    ...             n1_1.replace(':N:', ':Y:'), r1_1, '+', q1_1,
    ...             n1_1.split()[0], r1_1, '+', q1_1,
    ...             ]))
    ...     r1file.flush()
    ...     itr_R1 = iteratePairedFASTQ(r1file.name, None, r1trim=4)
    ...     next(itr_R1) == (n1_1.split()[0][1 : ], r1_1[ : 4],
    ...             None, q1_1[ : 4], None, False)
    ...     next(itr_R1) == (n1_1.split()[0][1 : ], r1_1[ : 4],
    ...             None, q1_1[ : 4], None, True)
    ...     next(itr_R1) == (n1_1.split()[0][1 : ], r1_1[ : 4],
    ...             None, q1_1[ : 4], None, None)
    True
    True
    True
    """
    if isinstance(r1files, str):
        r1files = [r1files]
        if r2files is not None:
            r2files = [r2files]
    if not all(map(os.path.isfile, r1files)):
        raise ValueError('cannot find all `r1files`')
    if r2files is None:
        r2files = [None] * len(r1files)
    elif len(r1files) != len(r2files):
        raise ValueError('`r1files` and `r2files` differ in length')
    elif not all(map(os.path.isfile, r2files)):
        raise ValueError('cannot find all `r2files`')
    for (r1file, r2file) in zip(r1files, r2files):
        r1reader = pysam.FastxFile(r1file)
        if r2file is None:
            read_iterator = r1reader
        else:
            r2reader = pysam.FastxFile(r2file)
            read_iterator = zip(r1reader, r2reader)
        for tup in read_iterator:
            if r2file is None:
                a1 = tup
                r2 = q2 = None
            else:
                a1, a2 = tup
                r2 = a2.sequence
                q2 = a2.quality
                if a2.comment is not None:
                    id2 = f"{a2.name} {a2.comment}".split()
                else:
                    id2 = a2.name.split()
                name2 = id2[0]
            r1 = a1.sequence
            q1 = a1.quality
            if a1.comment is not None:
                id1 = f"{a1.name} {a1.comment}".split()
            else:
                id1 = a1.name.split()
            name1 = id1[0]
            if r2file is not None:
                # trims last two chars, need for SRA downloaded files
                if name1[-2 : ] == '.1' and name2[-2 : ] == '.2':
                    name1 = name1[ : -2]
                    name2 = name2[ : -2]
                if name1 != name2:
                    raise ValueError(f"name mismatch {name1} vs {name2}")
            # parse chastity filter assuming CASAVA 1.8 header;
            # initialize to None so a malformed flag character (not
            # 'N' or 'Y') cannot leave `fail` unset or carrying a
            # stale value from the previous read
            fail = None
            try:
                f1 = id1[1][2]
                if r2file is None:
                    f2 = 'N'
                else:
                    f2 = id2[1][2]
                if f1 == 'N' and f2 == 'N':
                    fail = False
                elif f1 in ['N', 'Y'] and f2 in ['N', 'Y']:
                    fail = True
            except IndexError:
                fail = None # header does not specify chastity filter
            if r1trim is not None:
                r1 = r1[ : r1trim]
                q1 = q1[ : r1trim]
            if (r2trim is not None) and (r2file is not None):
                r2 = r2[ : r2trim]
                q2 = q2[ : r2trim]
            yield (name1, r1, r2, q1, q2, fail)
def lowQtoN(r, q, minq, use_cutils=True):
    """Mask low-quality positions of a read with ``N``.

    Args:
        `r` (str)
            A string representing a sequencing read.
        `q` (str)
            String of same length as `r` holding Q scores
            in Sanger ASCII encoding.
        `minq` (length-one string)
            Replace all positions in `r` where `q` is < this.
        `use_cutils` (bool)
            Use the faster implementation in the `_cutils` module.

    Returns:
        A copy of `r` in which every position whose quality
        character compares below `minq` is replaced with ``N``.

    >>> r = 'ATGCAT'
    >>> q = 'GB<.0+'
    >>> minq = '0'
    >>> lowQtoN(r, q, minq) == 'ATGNAN'
    True
    """
    if use_cutils:
        return dms_tools2._cutils.lowQtoN(r, q, minq)
    assert len(r) == len(q)
    # ASCII comparison of quality characters implements the Q cutoff
    return ''.join('N' if qchar < minq else base
                   for (base, qchar) in zip(r, q))
def buildReadConsensus(reads, minreads, minconcur, use_cutils=True):
    """Builds consensus sequence of some reads.

    You may want to pre-fill low-quality sites with ``N``
    using `lowQtoN`. An ``N`` is considered a non-called identity.

    Args:
        `reads` (list)
            List of reads as strings. If reads are not all same
            length, shorter ones are extended from 3' end with ``N``
            to match maximal length.
        `minreads` (int)
            Only call consensus at a site if at least this many reads
            have called identity.
        `minconcur` (float)
            Only call consensus at site if >= this fraction of called
            identities agree.
        `use_cutils` (bool)
            Use the faster implementation in the `_cutils` module.

    Returns:
        A string giving the consensus sequence. Non-called
        sites are returned as ``N```.

    >>> reads = ['ATGCAT',
    ...          'NTGNANA',
    ...          'ACGNNTAT',
    ...          'NTGNTA']
    >>> buildReadConsensus(reads, 2, 0.75) == 'ATGNNNAN'
    True
    >>> reads.append('CTGCATAT')
    >>> buildReadConsensus(reads, 2, 0.75) == 'NTGCATAT'
    True
    """
    if use_cutils:
        return dms_tools2._cutils.buildReadConsensus(reads,
                minreads, minconcur)
    lengths = [len(read) for read in reads]
    consensus = []
    for pos in range(max(lengths)):
        # tally called (non-N) identities among reads covering this site
        tallies = collections.Counter(
                read[pos] for (read, readlen) in zip(reads, lengths)
                if readlen > pos and read[pos] != 'N')
        ncalled = sum(tallies.values())
        if ncalled < minreads:
            consensus.append('N')
            continue
        # ties broken by the lexicographically largest character, as in
        # the sorted-tuple approach of the C implementation
        (best, nbest) = max(tallies.items(), key=lambda t: (t[1], t[0]))
        if nbest / float(ncalled) >= minconcur:
            consensus.append(best)
        else:
            consensus.append('N')
    return ''.join(consensus)
def rarefactionCurve(barcodes, *, maxpoints=1e5, logspace=True):
    """Rarefaction curve from list of barcodes.

    Uses the analytical formula for the rarefaction curve defined
    `on Wikipedia <https://en.wikipedia.org/wiki/Rarefaction_(ecology)#Derivation>`_.

    Args:
        `barcodes` (list or pandas Series)
            Holds the list of unique barcodes for which we calculate
            the rarefaction curve. It is expected that some of these
            barcodes will be repeated multiple times in the list if
            the sampling is approaching saturation.
        `maxpoints` (int)
            Only calculate values at this many points. The benefit
            of this is that it can become very costly to calculate
            the curve at every point when there are many points.
        `logspace` (True)
            Logarithmically space the `maxpoints` points for
            the calculation. This will give better results if
            we are subsampling and the curve saturates. Only
            done if we have to subsample.

    Returns:
        The 2-tuple `(nreads, nbarcodes)`, where both `nreads` and
        `nbarcodes` are lists of the same length, and `nbarcodes[i]`
        is the expected number of barcodes observed when there are
        `nreads[i]` reads.

    Here we take a very small list and show that the results given
    by the function are equivalent to those obtained by random
    subsampling:

    >>> barcodes = ['A', 'A', 'A', 'A', 'G', 'G', 'C', 'T']
    >>> (nreads, nbarcodes) = rarefactionCurve(barcodes)
    >>> random.seed(1)
    >>> nrand = 100000
    >>> sim_equal_calc = []
    >>> for n in range(1, len(barcodes) + 1):
    ...     nbarcodes_sim = sum([len(set(random.sample(barcodes, n)))
    ...             for _ in range(nrand)]) / nrand
    ...     sim_equal_calc.append(numpy.allclose(nbarcodes_sim,
    ...             nbarcodes[nreads.index(n)], atol=1e-2))
    >>> all(sim_equal_calc)
    True
    """
    N = len(barcodes) # total number of items
    Ni = collections.Counter(barcodes)
    K = len(Ni) # number of distinct barcodes
    Mj = collections.Counter(Ni.values())
    Nk, num = map(numpy.array, zip(*Mj.items()))
    # use simplification that (N - Ni)Cr(n) / (N)Cr(n) =
    # [(N - Ni)! * (N - n)!] / [N! * (N - Ni - n)!]
    #
    # Also use fact that gamma(x + 1) = x!
    nbarcodes = []
    lnFactorial_N = scipy.special.gammaln(N + 1)
    if logspace and N > maxpoints:
        nreads = list(numpy.unique(numpy.logspace(
                math.log10(1), math.log10(N),
                num=int(min(N, maxpoints))).astype('int')))
    else:
        # cast `num` to int: `maxpoints` may be a float (default 1e5),
        # and numpy.linspace requires an integer `num`
        nreads = list(numpy.unique(numpy.linspace(
                1, N, num=int(min(N, maxpoints))).astype('int')))
    for n in nreads:
        lnFactorial_N_minus_n = scipy.special.gammaln(N - n + 1)
        i = numpy.nonzero(N - Nk - n >= 0) # indices where this is true
        nbarcodes.append(
                K - (num[i] * numpy.exp(
                    scipy.special.gammaln(N - Nk[i] + 1) +
                    lnFactorial_N_minus_n -
                    lnFactorial_N -
                    scipy.special.gammaln(N - Nk[i] - n + 1))
                    ).sum()
                )
    return (nreads, nbarcodes)
def reverseComplement(s, use_cutils=True):
    """Gets reverse complement of DNA sequence `s`.

    Args:
        `s` (str)
            Sequence to reverse complement.
        `use_cutils` (bool)
            Use the faster implementation in the `_cutils` module.

    Returns:
        Reverse complement of `s` as a str.

    >>> s = 'ATGCAAN'
    >>> reverseComplement(s) == 'NTTGCAT'
    True
    """
    if use_cutils:
        return dms_tools2._cutils.reverseComplement(s)
    # pure-Python fallback: complement each base while walking 3' -> 5'
    complement = dms_tools2.NTCOMPLEMENT
    return ''.join(complement[base] for base in reversed(s))
def alignSubamplicon(refseq, r1, r2, refseqstart, refseqend, maxmuts,
        maxN, chartype, use_cutils=True):
    """Try to align subamplicon to reference sequence at defined location.

    Tries to align reads `r1` and `r2` to `refseq` at location
    specified by `refseqstart` and `refseqend`. Determines how many
    sites of type `chartype` have mutations, and if <= `maxmuts` conside
    the subamplicon to align if fraction of ambiguous nucleotides <= `maxN`.

    In `r1` and `r2`, an ``N`` indicates a non-called ambiguous identity.
    If the reads disagree in a region of overlap that is set to ``N`` in
    the final subamplicon, but if one read has ``N`` and the other a called
    identity, then the called identity is used in the final subamplicon.

    Args:
        `refseq` (str)
            Sequence to which we align. if `chartype` is 'codon',
            must be a valid coding (length multiple of 3).
        `r1` (str)
            The forward sequence to align.
        `r2` (str)
            The reverse sequence to align. When reverse complemented,
            should read backwards in `refseq`.
        `refseqstart` (int)
            The nucleotide in `refseq` (1, 2, ... numbering) where the
            first nucleotide in `r1` aligns.
        `refseqend` (int)
            The nucleotide in `refseq` (1, 2, ... numbering) where the
            first nucleotide in `r2` aligns (note that `r2` then reads
            backwards towards the 5' end of `refseq`).
        `maxmuts` (int or float)
            Maximum number of mutations of character `chartype` that
            are allowed in the aligned subamplicons from the two reads.
        `maxN` (int or float)
            Maximum number of nucleotides for which we allow
            ambiguous (``N``) identities in final subamplicon.
        `chartype` (str)
            Character type for which we count mutations.
            Currently, the only allowable value is 'codon'.
        `use_cutils` (bool)
            Use the faster implementation in the `_cutils` module.

    Returns:
        If reads align, return aligned subamplicon as string (of length
        `refseqend - refseqstart + 1`). Otherwise return `False`.

    >>> refseq = 'ATGGGGAAA'
    >>> s = alignSubamplicon(refseq, 'GGGGAA', 'TTTCCC', 3, 9, 1, 1, 'codon')
    >>> s == 'GGGGAAA'
    True
    >>> s = alignSubamplicon(refseq, 'GGGGAA', 'TTTCCC', 1, 9, 1, 1, 'codon')
    >>> s == False
    True
    >>> s = alignSubamplicon(refseq, 'GGGGAT', 'TTTCCC', 3, 9, 1, 0, 'codon')
    >>> s == False
    True
    >>> s = alignSubamplicon(refseq, 'GGGGAT', 'TTTCCC', 3, 9, 1, 1, 'codon')
    >>> s == 'GGGGANA'
    True
    >>> s = alignSubamplicon(refseq, 'GGGGAT', 'TATCCC', 3, 9, 1, 0, 'codon')
    >>> s == 'GGGGATA'
    True
    >>> s = alignSubamplicon(refseq, 'GGGGAT', 'TATCCC', 3, 9, 0, 0, 'codon')
    >>> s == False
    True
    >>> s = alignSubamplicon(refseq, 'GGGNAA', 'TTTCCC', 3, 9, 0, 0, 'codon')
    >>> s == 'GGGGAAA'
    True
    >>> s = alignSubamplicon(refseq, 'GGGNAA', 'TTNCCC', 3, 9, 0, 0, 'codon')
    >>> s == 'GGGGAAA'
    True
    >>> s = alignSubamplicon(refseq, 'GTTTAA', 'TTTAAA', 3, 9, 1, 0, 'codon')
    >>> s == 'GTTTAAA'
    True
    >>> s = alignSubamplicon(refseq, 'GGGGTA', 'TTACCC', 3, 9, 1, 0, 'codon')
    >>> s == 'GGGGTAA'
    True
    >>> s = alignSubamplicon(refseq, 'GGGCTA', 'TTAGCC', 3, 9, 1, 0, 'codon')
    >>> s == False
    True
    """
    # orient r2 in the same direction as refseq before merging
    r2 = reverseComplement(r2)
    if use_cutils:
        return dms_tools2._cutils.alignSubamplicon(refseq, r1, r2,
                refseqstart, refseqend, maxmuts, maxN, chartype)
    assert chartype in ['codon'], "Invalid chartype"
    if chartype == 'codon':
        assert len(refseq) % 3 == 0, "refseq length not divisible by 3"
    len_subamplicon = refseqend - refseqstart + 1
    len_r1 = len(r1)
    len_subamplicon_minus_len_r2 = len_subamplicon - len(r2)
    # build the merged subamplicon site-by-site: r1 covers the 5' end,
    # r2 the 3' end, and disagreements in the overlap become N
    subamplicon = []
    for i in range(len_subamplicon):
        if i < len_subamplicon_minus_len_r2: # site not in r2
            if i < len_r1: # site in r1
                subamplicon.append(r1[i])
            else: # site not in r1
                subamplicon.append('N')
        else: # site in r2
            if i < len_r1: # site in r1
                r1i = r1[i]
                r2i = r2[i - len_subamplicon_minus_len_r2]
                if r1i == r2i:
                    subamplicon.append(r1i)
                elif r1i == 'N':
                    subamplicon.append(r2i)
                elif r2i == 'N':
                    subamplicon.append(r1i)
                else:
                    subamplicon.append('N')
            else: # site not in r1
                subamplicon.append(r2[i - len_subamplicon_minus_len_r2])
    subamplicon = ''.join(subamplicon)
    if subamplicon.count('N') > maxN:
        return False
    if chartype == 'codon':
        # startcodon: first complete codon covered (1-based numbering);
        # codonshift: offset into subamplicon where that codon begins
        if refseqstart % 3 == 1:
            startcodon = (refseqstart + 2) // 3
            codonshift = 0
        elif refseqstart % 3 == 2:
            startcodon = (refseqstart + 1) // 3 + 1
            codonshift = 2
        elif refseqstart % 3 == 0:
            startcodon = refseqstart // 3 + 1
            codonshift = 1
        nmuts = 0
        for icodon in range(startcodon, refseqend // 3 + 1):
            mutcodon = subamplicon[3 * (icodon - startcodon) + codonshift :
                    3 * (icodon - startcodon) + 3 + codonshift]
            if ('N' not in mutcodon) and (mutcodon !=
                    refseq[3 * icodon - 3 : 3 * icodon]):
                nmuts += 1
                if nmuts > maxmuts:
                    return False
    else:
        raise ValueError("Invalid chartype")
    return subamplicon
def incrementCounts(refseqstart, subamplicon, chartype, counts):
    """Increment counts dict based on an aligned subamplicon.

    This is designed for keeping track of counts of different
    mutations / identities when aligning many subamplicons to
    a sequence.

    Any positions where `subamplicon` has an ``N`` are ignored,
    and not added to `counts`.

    Args:
        `refseqstart` (int)
            First nucleotide position in 1, 2, ... numbering
            where `subamplicon` aligns.
        `subamplicon` (str)
            The subamplicon.
        `chartype` (str)
            Character type for which we are counting mutations.
            Currently, only allowable value is 'codon'.
        `counts` (dict)
            Stores counts of identities, and is incremented by
            this function. Is a dict keyed by every possible
            character (e.g., codon), with values lists with
            element `i` holding the counts for position `i`
            in 0, 1, ... numbering.

    Returns:
        On completion, `counts` has been incremented.

    Raises:
        ValueError if `chartype` is not 'codon'.

    >>> codonlen = 10
    >>> counts = dict([(codon, [0] * codonlen) for codon
    ...         in CODONS])
    >>> subamplicon1 = 'ATGGACTTTC'
    >>> incrementCounts(1, subamplicon1, 'codon', counts)
    >>> subamplicon2 = 'GGTCTTTCCCGGN'
    >>> incrementCounts(3, subamplicon2, 'codon', counts)
    >>> counts['ATG'][0] == 1
    True
    >>> counts['GAC'][1] == 1
    True
    >>> counts['GTC'][1] == 1
    True
    >>> counts['TTT'][2] == 2
    True
    >>> counts['CCC'][3] == 1
    True
    >>> sum([sum(c) for c in counts.values()]) == 6
    True
    """
    if chartype == 'codon':
        # map refseqstart (1-based nucleotide) to the first complete
        # codon covered (0-based) and the offset into subamplicon at
        # which that codon begins
        if refseqstart % 3 == 1:
            startcodon = (refseqstart + 2) // 3 - 1
            codonshift = 0
        elif refseqstart % 3 == 2:
            startcodon = (refseqstart + 1) // 3
            codonshift = 2
        else: # refseqstart % 3 == 0
            startcodon = refseqstart // 3
            codonshift = 1
        shiftedsubamplicon = subamplicon[codonshift : ]
        for i in range(len(shiftedsubamplicon) // 3):
            codon = shiftedsubamplicon[3 * i : 3 * i + 3]
            if 'N' not in codon:
                counts[codon][startcodon + i] += 1
    else:
        # raise for invalid chartype at the outer level (a mod-3 value
        # is always 0, 1, or 2, so the old inner `else` never fired and
        # invalid chartypes were silently ignored)
        raise ValueError("Invalid chartype")
def codonToAACounts(counts):
    """Makes amino-acid counts `pandas.DataFrame` from codon counts.

    Args:
        `counts` (`pandas.DataFrame`)
            Columns are the string `site` `wildtype` and all codons
            in `CODONS`. Additional columns are allowed
            but ignored.

    Returns:
        `aacounts` (`pandas.DataFrame`)
            Columns are the string `site` and all amino acids
            in `AAS_WITHSTOP` with counts for each
            amino acid made by summing counts for encoding codons.

    >>> d = {'site':[1, 2], 'othercol':[0, 0], 'ATG':[105, 1],
    ...         'GGG':[3, 117], 'GGA':[2, 20], 'TGA':[0, 1],
    ...         'wildtype':['ATG', 'GGG']}
    >>> for codon in CODONS:
    ...     if codon not in d:
    ...         d[codon] = [0, 0]
    >>> counts = pandas.DataFrame(d)
    >>> aacounts = codonToAACounts(counts)
    >>> 'othercol' in aacounts.columns
    False
    >>> all(aacounts['site'] == [1, 2])
    True
    >>> all(aacounts['wildtype'] == ['M', 'G'])
    True
    >>> all(aacounts['M'] == [105, 1])
    True
    >>> all(aacounts['G'] == [5, 137])
    True
    >>> all(aacounts['*'] == [0, 1])
    True
    >>> all(aacounts['V'] == [0, 0])
    True
    """
    # column order of result: site, wildtype, then amino acids
    records = {key: [] for key in ['site', 'wildtype'] + AAS_WITHSTOP}
    for (_, row) in counts.iterrows():
        records['site'].append(row['site'])
        records['wildtype'].append(CODON_TO_AA[row['wildtype']])
        # sum codon counts into their encoded amino acid for this row
        rowtotals = {aa: 0 for aa in AAS_WITHSTOP}
        for codon in CODONS:
            rowtotals[CODON_TO_AA[codon]] += row[codon]
        for (aa, total) in rowtotals.items():
            records[aa].append(total)
    return pandas.DataFrame(records)
def annotateCodonCounts(counts):
    """Gets annotated `pandas.DataFrame` from codon counts.

    Some of the programs (e.g., `dms2_bcsubamplicons`) create
    ``*_codoncounts.csv`` files when run with ``--chartype codon``.
    These CSV files have columns indicating the `site` and `wildtype`
    codon, as well as a column for each codon giving the counts for that
    codon. This function reads that file (or a `pandas.DataFrame` read
    from it) to return a `pandas.DataFrame` where a variety of additional
    useful annotations have been added.

    Args:
        `counts` (str)
            Name of existing codon counts CSV file, or `pandas.DataFrame`
            holding counts.

    Returns:
        `df` (`pandas.DataFrame`)
            The DataFrame with the information in `counts` plus
            the following added columns for each site:

            `ncounts` : number of counts at site

            `mutfreq` : mutation frequency at site

            `nstop` : number of stop-codon mutations

            `nsyn` : number of synonymous mutations

            `nnonsyn` : number of nonsynonymous mutations

            `n1nt` : number of 1-nucleotide codon mutations

            `n2nt` : number of 2-nucleotide codon mutations

            `n3nt` : number of 3-nucleotide codon mutations

            `AtoC`, `AtoG`, etc : number of each nucleotide mutation
            type among codon mutations with **one** nucleotide change.

            `mutfreq1nt`, `mutfreq2nt`, `mutfreq3nt` : frequency
            of 1-, 2-, and 3-nucleotide codon mutations at site.

    >>> d = {'site':[1, 2], 'wildtype':['ATG', 'GGG'], 'ATG':[105, 1],
    ...         'GGG':[3, 117], 'GGA':[2, 20], 'TGA':[0, 1]}
    >>> for codon in CODONS:
    ...     if codon not in d:
    ...         d[codon] = [0, 0]
    >>> counts = pandas.DataFrame(d)
    >>> with tempfile.NamedTemporaryFile(mode='w') as f:
    ...     counts.to_csv(f, index=False)
    ...     f.flush()
    ...     df = annotateCodonCounts(f.name)
    >>> all([all(df[col] == counts[col]) for col in counts.columns])
    True
    >>> all(df['ncounts'] == [110, 139])
    True
    >>> all(df['mutfreq'] == [5 / 110., 22 / 139.])
    True
    >>> all(df['nstop'] == [0, 1])
    True
    >>> all(df['nsyn'] == [0, 20])
    True
    >>> all(df['nnonsyn'] == [5, 1])
    True
    >>> all(df['n1nt'] == [0, 20])
    True
    >>> all(df['n2nt'] == [3, 2])
    True
    >>> all(df['n3nt'] == [2, 0])
    True
    >>> all(df['GtoA'] == [0, 20])
    True
    >>> all(df['AtoC'] == [0, 0])
    True
    >>> all(df['mutfreq1nt'] == [0, 20 / 139.])
    True
    >>> all(df['mutfreq3nt'] == [2 / 110., 0])
    True
    """
    if isinstance(counts, str):
        df = pandas.read_csv(counts)
    elif isinstance(counts, pandas.DataFrame):
        df = counts.copy()
    else:
        raise ValueError("invalid counts")
    assert set(CODONS) <= set(df.columns), \
            "Did not find counts for all codons"
    df['ncounts'] = df[CODONS].sum(axis=1)
    # `DataFrame.lookup` was removed in pandas 2.0; extract each row's
    # wildtype-codon count via numpy fancy indexing instead, which
    # works on both old and new pandas versions
    wtcounts = df[CODONS].to_numpy()[
            numpy.arange(len(df)),
            df[CODONS].columns.get_indexer(df['wildtype'])]
    df['mutfreq'] = (((df['ncounts'] - wtcounts)
            / df['ncounts'].astype('float'))
            .fillna(0))
    ntchanges = ['{0}to{1}'.format(nt1, nt2) for nt1 in dms_tools2.NTS
            for nt2 in dms_tools2.NTS if nt1 != nt2]
    nstoplist = []
    nsynlist = []
    nnonsynlist = []
    nXntlists = dict([(n + 1, []) for n in range(3)])
    nntchangeslists = dict([(ntchange, []) for ntchange in ntchanges])
    for (i, row) in df.iterrows():
        nstop = nsyn = nnonsyn = 0
        nXnt = dict([(n + 1, 0) for n in range(3)])
        nntchanges = dict([(ntchange, 0) for ntchange in ntchanges])
        wt = row['wildtype']
        wtaa = CODON_TO_AA[wt]
        for c in CODONS:
            if c == wt:
                continue
            aa = CODON_TO_AA[c]
            if aa == '*':
                nstop += row[c]
            elif aa == wtaa:
                nsyn += row[c]
            else:
                nnonsyn += row[c]
            # classify mutant codons by number of nucleotide changes;
            # single-nucleotide changes also tallied by change type
            ntdiffs = ['{0}to{1}'.format(nt1, nt2) for (nt1, nt2)
                    in zip(wt, c) if nt1 != nt2]
            nXnt[len(ntdiffs)] += row[c]
            if len(ntdiffs) == 1:
                nntchanges[ntdiffs[0]] += row[c]
        nstoplist.append(nstop)
        nsynlist.append(nsyn)
        nnonsynlist.append(nnonsyn)
        for n in range(3):
            nXntlists[n + 1].append(nXnt[n + 1])
        for ntchange in ntchanges:
            nntchangeslists[ntchange].append(nntchanges[ntchange])
    df = df.assign(nstop=nstoplist, nsyn=nsynlist, nnonsyn=nnonsynlist)
    df = df.assign(n1nt=nXntlists[1], n2nt=nXntlists[2], n3nt=nXntlists[3])
    for ntchange in ntchanges:
        df[ntchange] = nntchangeslists[ntchange]
    for nnt in range(3):
        df['mutfreq{0}nt'.format(nnt + 1)] = (df['n{0}nt'.format(nnt + 1)]
                / df['ncounts'].astype('float')).fillna(0)
    return df
def adjustErrorCounts(errcounts, counts, charlist, maxexcess):
    """Adjust error counts to not greatly exceed counts of interest.

    This function is useful when estimating preferences. Under the
    model, the error-control should not have a higher rate of error
    than the actual sample. However, this could happen if the experimental
    data don't fully meet the assumptions. So this function scales
    down the error counts in that case.

    Args:
        `errcounts` (pandas.DataFrame)
            Holds counts for error control.
        `counts` (pandas.DataFrame)
            Holds counts for which we are correcting errors.
        `charlist` (list)
            Characters for which we have counts.
        `maxexcess` (int)
            Only let error-control counts exceed actual by this much.

    Returns:
        A copy of `errcounts` except for any non-wildtype character,
        the maximum frequency of that character is adjusted to be
        at most the number predicted by the frequency in `counts`
        plus `maxexcess`.

    >>> counts = pandas.DataFrame({'site':[1], 'wildtype':['A'],
    ...         'A':500, 'C':10, 'G':40, 'T':20})
    >>> errcounts = pandas.DataFrame({'site':[1], 'wildtype':['A'],
    ...         'A':250, 'C':1, 'G':30, 'T':10})
    >>> charlist = ['A', 'C', 'G', 'T']
    >>> errcounts = errcounts[['site', 'wildtype'] + charlist]
    >>> adj_errcounts = adjustErrorCounts(errcounts, counts, charlist, 1)
    >>> set(adj_errcounts.columns) == set(errcounts.columns)
    True
    >>> all(adj_errcounts['site'] == errcounts['site'])
    True
    >>> all(adj_errcounts['wildtype'] == errcounts['wildtype'])
    True
    >>> (adj_errcounts[adj_errcounts['site'] == 1][charlist].values[0]
    ...         == numpy.array([250, 1, 21, 10])).all()
    True
    """
    origcols = counts.columns
    # align both frames row-by-row (asserted below)
    counts = counts.sort_values('site')
    errcounts = errcounts.sort_values('site')
    assert all(counts['site'] == errcounts['site'])
    assert all(counts['wildtype'] == errcounts['wildtype'])
    counts['total'] = counts[charlist].sum(axis=1).astype('float')
    errcounts['total'] = errcounts[charlist].sum(axis=1)
    # cap = frequency in `counts` scaled to error-control depth,
    # plus the allowed excess
    cap = (counts[charlist]
            .div(counts['total'], axis=0)
            .multiply(errcounts['total'], axis=0)
            + maxexcess).round().astype('int')
    adjusted = errcounts[charlist].where(errcounts[charlist] < cap,
            cap[charlist])
    # wildtype-character counts are never capped
    for char in charlist:
        adjusted[char] = adjusted[char].where(
                counts['wildtype'] != char, errcounts[char])
    # carry over the non-character columns (e.g., site, wildtype)
    for col in origcols:
        if col not in charlist:
            adjusted[col] = counts[col]
    return adjusted[origcols]
def convertCountsFormat(oldfile, newfile, charlist):
    """Convert counts file from ``dms_tools`` to ``dms_tools2`` format.

    Args:
        `oldfile` (str)
            Name of counts file in the old ``dms_tools`` format:
            http://jbloomlab.github.io/dms_tools/fileformats.html
        `newfile` (str)
            Name of created counts file in the ``dms_tools2`` format:
            https://jbloomlab.github.io/dms_tools2/dms2_bcsubamp.html
        `charlist` (list)
            List of characters that we expect in the counts files.
            For instance, could be `CODONS`.
    """
    # validate the old-format '# POSITION WT ...' header and map it
    # onto the new column names
    with open(oldfile) as f:
        header = f.readline()
    assert header[0] == '#'
    cols = header[1 : ].split()
    assert cols[0] == 'POSITION' and cols[1] == 'WT'
    cols = ['site', 'wildtype'] + cols[2 : ]
    assert set(charlist) == set(cols[2 : ])
    # use sep=r'\s+' rather than delim_whitespace=True: the latter is
    # deprecated in pandas 2.2 and removed in pandas 3.0; both parse
    # runs of whitespace identically
    old = pandas.read_csv(oldfile, sep=r'\s+',
            names=cols, comment='#')
    old.to_csv(newfile, index=False)
def renumberSites(renumbfile, infiles, missing='error',
        outfiles=None, outprefix=None, outdir=None):
    """Renumber sites in CSV files.

    Switch numbering scheme in files with a column named `site`.

    You must specify **exactly one** of `outfiles`,
    `outprefix`, and `outdir` as something other than `None`.

    Args:
        `renumbfile` (str)
            Name of existing CSV file with the re-numbering scheme.
            Should have columns with name `original` and `new`.
            Each entry in `original` should refer to a site in
            the input files, and each entry in `new` should be
            the new number for this site. If an entry in `new`
            is `None` or `nan` then it is dropped from the newly
            numbered files regardless of `missing`.
        `infiles` (list)
            List of existing CSV files that we are re-numbering.
            Each file must have an entry of `site`.
        `missing` (str)
            How to handle sites in `infiles` but not `renumbfile`.

            - `error`: raise an error

            - `skip`: skip renumbering, leave with original number

            - `drop`: drop any sites not in `renumbfile`
        `outfiles` (list)
            List of output files of the same length as `infiles`.
            The numbered version of `infiles` is named as the
            corresponding entry in `outfiles`.
        `outdir` (str)
            A directory name. The renumbered files have the same
            names as in `infile`, but are now placed in `outdir`.
        `outprefix` (str)
            The renumbered files have the same names and locations
            as `infiles`, but have the pre-pended filename extension
            `outprefix`.
    """
    assert os.path.isfile(renumbfile), "no renumbfile {0}".format(renumbfile)
    renumb = pandas.read_csv(renumbfile)
    assert {'original', 'new'} <= set(renumb.columns), \
            "renumbfile lacks columns `original` and/or `new`"
    for col in ['original', 'new']:
        assert len(renumb[col]) == len(set(renumb[col])), \
                "duplicate sites for {0} in {1}".format(col, renumbfile)
        # compare sites as strings so int / str site labels both work
        renumb[col] = renumb[col].astype('str')
    assert isinstance(infiles, list), "infiles is not a list"
    nin = len(infiles)
    infiles = [os.path.abspath(f) for f in infiles]
    assert len(set(infiles)) == nin, "duplicate files in `infiles`"
    # resolve the output naming scheme: exactly one of the three options
    if outfiles is not None:
        assert isinstance(outfiles, list), "`outfiles` not list"
        assert (outdir is None) and (outprefix is None), \
                "only specify one of `outfiles`, `outdir`, and `outprefix`"
        nout = len(outfiles)
        assert nout == nin, "`outfiles` and `infiles` different length"
    elif outdir is not None:
        assert isinstance(outdir, str), "`outdir` should be string"
        assert (outfiles is None) and (outprefix is None), \
                "only specify one of `outfiles`, `outdir`, and `outprefix`"
        if not os.path.isdir(outdir):
            os.mkdir(outdir)
        outfiles = [os.path.join(outdir, os.path.basename(f))
                for f in infiles]
    elif outprefix is not None:
        assert isinstance(outprefix, str), "`outdir` should be string"
        assert (outfiles is None) and (outdir is None), \
                "only specify one of `outfiles`, `outdir`, and `outprefix`"
        outfiles = [os.path.join(os.path.dirname(f), outprefix +
                os.path.basename(f)) for f in infiles]
    else:
        raise ValueError("specify `outdir`, `outprefix`, `outfiles`")
    outfiles = [os.path.abspath(f) for f in outfiles]
    assert len(set(outfiles)) == len(outfiles), "duplicate files in `outfiles`"
    assert not set(outfiles).intersection(set(infiles)), \
            "some in and outfiles the same"
    for (fin, fout) in zip(infiles, outfiles):
        df_in = pandas.read_csv(fin)
        assert 'site' in df_in.columns, "no `site` column in {0}".format(fin)
        df_in['site'] = df_in['site'].astype('str')
        if missing == 'error':
            if set(df_in['site']) > set(renumb['original']):
                raise ValueError("`missing` is `error`, excess sites in {0}"
                        .format(fin))
        elif missing == 'skip':
            pass
        elif missing == 'drop':
            df_in = df_in[df_in['site'].isin(renumb['original'])]
        else:
            raise ValueError("invalid `missing` of {0}".format(missing))
        # can't just use replace below because of this bug:
        # https://github.com/pandas-dev/pandas/issues/16051
        unmappedsites = df_in[~df_in['site'].isin(renumb['original'])]['site']
        # `Series.append` was removed in pandas 2.0; use `pandas.concat`
        # to extend the map so unmapped sites map onto themselves
        replacemap = dict(zip(
                pandas.concat([renumb['original'], unmappedsites]),
                pandas.concat([renumb['new'], unmappedsites])))
        df_in['site'] = df_in['site'].map(replacemap)
        # drop sites whose `new` entry was missing / null
        df_in = (df_in[df_in['site'].notnull()]
                 .query('site != "NaN"')
                 .query('site != "nan"')
                 .query('site != "None"')
                 )
        df_in.to_csv(fout, index=False)
def codonEvolAccessibility(seqs):
    """Accessibility of amino acids by nucleotide mutations.

    Args:
        `seqs` (str or list)
            A single coding sequence or a list of such sequences.

    Returns:
        A pandas DataFrame listing all sites in the sequence(s)
        numbered 1, 2, ..., with columns giving the accessibility
        of each amino acid by single nucleotide mutations.

    The accessibility of codon :math:`c` to amino-acid :math:`a`
    by single-nucleotide mutations is defined as the minimum
    number of nucleotide mutations needed to generate that
    amino-acid.

    For a collection of sequences, we calculate the
    accessibility as the weighted average of the accessibilities
    of all codons observed at that site in the collection of
    sequences.

    As an example, compute accessibility for one sequence:

    >>> s = "ATGGGA"
    >>> acc = codonEvolAccessibility(s)

    The returned pandas DataFrame `acc` is has a column named
    `site` plus columns for all amino acids:

    >>> all(acc.columns == ['site'] + AAS_WITHSTOP)
    True

    We look at entries for a few amino acids. At the first
    site, the wildtype entry in the sequence `s` is the codon
    for *M* (``ATG``). So at this site, the distance to *M*
    is 0. The distance to *I* (which has codon ``ATA`` as a
    codon) is 1, and the distance to *W* (which has only ``TGG``
    as a codon) is 2.

    >>> acc[['site', 'G', 'I', 'M', 'W']]
    site G I M W
    0 1 2.0 1.0 0.0 2.0
    1 2 0.0 2.0 3.0 2.0

    If we pass the function a list of multiple sequences,
    then the accessibilities are averaged over the sequences:

    >>> acc2 = codonEvolAccessibility(['ATGGGA', 'ATAGGA'])
    >>> acc2[['site', 'G', 'I', 'M', 'W']]
    site G I M W
    0 1 2.0 0.5 0.5 2.5
    1 2 0.0 2.0 3.0 2.0
    """
    # get number of nucleotide diffs between all pairs of codons
    nt_diffs = dict([
            ((c1, c2), sum(1 for x1, x2 in zip(c1, c2) if x1 != x2))
            for c1, c2 in itertools.product(CODONS, repeat=2)])
    # get number of nucleotide diffs to nearest codon for amino acid
    aa_nt_diffs = {}
    for c in CODONS:
        for aa, othercs in AA_TO_CODONS.items():
            aa_nt_diffs[(c, aa)] = min([nt_diffs[(c, c2)]
                    for c2 in othercs])
    # make sure seqs are of same valid length
    if isinstance(seqs, str):
        seqs = [seqs]
    assert len(seqs[0]) % 3 == 0, "seqs not of length divisible by 3"
    assert all([len(seqs[0]) == len(s) for s in seqs[1 : ]]), \
            "seqs not all of same length"
    # get nucleotide distances, summing for all sequences
    # (dists[site][aa] accumulates the per-sequence minimum distances)
    dists = collections.defaultdict(lambda: collections.defaultdict(float))
    for s in seqs:
        for r in range(len(s) // 3):
            c = s[3 * r : 3 * r + 3]
            assert c in CODONS, "invalid codon {0}".format(c)
            for aa in AAS_WITHSTOP:
                dists[r + 1][aa] += aa_nt_diffs[(c, aa)]
    # dividing by the number of sequences converts the sums to averages;
    # rename_axis / reset_index turn the 1-based site keys into a column
    return (pandas.DataFrame.from_dict(dists, orient='index')
            .rename_axis('site')
            [AAS_WITHSTOP]
            / len(seqs)).reset_index()
def sigFigStr(x, nsig):
    """Return string of `x` rounded to `nsig` significant figures.

    >>> sigFigStr(11190, 2)
    '11000'

    >>> sigFigStr(117, 2)
    '120'

    >>> sigFigStr(6, 2)
    '6.0'

    >>> sigFigStr(0.213, 2)
    '0.21'

    >>> sigFigStr(0.007517, 3)
    '0.00752'

    """
    if x <= 0:
        raise ValueError('currently only handles numbers > 0')
    # round to `nsig` significant figures via %g, then re-format
    rounded = float('%.*g' % (nsig, x))
    if rounded >= 10 ** (nsig - 1):
        # all significant figures sit left of the decimal point
        return str(round(rounded))
    # otherwise pad with enough decimal places to show `nsig` figures
    n_predecimal = math.floor(math.log10(rounded)) + 1
    n_postdecimal = nsig - n_predecimal
    assert n_postdecimal > 0, str(rounded)
    return '%.*f' % (n_postdecimal, rounded)
def getSubstitutions(wildtype, mutant, amino_acid=False):
    """Get space delimited string of substitutions

    Args:
        `wildtype` (str):
            The wildtype sequence

        `mutant` (str):
            The mutant sequence

        `amino_acid` (bool)
            Specify whether the sequence is amino acid.
            Default is False

    Returns:
        A space delimited string of substitutions present in the
        mutant sequence

    >>> getSubstitutions('AGT', 'TGT')
    'A1T'

    >>> getSubstitutions('AAGTAACGA', 'ATCTAACGA')
    'A2T G3C'

    >>> getSubstitutions('TYARV', 'GYAGV', amino_acid=True)
    'T1G R4G'

    """
    if len(wildtype) != len(mutant):
        raise ValueError('wildtype and mutant must be same length')
    # validate against the appropriate alphabet for the sequence type
    alphabet = AAS_WITHSTOP if amino_acid else NTS
    unit = 'residue' if amino_acid else 'nucleotide'
    diffs = []
    for pos, (wt, mut) in enumerate(zip(wildtype, mutant), start=1):
        if wt not in alphabet:
            raise ValueError(f"Invalid wt {unit} {wt} at site {pos}")
        if mut not in alphabet:
            raise ValueError(f"Invalid mutant {unit} {mut} at site {pos}")
        if wt != mut:
            diffs.append(f"{wt}{pos}{mut}")
    return ' '.join(diffs)
def codon_to_nt_counts(codoncounts):
    """Convert codon counts file to nucleotide counts.

    Args:
        `codoncounts` (str or pandas.DataFrame)
            Codon counts in format produced by ``dms2_bcsubamp``,
            either as CSV file or data frame holding CSV.

    Returns:
        pandas.DataFrame with nucleotide counts.

    Example:

    >>> with tempfile.NamedTemporaryFile('w') as f:
    ...     _ = f.write(textwrap.dedent('''
    ...         site,wildtype,AAA,AAC,AAG,AAT,ACA,ACC,ACG,ACT,AGA,AGC,AGG,AGT,ATA,ATC,ATG,ATT,CAA,CAC,CAG,CAT,CCA,CCC,CCG,CCT,CGA,CGC,CGG,CGT,CTA,CTC,CTG,CTT,GAA,GAC,GAG,GAT,GCA,GCC,GCG,GCT,GGA,GGC,GGG,GGT,GTA,GTC,GTG,GTT,TAA,TAC,TAG,TAT,TCA,TCC,TCG,TCT,TGA,TGC,TGG,TGT,TTA,TTC,TTG,TTT
    ...         1,ATG,0,0,0,0,0,0,2,0,0,0,0,0,8,0,333985,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
    ...         2,AAG,16,20,333132,41,13,12,27,14,8,6,67,8,9,13,29,9,10,11,12,8,10,15,15,11,6,9,3,7,8,10,17,4,3,7,49,7,9,14,9,4,10,7,7,7,9,11,11,5,14,14,11,6,13,16,15,14,9,9,15,8,9,11,8,15
    ...         3,GCA,2,3,8,3,34,11,7,6,7,6,9,8,4,3,5,0,6,14,10,12,6,8,7,10,5,11,7,6,6,1,3,12,19,6,11,9,333250,10,6,9,15,3,5,5,37,9,9,7,8,4,8,3,23,5,7,8,6,11,7,10,7,9,3,6
    ...         '''.strip()))
    ...     f.flush()
    ...     nt_counts = codon_to_nt_counts(f.name)
    >>> nt_counts
       site wildtype       A       C       G       T
    0     1        A  334009       0       6       0
    1     2        T       0       2       0  334013
    2     3        G       8       0  333993      14
    3     4        A  333424     156     169     187
    4     5        A  333361     211     186     178
    5     6        G     156     185  333427     168
    6     7        G     116     124  333410     125
    7     8        C     126  333407     121     121
    8     9        A  333435     114     112     114

    """
    if not isinstance(codoncounts, pandas.DataFrame):
        codoncounts = pandas.read_csv(codoncounts)

    # `dtype != int` is platform-dependent (numpy int is int32 on some
    # platforms, so a valid int64 column could spuriously fail); use the
    # pandas dtype predicate instead.
    if not pandas.api.types.is_integer_dtype(codoncounts['site']):
        raise ValueError('`site` column in `codoncounts` must be integer')

    nt_counts = []
    # split each codon column into its three nucleotide positions and
    # accumulate counts per (nucleotide site, wildtype nt, observed nt)
    for i_nt in [0, 1, 2]:
        nt_counts.append(
            codoncounts
            .melt(id_vars=['site', 'wildtype'],
                  var_name='codon',
                  value_name='count',
                  )
            .assign(
                # codon site c maps to nucleotide sites 3(c-1)+1 .. 3(c-1)+3
                site=lambda x: 3 * (x['site'] - 1) + i_nt + 1,
                wildtype=lambda x: x['wildtype'].str[i_nt],
                nucleotide=lambda x: x['codon'].str[i_nt],
                )
            .groupby(['site', 'wildtype', 'nucleotide'])
            .aggregate({'count': 'sum'})
            .reset_index()
            .pivot_table(values='count',
                         columns='nucleotide',
                         index=['site', 'wildtype'])
            .reset_index()
            )

    nt_counts = (pandas.concat(nt_counts)
                 .sort_values('site')
                 .reset_index(drop=True)
                 )

    # clear the columns-axis name left behind by `pivot_table`; attribute
    # deletion (`del nt_counts.columns.name`) is not supported across all
    # pandas versions, assignment of None is.
    nt_counts.columns.name = None

    return nt_counts
def barcodeInfoToCodonVariantTable(samples, geneseq, path=None):
    """Convert barcode info files into a CodonVariantTable

    Convert barcode info files output from `dms2_bcsubamp` into a
    `CodonVariantTable`. Barcode info files contain reads and barcodes from
    barcoded subamplicon sequencing, described
    `here <https://jbloomlab.github.io/dms_tools2/bcsubamp.html>`_.

    This function takes consensus reads retained by `dms2_bcsubamp`,
    gives each unique sequence a numerical barcode (since the barcodes from
    `dms2_bcsubamp` could come from the same variant), and counts the number
    of retained consensus reads corresponding to each sequence. Then, a
    `CodonVariantTable` is made using the sequences and their numerical
    barcodes, and counts are added based on the number of retained consensus
    reads of those sequences. Therefore, the `CodonVariantTable` will only
    contain one 'variant' for each unique sequence with the total count for all
    the unbarcoded variants in the experiment which had the same sequence.

    Args:
        `samples` (dict):
            Dictionary with libraries as keys and lists of info file prefixes
            (file names without the '_bcinfo.txt.gz') for files corresponding
            to those libraries as values.
            Example: {'library-1':['condition-1-library-1'],
                      'library-2':['condition-1-library-2']}
        `geneseq` (str):
            The wildtype gene sequence
        `path` (str)
            Directory in which barcode info files are located

    Returns:
        A `dms_variants.codonvarianttable.CodonVariantTable` with 'counts'
        generated from the barcode info files
    """
    # Set up re matchers for looking at lines.
    # `matcher` captures "<LINETYPE:> <contents>" lines (e.g. "RETAINED: True");
    # `alt_matcher` captures the bare "R1 READS:"/"R2 READS:" headers, which
    # `matcher` misses because they have no space after the colon;
    # `read_matcher` recognizes raw read lines (only A/T/G/C/N and whitespace).
    matcher = re.compile(r'(?P<linetype>^.*\:) '
                         r'(?P<contents>.*$)')
    alt_matcher = re.compile(r'(?P<linetype>^R\d READS:$)')
    read_matcher = re.compile(r'(?P<read>^[ATGCN\s]*$)')
    # Create a dictionary to contain dictionaries of each library's barcodes
    libraries = {}
    # Initialize lists for making the codonvarianttable
    barcodes = []
    subs = []
    variant_call_support = []
    library_list = []
    # For each library, go through each sample file and collect data
    for library in samples.keys():
        # Initialize dictionary to contain this library's reads and barcodes.
        # Maps consensus sequence -> {'barcode': int, <sample>: count, ...}
        barcode_dictionary = {}
        # Start a barcode count for this library
        cur_barcode = 1
        # For each barcode info file corresponding to a sample in this library
        for sample in samples[library]:
            # Set initial conditions
            # NOTE(review): `take_next` and `description_skipped` are never
            # read anywhere below; they look like leftovers from an earlier
            # version of this parser.
            take_next = False
            description_skipped = False
            # Find the file
            f = f"{sample}_bcinfo.txt.gz"
            if path:
                file_path = os.path.join(os.path.abspath(path), f)
            else:
                file_path = f
            # Open the file and loop through it to find retained consensus
            # reads and give them each a new barcode
            with gzip.open(file_path, 'r') as f:
                # Make sure the first line looks like it is supposed to
                firstline = f.readline()
                firstline = firstline.decode()
                first_match = matcher.match(firstline)
                if first_match.group('linetype') != 'BARCODE:':
                    raise ValueError(f"Unexpected first line {firstline}: may be "
                                     "unexpected file type")
                else:
                    previous_line = first_match
                # Go through the lines, making they are in the expected order:
                # BARCODE -> RETAINED -> DESCRIPTION -> CONSENSUS ->
                # R1 READS (raw reads) -> R2 READS (raw reads) -> BARCODE ...
                for line in f:
                    line = line.decode()
                    line_match = matcher.match(line)
                    if not line_match:
                        # may be a bare "R1 READS:"/"R2 READS:" header
                        line_match = alt_matcher.match(line)
                    if not line_match:
                        # neither header style matched: must be a raw read line
                        read_match = read_matcher.match(line)
                        if not read_match:
                            raise ValueError(f"Unable to recognize line {line}")
                        else:
                            line_is_read = True
                            # raw reads may only appear under an R1/R2 header
                            previous_linetype = previous_line.group('linetype')
                            if previous_linetype != 'R1 READS:' and \
                                    previous_linetype != 'R2 READS:':
                                raise ValueError(f"Unexpected line {line}")
                    else:
                        line_is_read = False
                    # Validate the line against what the previous (non-read)
                    # line allows, and act on retained consensus sequences.
                    if previous_line.group('linetype') == 'BARCODE:':
                        if line_match.group('linetype') != 'RETAINED:':
                            raise ValueError(f"Unexpected line {line}")
                        # Decide whether to retain the next consensus or not
                        else:
                            if line_match.group('contents') == 'False':
                                retain = False
                            elif line_match.group('contents') == 'True':
                                retain = True
                            else:
                                raise ValueError(f"Unexpected line {line}")
                    elif previous_line.group('linetype') == 'RETAINED:':
                        if line_match.group('linetype') != 'DESCRIPTION:':
                            raise ValueError(f"Unexpected line {line}")
                    elif previous_line.group('linetype') == 'DESCRIPTION:':
                        if line_match.group('linetype') != 'CONSENSUS:':
                            raise ValueError(f"Unexpected line {line}")
                        # Make sure we know whether to retain or not
                        # NOTE(review): `retain` is assumed to have been set by
                        # an earlier RETAINED: line; a malformed file could
                        # reach this point with `retain` unbound.
                        elif not isinstance(retain, bool):
                            raise ValueError(
                                f"Unclear whether to retain {line_match.group('contents')}"
                                )
                        elif retain:
                            read = line_match.group('contents')
                            # Add the read to the dictionary if not in it
                            # Also give it a barcode
                            # (sequences containing any 'N' are skipped)
                            if 'N' not in read:
                                if read not in barcode_dictionary:
                                    # Create the sequence in the dictionary
                                    barcode_dictionary[read] = {}
                                    # Give it an initial count of 1 for this sample
                                    barcode_dictionary[read][sample] = 1
                                    # Give it the next barcode
                                    barcode_dictionary[read]['barcode'] = cur_barcode
                                    # Save values for making CodonVariantTable
                                    barcodes.append(cur_barcode)
                                    subs.append(getSubstitutions(geneseq, read))
                                    variant_call_support.append(1)
                                    library_list.append(library)
                                    # Advance current barcode
                                    cur_barcode += 1
                                else:
                                    # Add a counter for the sample if sequence
                                    # not seen for this sample yet
                                    if sample not in barcode_dictionary[read]:
                                        barcode_dictionary[read][sample] = 1
                                    else:
                                        # Add another count to this read for
                                        # this sample
                                        barcode_dictionary[read][sample] += 1
                            # Set retain to None
                            retain = None
                    elif previous_line.group('linetype') == 'CONSENSUS:':
                        if line_match.group('linetype') != 'R1 READS:':
                            raise ValueError(f"Unexpected line {line}")
                    elif previous_line.group('linetype') == 'R1 READS:':
                        if not line_is_read:
                            if line_match.group('linetype') != 'R2 READS:':
                                raise ValueError(f"Unexpected line {line}")
                    elif previous_line.group('linetype') == 'R2 READS:':
                        if not line_is_read:
                            if line_match.group('linetype') != 'BARCODE:':
                                raise ValueError(f"Unexpected line {line}")
                    # Save this line as the previous line if it is not a read
                    if not line_is_read:
                        previous_line = line_match
        # After going through each file for a library, save its dictionary with
        # reads and barcodes
        libraries[library] = barcode_dictionary
    # Make the dataframe for creating the codonvarianttable
    df = {'barcode':barcodes,
          'substitutions':subs,
          'library':library_list,
          'variant_call_support':variant_call_support,
          }
    df = pandas.DataFrame(df)
    # Make the codonvarianttable
    # (round-trip through a temporary CSV because CodonVariantTable is
    # constructed from a barcode-variant file on disk)
    with tempfile.NamedTemporaryFile(mode='w') as f:
        df.to_csv(f, index=False)
        f.flush()
        variants = dms_variants.codonvarianttable.CodonVariantTable(
            barcode_variant_file=f.name,
            geneseq=geneseq)
    # Make the counts dataframe:
    # Initialize list of dataframes
    dfs = []
    # Loop through each library and produce count dataframes for each sample
    for library in libraries:
        barcode_dictionary = libraries[library]
        for sample in samples[library]:
            barcodes_list = []
            counts_list = []
            sample_list = []
            library_list = []
            # Get counts for this sample (0 when the sequence was only seen
            # in other samples of the same library)
            for sequence in barcode_dictionary.keys():
                if sample not in barcode_dictionary[sequence].keys():
                    counts_list.append(0)
                else:
                    counts_list.append(barcode_dictionary[sequence][sample])
                barcodes_list.append(barcode_dictionary[sequence]['barcode'])
                sample_list.append(sample)
                library_list.append(library)
            # Make a dataframe for this sample
            data = {'barcode':barcodes_list,
                    'count':counts_list,
                    'sample':sample_list,
                    'library':library_list,
                    }
            data = pandas.DataFrame(data)
            # Append it to the list of dataframes
            dfs.append(data)
    # Concatenate the list of dataframes into a counts dataframe
    barcode_counts = pandas.concat(dfs)
    # Add the counts for each sample to the codonvarianttable
    for library in libraries:
        for sample in samples[library]:
            icounts = barcode_counts.query('library == @library & sample == @sample')
            icounts = icounts[['barcode', 'count']]
            variants.addSampleCounts(library, sample, icounts)
    return(variants)
if __name__ == '__main__':
    # run this module's doctests when executed as a script
    import doctest
    doctest.testmod()
| gpl-3.0 |
oesteban/mriqc | mriqc/classifier/sklearn/preprocessing.py | 1 | 22145 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# @Author: oesteban
# @Date: 2017-06-08 17:11:58
"""
Extensions to the sklearn's default data preprocessing filters
"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.preprocessing import RobustScaler
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelBinarizer
from ... import logging
LOG = logging.getLogger('mriqc.classifier')
class PandasAdaptor(BaseEstimator, TransformerMixin):
    """
    Extracts the values of selected columns of a pandas DataFrame,
    adapting it for estimators that expect plain arrays
    [`source <https://stackoverflow.com/a/41461843/6820620>`_].

    Example
    -------

        >>> from mriqc.classifier.sklearn.preprocessing import PandasAdaptor
        >>> tfm = PandasAdaptor(columns=['duration', 'num_operations'])
        >>> # array = tfm.fit_transform(churn_d)

    """
    # NOTE: the previous docstring example instantiated this class with a
    # scaler as first argument, which does not match __init__ and raised a
    # TypeError when the doctest ran.

    def __init__(self, columns=None):
        # columns: list of column names to extract; None means all columns.
        self.columns = columns

    def fit(self, X, y=None):
        """Record which columns will be extracted (all, if none given)."""
        # NOTE(review): mutating the __init__ parameter in fit deviates from
        # the sklearn convention of storing fitted state in
        # trailing-underscore attributes; kept for backward compatibility.
        if self.columns is None:
            self.columns = X.columns.ravel().tolist()
        return self

    def transform(self, X, y=None):
        """Return the values of the selected columns as an array."""
        try:
            return X[self.columns].values
        except (IndexError, KeyError):
            # X is not indexable by column names (e.g. already an ndarray):
            # pass it through unchanged.
            return X
class ColumnsScaler(BaseEstimator, TransformerMixin):
    """
    Wraps a data transformation to run only in specific
    columns [`source <https://stackoverflow.com/a/41461843/6820620>`_].

    Example
    -------

        >>> from sklearn.preprocessing import StandardScaler
        >>> from mriqc.classifier.sklearn.preprocessing import ColumnsScaler
        >>> tfm = ColumnsScaler(StandardScaler(),
        ...                     columns=['duration', 'num_operations'])
        >>> # scaled = tfm.fit_transform(churn_d)

    """

    def __init__(self, scaler, columns=None):
        # scaler: any fit/transform estimator applied to the selected columns
        # columns: columns to scale; None means all numeric columns
        self._scaler = scaler
        self.columns = columns

    def _numeric_cols(self, X):
        """Return the requested columns restricted to numeric dtypes."""
        columns = self.columns
        numcols = list(X.select_dtypes([np.number]).columns.ravel())

        if not columns:
            return numcols
        return [col for col in columns if col in numcols]

    def fit(self, X, y=None):
        """Fit the wrapped scaler on the numeric target columns only."""
        columns = self._numeric_cols(X)
        self._scaler.fit(X[columns], y)
        return self

    def transform(self, X, y=None):
        """Scale the target columns, passing the rest through unchanged."""
        columns = self._numeric_cols(X)

        col_order = X.columns
        scaled_x = pd.DataFrame(self._scaler.transform(
            X[columns]), columns=columns)
        # BUG FIX: `.ix` was removed in pandas 1.0; `.loc` with a boolean
        # column mask is the supported equivalent.
        unscaled_x = X.loc[:, ~X.columns.isin(columns)]
        return pd.concat([unscaled_x, scaled_x], axis=1)[col_order]
class GroupsScaler(BaseEstimator, TransformerMixin):
    """
    Wraps a data transformation to run group-wise (one scaler fitted
    per value of the grouping column, e.g. per acquisition site).

    Example
    -------

        >>> from sklearn.preprocessing import StandardScaler
        >>> from mriqc.classifier.sklearn.preprocessing import GroupsScaler
        >>> tfm = GroupsScaler(StandardScaler(), by='site')
        >>> # scaled = tfm.fit_transform(churn_d)

    """
    # NOTE: the previous docstring example passed a nonexistent ``groups=``
    # keyword; the parameter is named ``by``.

    def __init__(self, scaler, by='site'):
        # by: name of the grouping column
        self.by = by
        self._base_scaler = scaler
        self._scalers = {}       # group label -> fitted clone of the scaler
        self._groups = None      # per-row integer group id seen at fit time
        self._colnames = None    # column names seen at fit time
        self._colmask = None     # boolean mask excluding the `by` column

    def fit(self, X, y=None):
        """Fit one clone of the base scaler per group found in column `by`."""
        self._colmask = [True] * X.shape[1]
        self._colnames = X.columns.ravel().tolist()

        # Identify batches
        groups = X[[self.by]].values.ravel().tolist()
        self._colmask[X.columns.get_loc(self.by)] = False

        # Convert groups to IDs
        glist = list(set(groups))
        self._groups = np.array([glist.index(group)
                                 for group in groups])

        # reuse `glist` so the enumeration order is guaranteed consistent
        # with the ids stored in self._groups
        for gid, batch in enumerate(glist):
            scaler = clone(self._base_scaler)
            mask = self._groups == gid
            if not np.any(mask):
                continue
            # BUG FIX: `.ix` was removed in pandas 1.0; `.loc` accepts
            # boolean masks on both axes.
            self._scalers[batch] = scaler.fit(
                X.loc[mask, self._colmask], y)

        return self

    def transform(self, X, y=None):
        """Scale each row with the scaler fitted for its group.

        Rows from groups unseen at fit time are scaled with a fresh
        clone fitted on the fly.
        """
        if self.by in X.columns.ravel().tolist():
            groups = X[[self.by]].values.ravel().tolist()
        else:
            groups = ['Unknown'] * X.shape[0]

        glist = list(set(groups))
        groups = np.array([glist.index(group) for group in groups])
        new_x = X.copy()
        for gid, batch in enumerate(glist):
            if batch in self._scalers:
                mask = groups == gid
                if not np.any(mask):
                    continue
                scaler = self._scalers[batch]
                new_x.loc[mask, self._colmask] = scaler.transform(
                    X.loc[mask, self._colmask])
            else:
                # BUG FIX: the original aliased self._colmask and then
                # deleted an element, silently corrupting the fitted state;
                # work on a copy instead.
                colmask = list(self._colmask)
                if self.by in self._colnames and len(colmask) == len(self._colnames):
                    del colmask[self._colnames.index(self.by)]

                scaler = clone(self._base_scaler)
                new_x.loc[:, colmask] = scaler.fit_transform(
                    X.loc[:, colmask])

        return new_x
class BatchScaler(GroupsScaler, TransformerMixin):
    """
    Wraps a data transformation to run group-wise, restricted to a
    subset of feature columns.

    Example
    -------

        >>> from sklearn.preprocessing import StandardScaler
        >>> from mriqc.classifier.sklearn.preprocessing import BatchScaler
        >>> tfm = BatchScaler(StandardScaler(), by='site', columns=[''])
        >>> # scaled = tfm.fit_transform(churn_d)

    """
    # NOTE: the previous docstring example passed a nonexistent ``groups=``
    # keyword; the parameter is named ``by``.

    def __init__(self, scaler, by='site', columns=None):
        # columns: feature columns to scale; None means all columns
        super(BatchScaler, self).__init__(scaler, by=by)
        self.columns = columns
        self.ftmask_ = None  # boolean mask of the columns handed to super()

    def fit(self, X, y=None):
        """Compute the feature mask and fit the group-wise scalers."""
        # Find features mask
        self.ftmask_ = [True] * X.shape[1]
        if self.columns:
            self.ftmask_ = X.columns.isin(self.columns)
        # the grouping column must be included in the mask so that
        # GroupsScaler.fit can split the data by batch
        # (the original achieved this through an alias named `fitmsk`)
        if self.by in X.columns:
            self.ftmask_[X.columns.get_loc(self.by)] = True

        super(BatchScaler, self).fit(X[X.columns[self.ftmask_]], y)
        return self

    def transform(self, X, y=None):
        """Scale the masked columns group-wise, leaving the rest intact."""
        new_x = X.copy()
        try:
            columns = new_x.columns.ravel().tolist()
        except AttributeError:
            columns = self.columns

        if self.by not in columns:
            # NOTE(review): appending the group column here changes the
            # column count, so ftmask_ (computed at fit time) is assumed to
            # still line up with the fit-time columns — confirm callers
            # always pass the same layout.
            new_x[self.by] = ['Unknown'] * new_x.shape[0]

        # BUG FIX: `.ix` was removed in pandas 1.0; `.loc` with a boolean
        # column mask is the supported equivalent.
        new_x.loc[:, self.ftmask_] = super(BatchScaler, self).transform(
            new_x[new_x.columns[self.ftmask_]], y)
        return new_x
class BatchRobustScaler(BatchScaler, TransformerMixin):
    """Group-wise :class:`RobustScaler`: centers/scales each batch with
    its own median and interquartile range."""

    def __init__(self, by='site', columns=None, with_centering=True, with_scaling=True,
                 quantile_range=(25.0, 75.0), copy=True):
        self.with_centering = with_centering
        self.with_scaling = with_scaling
        self.quantile_range = quantile_range
        # BUG FIX: the parameter was ignored (`self.copy = True`), which
        # breaks the sklearn contract that __init__ stores its arguments
        # unmodified (get_params()/clone() round-trip).
        self.copy = copy
        super(BatchRobustScaler, self).__init__(
            RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
                         quantile_range=quantile_range),
            by=by, columns=columns)
class CustFsNoiseWinnow(BaseEstimator, TransformerMixin):
    """
    Remove features with less importance than a noise feature
    https://gist.github.com/satra/c6eb113055810f19709fa7c5ebd23de8
    """

    def __init__(self, n_winnow=10, disable=False):
        # n_winnow: maximum number of winnowing iterations (fail-safe bound)
        # disable: when True, fit() keeps every feature unchanged
        self.disable = disable
        self.n_winnow = n_winnow

        # Fitted state (populated by fit):
        self.importances_ = None      # importances of the kept features
        self.importances_snr_ = None  # importance / noise-importance ratio
        self.idx_keep_ = None         # indices of the kept features
        self.mask_ = None             # boolean keep-mask over input features

    def fit(self, X, y, n_jobs=1):
        """Fit the model with X.
        This is the workhorse function.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples in the number of samples
            and n_features is the number of features.
        Returns
        -------
        self : object
            Returns the instance itself.
        self.mask_ : array
            Logical array of features to keep
        """
        from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor

        if self.disable:
            self.mask_ = np.ones(X.shape[1], dtype=bool)
            return self

        self.mask_ = np.zeros(X.shape[1], dtype=bool)

        # NOTE(review): clf_flag is hard-coded True, so the
        # ExtraTreesRegressor branch below is currently unreachable.
        clf_flag = True
        n_estimators = 1000

        X_input = X.copy()

        n_sample, n_feature = np.shape(X_input)
        # Add "1" to the col dimension to account for always keeping the noise
        # vector inside the loop
        idx_keep = np.arange(n_feature + 1)

        # one-hot encode multiclass targets for the classifier
        if clf_flag and len(set(list(y))) > 2:
            binarize = LabelBinarizer()
            y = binarize.fit_transform(y)

        counter = 0
        noise_flag = True
        while noise_flag:
            counter = counter + 1
            # fresh synthetic feature, uncorrelated with y (see _generate_noise)
            noise_feature = _generate_noise(n_sample, y, clf_flag)

            # Add noise feature
            X = np.concatenate((X_input, noise_feature), axis=1)

            # Initialize estimator
            if clf_flag:
                clf = ExtraTreesClassifier(
                    n_estimators=n_estimators,
                    criterion='gini',
                    max_depth=None,
                    min_samples_split=2,
                    min_samples_leaf=1,
                    min_weight_fraction_leaf=0.0,
                    max_features='sqrt',
                    max_leaf_nodes=None,
                    min_impurity_decrease=1e-07,
                    bootstrap=True,
                    oob_score=False,
                    n_jobs=n_jobs,
                    random_state=None,
                    verbose=0,
                    warm_start=False,
                    class_weight='balanced'
                )
            else:
                # NOTE(review): criterion='mse' was renamed to
                # 'squared_error' in scikit-learn 1.0 and removed in 1.2;
                # this (unreachable) branch would fail on modern sklearn.
                clf = ExtraTreesRegressor(
                    n_estimators=n_estimators, criterion='mse', max_depth=None,
                    min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0,
                    max_features='auto', max_leaf_nodes=None, min_impurity_decrease=1e-07,
                    bootstrap=False, oob_score=False, n_jobs=1, random_state=None, verbose=0,
                    warm_start=False)

            # fit only on the features still in play (noise is always last)
            clf.fit(X[:, idx_keep], y)
            LOG.debug('done fitting once')
            importances = clf.feature_importances_

            # k scales the noise-importance threshold (currently 1x)
            k = 1
            if np.all(importances[0:-1] > k * importances[-1]):
                LOG.log(19, 'All features (%d) are better than noise', len(idx_keep) - 1)
                # all features better than noise
                # comment out to force counter renditions of winnowing
                # noise_flag = False
            elif np.all(k * importances[-1] > importances[0:-1]):
                LOG.warning('No features are better than noise')
                # noise better than all features aka no feature better than noise
                # Leave as separate if clause in case want to do something different than
                # when all feat > noise. Comment out to force counter renditions of winnowing
                # noise_flag = False # just take everything
            else:
                # Tracer()()
                # winnow: drop every feature scoring below the noise feature
                idx_keep = idx_keep[importances >= (k * importances[-1])]
                # use >= so when saving, can always drop last index
                importances = importances[importances >= (k * importances[-1])]

                # always keep the noise index, which is n_feature (assuming 0 based python index)
                # idx_keep = np.concatenate((
                #     idx_keep[:, np.newaxis], np.array([[n_feature]])), axis=0)
                idx_keep = np.ravel(idx_keep)

            # fail safe
            if counter >= self.n_winnow:
                noise_flag = False

        # strip the trailing noise-feature entry from all fitted attributes
        self.importances_ = importances[:-1]
        self.importances_snr_ = importances[:-1] / importances[-1]
        self.idx_keep_ = idx_keep[:-1]
        self.mask_[self.idx_keep_] = True
        LOG.info('Feature selection: %d of %d features better than noise feature',
                 self.mask_.astype(int).sum(), len(self.mask_))
        return self

    def fit_transform(self, X, y=None):
        """Fit the model with X and apply the dimensionality reduction on X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self = self.fit(X, y)
        return X[:, self.mask_]

    def transform(self, X, y=None):
        """Apply dimensionality reduction to X.
        X is masked.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        from sklearn.utils import check_array
        from sklearn.utils.validation import check_is_fitted
        check_is_fitted(self, ['mask_'], all_or_any=all)
        X = check_array(X)
        return X[:, self.mask_]
class SiteCorrelationSelector(BaseEstimator, TransformerMixin):
    """
    Remove features with less importance than a noise feature
    https://gist.github.com/satra/c6eb113055810f19709fa7c5ebd23de8
    """
    # NOTE(review): the docstring above is copied from CustFsNoiseWinnow;
    # this class actually removes features that best PREDICT the acquisition
    # site, until site can no longer be classified above `target_auc`.

    def __init__(self, target_auc=0.6, disable=False,
                 max_iter=None, max_remove=0.7, site_col=-1):
        # target_auc: stop removing once site-prediction AUC drops below this
        # max_iter: optional cap on removal iterations
        # max_remove: fraction (if < 1.0) or count of features allowed removed
        # site_col: index of the site column in X
        self.disable = disable
        self.target_auc = target_auc
        self.mask_ = None
        # NOTE(review): a non-positive max_remove is stored as None here, but
        # fit() then evaluates `self.max_remove < 1.0`, which raises a
        # TypeError for None — confirm whether None was ever intended.
        self.max_remove = max_remove if max_remove > 0 else None
        self.max_iter = max_iter
        self.site_col = site_col

    def fit(self, X, y, n_jobs=1):
        """Fit the model with X.
        This is the workhorse function.
        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training data, where n_samples in the number of samples
            and n_features is the number of features.
        Returns
        -------
        self : object
            Returns the instance itself.
        self.mask_ : array
            Logical array of features to keep
        """
        from sklearn.ensemble import ExtraTreesClassifier
        from sklearn.model_selection import train_test_split

        n_feature = np.shape(X)[1]
        self.mask_ = np.ones(n_feature, dtype=bool)
        self.mask_[self.site_col] = False  # Always remove site
        n_feature -= 1  # Remove site
        if self.disable:
            return self

        X_input = X.copy()

        sites = X[:, self.site_col].tolist()
        if len(set(sites)) == 1:
            # single site: nothing can leak site information
            return self

        # target for the auxiliary classifier is the site label itself
        y_input = LabelBinarizer().fit_transform(sites)

        X_train, X_test, y_train, y_test = train_test_split(
            X_input, y_input, test_size=0.33, random_state=42)

        # translate max_remove into an absolute number of features,
        # never removing so many that fewer than 5 would remain
        max_remove = n_feature - 5
        if self.max_remove < 1.0:
            max_remove = int(self.max_remove * n_feature)
        elif int(self.max_remove) < n_feature:
            max_remove = int(self.max_remove)

        min_score = 1.0
        i = 0
        while True:
            # how well can the remaining features predict the site?
            clf = ExtraTreesClassifier(
                n_estimators=1000,
                criterion='gini',
                max_depth=None,
                min_samples_split=2,
                min_samples_leaf=1,
                min_weight_fraction_leaf=0.0,
                max_features='sqrt',
                max_leaf_nodes=None,
                min_impurity_decrease=1e-07,
                bootstrap=True,
                oob_score=False,
                n_jobs=n_jobs,
                random_state=None,
                verbose=0,
                warm_start=False,
                class_weight='balanced'
            ).fit(X_train[:, self.mask_], y_train)
            score = roc_auc_score(
                y_test, clf.predict(X_test[:, self.mask_]),
                average='macro',
                sample_weight=None)
            # stop when site is no longer predictable, or limits are hit
            if score < self.target_auc:
                break
            if np.sum(~self.mask_) >= max_remove:
                break
            if self.max_iter is not None and i >= self.max_iter:
                break

            # most site-predictive feature among those still kept
            importances = np.zeros(self.mask_.shape)
            importances[self.mask_] = clf.feature_importances_
            rm_feat = np.argmax(importances)

            # Remove feature
            # NOTE(review): this only sets the mask entry to False when the
            # score improved (dropped) since the best seen so far; otherwise
            # it re-assigns True and no feature is removed this iteration —
            # confirm this is the intended behavior.
            self.mask_[rm_feat] = score >= min_score
            if score < min_score:
                min_score = score
            i += 1

        LOG.info('Feature selection: kept %d of %d features',
                 np.sum(self.mask_), n_feature)
        return self

    def fit_transform(self, X, y=None, n_jobs=1):
        """Fit the model with X and apply the dimensionality reduction on X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self = self.fit(X, y, n_jobs=n_jobs)
        return X[:, self.mask_]

    def transform(self, X, y=None):
        """Apply dimensionality reduction to X.
        X is masked.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.
        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        from sklearn.utils import check_array
        from sklearn.utils.validation import check_is_fitted
        check_is_fitted(self, ['mask_'], all_or_any=all)
        if hasattr(X, 'columns'):
            X = X.values
        X = check_array(X[:, self.mask_])
        return X
def _generate_noise(n_sample, y, clf_flag=True):
"""
Generates a random noise sample that is not correlated (<0.05)
with the output y. Uses correlation if regression, and ROC AUC
if classification
"""
if clf_flag:
noise_feature = np.random.normal(
loc=0, scale=10.0, size=(n_sample, 1))
noise_score = roc_auc_score(
y, noise_feature, average='macro', sample_weight=None)
while (noise_score > 0.6) or (noise_score < 0.4):
noise_feature = np.random.normal(
loc=0, scale=10.0, size=(n_sample, 1))
noise_score = roc_auc_score(
y, noise_feature, average='macro', sample_weight=None)
else:
noise_feature = np.random.normal(
loc=0, scale=10.0, size=(n_sample, 1))
while np.abs(np.corrcoef(noise_feature, y[:, np.newaxis], rowvar=0)[0][1]) > 0.05:
noise_feature = np.random.normal(
loc=0, scale=10.0, size=(n_sample, 1))
return noise_feature
# DEPRECATED CODE
# def find_gmed(dataframe, by='site', excl_columns=None):
# sites = list(set(dataframe[[by]].values.ravel().tolist()))
# numcols = dataframe.select_dtypes([np.number]).columns.ravel().tolist()
# if excl_columns:
# numcols = [col for col in numcols if col not in excl_columns]
# LOG.info('Calculating bias of dataset (%d features)', len(numcols))
# site_medians = []
# for site in sites:
# site_medians.append(np.median(dataframe.loc[dataframe.site == site, numcols], axis=0))
# return np.median(np.array(site_medians), axis=0)
# def norm_gmed(dataframe, grand_medians, by='site', excl_columns=None):
# LOG.info('Removing bias of dataset ...')
# all_cols = dataframe.columns.ravel().tolist()
# if by not in all_cols:
# dataframe[by] = ['Unknown'] * len(dataframe)
# sites = list(set(dataframe[[by]].values.ravel().tolist()))
# numcols = dataframe.select_dtypes([np.number]).columns.ravel().tolist()
# if excl_columns:
# numcols = [col for col in numcols if col not in excl_columns]
# for site in sites:
# vals = dataframe.loc[dataframe.site == site, numcols]
# site_med = np.median(vals, axis=0)
# dataframe.loc[dataframe.site == site, numcols] = vals - site_med + grand_medians
# return dataframe
# def find_iqrs(dataframe, by='site', excl_columns=None):
# sites = list(set(dataframe[[by]].values.ravel().tolist()))
# numcols = dataframe.select_dtypes([np.number]).columns.ravel().tolist()
# if excl_columns:
# numcols = [col for col in numcols if col not in excl_columns]
# LOG.info('Calculating IQR of dataset (%d)', len(numcols))
# meds = []
# iqrs = []
# for site in sites:
# vals = dataframe.loc[dataframe.site == site, numcols]
# iqrs.append(mad(vals, axis=0))
# meds.append(np.median(vals, axis=0))
# return [np.median(np.array(meds), axis=0),
# np.median(np.array(iqrs), axis=0)]
# def norm_iqrs(dataframe, mean_iqr, by='site', excl_columns=None):
# LOG.info('Removing bias of dataset ...')
# all_cols = dataframe.columns.ravel().tolist()
# if by not in all_cols:
# dataframe[by] = ['Unknown'] * len(dataframe)
# sites = list(set(dataframe[[by]].values.ravel().tolist()))
# numcols = dataframe.select_dtypes([np.number]).columns.ravel().tolist()
# if excl_columns:
# numcols = [col for col in numcols if col not in excl_columns]
# for site in sites:
# vals = dataframe.loc[dataframe.site == site, numcols]
# vals -= np.median(vals, axis=0)
# iqr = np.percentile(vals, 75, axis=0) - np.percentile(vals, 25, axis=0)
# vals.iloc[:, iqr > 1.e-5] *= (1.0 / iqr[iqr > 1.e-5])
# changecols = vals.iloc[:, iqr > 1.e-5].columns.ravel().tolist()
# dataframe.loc[dataframe.site == site, changecols] = vals
# return dataframe
| bsd-3-clause |
SpencerDodd/CrossBLAST | hist_results.py | 1 | 2749 | import matplotlib.pyplot as plt
import numpy as np
import csv
import sys
class HistogramParser:

	def __init__(self, other, superfamily, family, subfamily, genus, species, subspecies):
		# one accumulator list per phylogenetic level
		self.other = other
		self.superfamily = superfamily
		self.family = family
		self.subfamily = subfamily
		self.genus = genus
		self.species = species
		self.subspecies = subspecies

	# parses the input file results by phylogenetic group
	def hist_parse(self, input_file):
		# dispatch table: phylogenetic level -> accumulator list
		buckets = {
			'Other': self.other,
			'Superfamily': self.superfamily,
			'Family': self.family,
			'Subfamily': self.subfamily,
			'Genus': self.genus,
			'Species': self.species,
			'Subspecies': self.subspecies,
		}
		with open(input_file) as csv_file:
			reader = csv.reader(csv_file, delimiter = ',', quotechar = '|')
			for index, row in enumerate(reader):
				if index == 0:
					continue  # skip header row
				phylo_level = row[4]
				percent_div_to_common_anc = float(row[2])
				if phylo_level not in buckets:
					raise Exception('ERROR | Phylogenetic level of hit not properly defined: {0}'.format(phylo_level))
				buckets[phylo_level].append(percent_div_to_common_anc)

	# creates and saves the histograms for all phylogenetic levels of the results summary
	def make_hists(self):
		level_names = ['Other', 'Superfamily', 'Family', 'Subfamily', 'Genus', 'Species', 'Subspecies']
		levels = [self.other, self.superfamily, self.family, self.subfamily, self.genus, self.species, self.subspecies]
		colors = ['dodgerblue', 'blue','cornflowerblue', 'deepskyblue', 'turquoise', 'cyan', 'lightgreen']
		# x-range spans at least [0, 10], widened to the largest observed value
		bin_max = 10
		for data in levels:
			if data and max(data) > bin_max:
				bin_max = max(data)
		bins = np.linspace(0, bin_max, 200)
		for name, data in zip(level_names, levels):
			if data:
				plt.hist(data, bins, alpha = 0.5, label = '{0} (Range: {1} to {2})'.format(name, min(data), max(data)))
		plt.ylabel('Frequency')
		plt.xlabel('Percent dist to common ancestor')
		plt.title('Overview')
		plt.legend(loc = 'upper right')
		plt.savefig('{0}Overview.png'.format(results_dir))
def main():
	"""Command-line entry point.

	Reads the results directory from ``sys.argv[1]`` (stored in the
	module-global ``results_dir``, which ``make_hists`` also reads),
	parses ``<results_dir>Summary.csv``, and writes the overview
	histogram image into the same directory.

	NOTE(review): paths are built by plain string concatenation, so the
	argument is expected to end with a path separator.
	"""
	global results_dir
	results_dir = sys.argv[1]
	parser = HistogramParser([], [], [], [], [], [], [])
	parser.hist_parse('{0}Summary.csv'.format(results_dir))
	parser.make_hists()
# BUG FIX: previously main() ran unconditionally at import time (and read
# sys.argv), which made this module unusable as a library. Guard the entry
# point so it only runs when executed as a script.
if __name__ == '__main__':
	main()

# TODO
| mit |
josemao/nilmtk | nilmtk/results.py | 6 | 7403 | import abc
import pandas as pd
import copy
from .timeframe import TimeFrame
from nilmtk.utils import get_tz, tz_localize_naive
class Results(object):
    """Stats results from each node need to be assigned to a specific
    class so we know how to combine results from multiple chunks.  For
    example, Energy can be simply summed; while dropout rate should be
    averaged, and gaps need to be merged across chunk boundaries.  Results
    objects contain a DataFrame, the index of which is the start timestamp for
    which the results are valid; the first column ('end') is the end
    timestamp for which the results are valid.  Other columns are accumulators
    for the results.

    Attributes
    ----------
    _data : DataFrame
        Index is period start.
        Columns are: `end` and any columns for internal storage of stats.

    Static Attributes
    -----------------
    name : str
        The string used to cache this results object.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        # One row per processed chunk; the 'end' column closes each period.
        self._data = pd.DataFrame(columns=['end'])

    def combined(self):
        """Return all results from each chunk combined.  Either return single
        float for all periods or a dict where necessary, e.g. if
        calculating Energy for a meter which records both apparent
        power and active power then get active power with
        energyresults.combined['active']
        """
        return self._data[self._columns_with_end_removed()].sum()

    def per_period(self):
        """return a DataFrame.  Index is period start.
        Columns are: end and <stat name>
        """
        # Deep copy so callers cannot mutate the internal accumulator.
        return copy.deepcopy(self._data)

    def simple(self):
        """Returns the simplest representation of the results."""
        return self.combined()

    def append(self, timeframe, new_results):
        """Append a single result.

        Parameters
        ----------
        timeframe : nilmtk.TimeFrame
            Period the new results are valid for.
        new_results : dict
            Maps stat name to value; one column per key.

        Raises
        ------
        TypeError
            If either argument has the wrong type.
        """
        if not isinstance(timeframe, TimeFrame):
            raise TypeError("`timeframe` must be of type 'nilmtk.TimeFrame',"
                            " not '{}' type.".format(type(timeframe)))
        if not isinstance(new_results, dict):
            raise TypeError("`new_results` must of a dict, not '{}' type."
                            .format(type(new_results)))

        # check that there is no overlap with any existing period
        for index, series in self._data.iterrows():
            tf = TimeFrame(index, series['end'])
            tf.check_for_overlap(timeframe)

        row = pd.DataFrame(index=[timeframe.start],
                           columns=['end'] + new_results.keys())
        row['end'] = timeframe.end
        for key, val in new_results.iteritems():
            row[key] = val
        # verify_integrity guards against duplicate period-start indices.
        self._data = self._data.append(row, verify_integrity=True)
        self._data.sort_index(inplace=True)

    def check_for_overlap(self):
        """Raise if any two stored periods overlap each other."""
        # TODO this could be made much faster
        n = len(self._data)
        index = self._data.index
        for i in range(n):
            row1 = self._data.iloc[i]
            tf1 = TimeFrame(index[i], row1['end'])
            for j in range(i+1, n):
                row2 = self._data.iloc[j]
                tf2 = TimeFrame(index[j], row2['end'])
                tf1.check_for_overlap(tf2)

    def update(self, new_result):
        """Add results from a new chunk.

        Parameters
        ----------
        new_result : Results subclass (same
            class as self) from new chunk of data.
        """
        if not isinstance(new_result, self.__class__):
            raise TypeError("new_results must be of type '{}'"
                            .format(self.__class__))

        if new_result._data.empty:
            return

        self._data = self._data.append(new_result._data)
        self._data.sort_index(inplace=True)
        self.check_for_overlap()

    def unify(self, other):
        """Take results from another table of data (another physical meter)
        and merge those results into self.  For example, if we have a dual-split
        mains supply then we want to merge the results from each physical meter.
        The two sets of results must be for exactly the same timeframes.

        Parameters
        ----------
        other : Results subclass (same class as self).
            Results calculated from another table of data.
        """
        assert isinstance(other, self.__class__)
        for i, row in self._data.iterrows():
            # Test index membership *before* indexing `other`: the previous
            # order evaluated `other._data['end'].loc[i]` first, which raised
            # KeyError for a missing period instead of the intended
            # RuntimeError below.
            if (i not in other._data.index or
                    other._data['end'].loc[i] != row['end']):
                raise RuntimeError("The sections we are trying to merge"
                                   " do not have the same end times so we"
                                   " cannot merge them.")

    def import_from_cache(self, cached_stat, sections):
        """
        Parameters
        ----------
        cached_stat : DataFrame of cached data
        sections : list of nilmtk.TimeFrame objects
            describing the sections we want to load stats for.
        """
        if cached_stat.empty:
            return

        tz = get_tz(cached_stat)
        usable_sections_from_cache = []

        def append_row(row, section):
            row = row.astype(object)
            # We stripped off the timezone when exporting to cache
            # so now we must put the timezone back.
            row['end'] = tz_localize_naive(row['end'], tz)
            # Only reuse a cached row if it covers exactly the same span.
            if row['end'] == section.end:
                usable_sections_from_cache.append(row)

        for section in sections:
            if not section:
                continue
            try:
                rows_matching_start = cached_stat.loc[section.start]
            except KeyError:
                pass
            else:
                # `.loc` returns a Series for a unique match and a DataFrame
                # when several cached rows share the same start timestamp.
                if isinstance(rows_matching_start, pd.Series):
                    append_row(rows_matching_start, section)
                else:
                    for row_i in range(rows_matching_start.shape[0]):
                        row = rows_matching_start.iloc[row_i]
                        append_row(row, section)

        self._data = pd.DataFrame(usable_sections_from_cache)
        self._data.sort_index(inplace=True)

    def export_to_cache(self):
        """
        Returns
        -------
        pd.DataFrame

        Notes
        -----
        Objects are converted using `DataFrame.convert_objects()`.
        The reason for doing this is to strip out the timezone
        information from data columns.  We have to do this otherwise
        Pandas complains if we try to put a column with multiple
        timezones (e.g. Europe/London across a daylight saving
        boundary).
        """
        return self._data.convert_objects()

    def timeframes(self):
        """Returns a list of timeframes covered by this Result."""
        # For some reason, using `iterrows()` messes with the
        # timezone of the index, hence we need to 'manually' iterate
        # over the rows.
        return [TimeFrame(self._data.index[i], self._data.iloc[i]['end'])
                for i in range(len(self._data))]

    def _columns_with_end_removed(self):
        """Return the stat column names, i.e. everything except 'end'."""
        cols = set(self._data.columns)
        if len(cols) > 0:
            cols.remove('end')
        cols = list(cols)
        return cols

    def __repr__(self):
        return str(self._data)
| apache-2.0 |
quasars100/Resonance_testing_scripts | alice/multiplesim.py | 1 | 6288 | import rebound
import numpy as np
import reboundxf
import matplotlib.pyplot as plt
from pylab import *
from rebound.interruptible_pool import InterruptiblePool
def anglerange(val):
    """Normalise an angle given in radians to [0, 2*pi) and return it in
    degrees.

    Uses modular arithmetic so the cost is O(1) no matter how far `val`
    lies outside the principal range; the previous while-loop version
    took time proportional to |val| / (2*pi).  Note the range is now the
    canonical half-open [0, 360): an input of exactly 2*pi maps to 0
    rather than 360.
    """
    return (val % (2 * np.pi)) * 180 / np.pi
def calc(args):
    """Integrate a star + two-planet system with REBOUND while migrating the
    outer planet inwards and damping the inner planet's eccentricity.

    Parameters
    ----------
    args : float
        Eccentricity damping timescale tau_e (years) for the inner planet;
        also used to label all output files.

    Side effects
    ------------
    Writes a tab-separated time series to ``onlytaue=<taue>.txt`` and saves
    eccentricity and period-ratio plots as PDFs in the working directory.
    """
    taue=args
    taupo=taue  # NOTE(review): taupo is never used below -- confirm it can be removed.
    # Units: AU, years, solar masses (G = 4*pi^2).
    sim = rebound.Simulation()
    sim.force_is_velocity_dependent = 1
    sim.G = 4.*(np.pi)**2
    sim.integrator = 'whfast'
    sim.dt = 0.012
    sim.add(m=1.0)
    sim.add(m=1.e-8, a=1.0, e=0.0, anom = 0)
    # Outer planet starts just wide of the 2:1 period ratio (a = 2.1^(2/3)).
    sim.add(m=1.e-5, a=2.1**(2.0/3.0), e=0.0, anom = 0)
    sim.move_to_com()
    tmax = 1.e7
    Npts = 1000
    # Apply the migration/damping forces after every integrator step.
    sim.post_timestep_modifications = reboundxf.modify_elements()
    xf = reboundxf.Params(sim)
    xf.e_damping_p =1.
    xf.tau_a = [0., 0., 1.e7]   # semi-major-axis migration: outer planet only
    xf.tau_e = [0., taue, 0.]   # eccentricity damping: inner planet only
    xf.tau_pomega = [0., 0., 0.]
    # Per-snapshot orbital elements for both planets.
    e1 = np.zeros(Npts)
    e2 = np.zeros(Npts)
    a1 = np.zeros(Npts)
    a2 = np.zeros(Npts)
    P1 = np.zeros(Npts)
    P2 = np.zeros(Npts)
    Pratio = np.zeros(Npts)
    l1 = np.zeros(Npts)
    l2 = np.zeros(Npts)
    vp1 = np.zeros(Npts)
    vp2 = np.zeros(Npts)
    times = np.linspace(0., tmax, Npts)
    # Eccentricity vector components (e*sin(varpi), e*cos(varpi)).
    evs1 = np.zeros(Npts)
    evs2 = np.zeros(Npts)
    evc1 = np.zeros(Npts)
    evc2 = np.zeros(Npts)
    # Resonance angles for first-order resonances 2:1 through 7:6.
    phi1 = np.zeros(Npts)
    phi2 = np.zeros(Npts)
    phi3 = np.zeros(Npts)
    phi4 = np.zeros(Npts)
    phi5 = np.zeros(Npts)
    phi6 = np.zeros(Npts)
    tf = open("onlytaue={0}.txt".format(taue),'w')
    for i, time in enumerate(times):
        sim.integrate(time)
        orbits = sim.calculate_orbits()
        e1[i] = orbits[0].e
        e2[i] = orbits[1].e
        a1[i] = orbits[0].a
        a2[i] = orbits[1].a
        # Kepler's third law with G*M = 4*pi^2: P = a^(3/2).
        P1[i] = (a1[i])**(3.0/2.0)
        P2[i] = (a2[i])**(3.0/2.0)
        Pratio[i] = P2[i]/P1[i]
        print(Pratio[i])
        l1[i] = orbits[0].l
        l2[i] = orbits[1].l
        vp1[i] = orbits[0].omega
        vp2[i] = orbits[1].omega
        # phi_k = (k+1)*lambda_in - k*lambda_out - varpi_in, degrees in [0, 360).
        phi1[i] = anglerange(2*l1[i] - l2[i] - vp1[i])
        phi2[i] = anglerange(3*l1[i] - 2*l2[i] - vp1[i])
        phi3[i] = anglerange(4*l1[i] - 3*l2[i] - vp1[i])
        phi4[i] = anglerange(5*l1[i] - 4*l2[i] - vp1[i])
        phi5[i] = anglerange(6*l1[i] - 5*l2[i] - vp1[i])
        phi6[i] = anglerange(7*l1[i] - 6*l2[i] - vp1[i])
        evs1[i] = e1[i]*(np.sin(vp1[i]))
        evs2[i] = e2[i]*(np.sin(vp2[i]))
        evc1[i] = e1[i]*(np.cos(vp1[i]))
        evc2[i] = e2[i]*(np.cos(vp2[i]))
        tf.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}\t{11}\t{12}\t{13}\t{14}\t{15}\t{16}\t{17}\t{18}\t{19}\n".format(times[i],e1[i],e2[i],a1[i],a2[i],Pratio[i],l1[i],l2[i],vp1[i],vp2[i],evs1[i],evs2[i],evc1[i],evc2[i],phi1[i],phi2[i],phi3[i],phi4[i], phi5[i], phi6[i]))
    tf.close()
    # Eccentricity evolution of both planets.
    plt.figure()
    plt.plot(times, e1, color = 'black', linewidth = 2.5, linestyle ='solid')
    plt.plot(times, e2, color = 'purple', linewidth = 2.5, linestyle = 'dashed')
    plt.xlabel('Time (years)', fontsize = 12)
    plt.ylabel('Eccentricity', fontsize = 12)
    plot(times, e1, color = 'black', linewidth = 1.5, linestyle = 'solid', label = 'Smaller exoplanet eccentricity')
    plot(times, e2,color = 'purple', linewidth = 1.5, linestyle = 'dashed', label = 'Larger exoplanet eccentricity')
    legend(loc = 'upper right')
    plt.savefig('onlytaue={0}_eccentricity.pdf'.format(taue))
    # Period-ratio evolution (resonance capture shows up as a plateau).
    plt.figure()
    plt.plot(times, Pratio, color = 'blue', linewidth = 3.0)
    plt.xlabel('Time (years)', fontsize = 12)
    plt.ylabel('Period Ratio', fontsize = 12)
    plot(times,Pratio,color = 'blue', linewidth = 2.0, linestyle = 'solid', label = 'Ratio of Planet Orbits')
    legend(loc = 'upper center')
    plt.savefig('onlytaue={0}_periodratio.pdf'.format(taue))
def ephi_sincos_graph(evc1, evs1, times):
    """Scatter e*cos(varpi) against e*sin(varpi) on centred axes and save
    the figure as a PDF.

    NOTE(review): `times` is unused, and the output filename references
    the module-level name `taues`, which is never defined (the sweep
    values live in `args`) -- calling this raises NameError; confirm the
    intended label before use.
    """
    plt.figure()
    plt.scatter(evc1, evs1, s=8)
    # Move the spines so the axes cross at the origin.
    ax = gca()
    ax.spines['top'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.spines['bottom'].set_position(('data',0))
    ax.yaxis.set_ticks_position('left')
    ax.spines['left'].set_position(('data',0))
    plt.xlabel('E*cos(varpi)', fontsize = 12)
    plt.ylabel('E*sin(varpi)', fontsize = 12)
    evscgraph = plt.savefig('sin,varpi_taupo=e={0}.pdf'.format(taues[1]))
    return evscgraph
def phi_graph(time, phi1, phi2, phi3, phi4, phi5, phi6):
    """Scatter the six resonance angles over hand-picked time windows and
    save the figure as a PDF.

    NOTE(review): the `time` parameter is unused -- the body references
    `times` and `taues`, neither of which exists at module scope (both
    are locals of calc) -- so calling this raises NameError; confirm the
    intended data source.
    """
    plt.figure()
    # Each window highlights the epoch during which the system sits near
    # the corresponding first-order resonance.
    plt.scatter(times[0:100], phi1[0:100],color = 'red', s = 10)
    plt.scatter(times[240:360], phi2[240:360], color = 'blue', s=10)
    plt.scatter(times[440:580], phi3[440:580], color = 'orange', s=10)
    plt.scatter(times[590:760], phi4[590:760], color = 'green', s=10)
    plt.scatter(times[740:860], phi5[740:860], color = 'pink', s=10)
    plt.scatter(times[840:1000], phi6[840:1000], color = 'purple', s=10)
    plt.xlabel('Time (years)', fontsize = 12)
    plt.ylabel('Resonance angle', fontsize = 12)
    scatter(times[0:100], phi1[0:100], color = 'red', label = '2:1')
    scatter(times[240:360], phi2[240:360], color = 'blue', label = '3:2')
    scatter(times[440:580], phi3[440:580], color = 'orange', label = '4:3')
    scatter(times[590:760], phi4[590:760], color = 'green', label = '5:4')
    scatter(times[740:860], phi5[740:860], color = 'pink', label = '6:5')
    scatter(times[840:1000], phi6[840:1000], color = 'purple', label = '7:6')
    pgraph = plt.savefig('sin,resonance_angles_taupo=e={0}.pdf'.format(taues[1]))
    return pgraph
def varpi_graph(vp1, vp2, times):
    """Scatter both planets' longitudes of pericentre against time and
    save the figure as a PDF.

    NOTE(review): the filename references the module-level name `taues`,
    which is never defined -- calling this raises NameError; confirm the
    intended label.
    """
    plt.figure()
    plt.scatter(times, vp1, color = 'purple', s=10)
    plt.scatter(times, vp2, color = 'blue', s=10)
    plt.xlabel('Time (years)', fontsize = 12)
    plt.ylabel('Pericentre Distance (m)', fontsize = 12)
    scatter(times,vp1, color = 'purple', label = 'smaller planet varpi')
    scatter(times, vp2, color = 'blue', label = 'larger planet varpi')
    vpgraph = plt.savefig('sin,pericentre_distance_taupo=e={0}.pdf'.format(taues[1]))
    return vpgraph
# ---- driver: sweep tau_e over 20 log-spaced values (1e4..1e8 yr) and run
# the simulations in parallel across 10 worker processes. ----
args = np.logspace(4,8,20)
pool = InterruptiblePool(10)
pool.map(calc,args)

# Record the mapping from each output file name to the tau_e value that
# produced it.  A context manager guarantees the index file is closed
# even if a write fails (the old open/close pair leaked on error).
filenames = []
with open('onlytaue,filenames.txt', 'w') as f:
    for taue in args:
        name = 'onlytaue={0}.txt'.format(taue)
        filenames.append(name)
        print(name)
        f.write('{0}\t{1}\n'.format(name, taue))
| gpl-3.0 |
sssundar/Drone | motor/quadratic_drag.py | 1 | 2709 | # Solves for the dynamics if a linear torque, quadratic drag model of our DC brushed motor.
# See notes from 8/4/2018-8/12/2018. We're trying to find the relations between Bd, Bm, Gamma, nd Jprop
# that give us linear relations between thrust and the duty cycle, which was measured,
# and which match a mechanical timescale of 100ms for the propellors, which was measured.
# Note, this is a non-linear system whose timescale depends linearly on the starting duty cycle and angular velocity.
# That means if we can find a range of d from w=0 with response timescales of ~100ms, any steps within that domain will also
# respond within 100ms.
import sys
import numpy as np
from matplotlib import pyplot as plt
from scipy.integrate import odeint
def w_ss(d, cw, gamma, beta_m, beta_d):
    """Steady-state angular velocity of the linear-torque / quadratic-drag
    motor model at duty cycle `d`.

    Returns 0 for a vanishing duty cycle.  `cw` selects the clockwise
    (positive) or counter-clockwise (negative) root of the torque
    balance.
    """
    if d < 1E-9:
        return 0
    prefactor = (d * beta_m) / (2 * beta_d)
    root = np.sqrt(1 + (4 * beta_d * gamma) / (beta_m * beta_m * d))
    if cw:
        return prefactor * (root - 1)
    return prefactor * (1 - root)
def sim(d, cw=True, thrust=False):
    """Integrate the motor spin-up ODE at duty cycle `d` and plot either
    angular velocity (with its steady-state asymptote) or thrust onto the
    current matplotlib figure.  Nothing is saved or shown here.

    Parameters
    ----------
    d : float
        PWM duty cycle in [0, 1].
    cw : bool
        Clockwise rotation if True (flips all signs otherwise).
    thrust : bool
        Plot thrust (Bt*w^2) instead of angular velocity.
    """
    # Propellor mass/geometry used to estimate the rotational inertia.
    Mps = 2.0/1000 # kg
    Rps = 0.0025 # meters
    Mpp = 0.25/1000 # kg
    wpp = 0.5/100 # meters
    lpp = 2.54/100 # meters
    Jprop = 0.5*Mps*(Rps**2)
    Jprop += 0.1*Mpp*(9*(lpp**2) + 4*(wpp**2))
    RPM_max = 12000
    w_max = (2*np.pi*RPM_max)/60
    w_max *= 1 if cw else -1
    # Matching w_max and t_mech ~ 0.1s
    Bd = Jprop/120
    Bm = 10*Bd
    Gamma_int_max = (w_max**2)*Bd
    # Normalise coefficients by the inertia so dw/dt is in rad/s^2.
    Gamma_int_max /= Jprop
    Bm /= Jprop
    Bd /= Jprop
    THRUST_MAX = 0.04 * 9.8 # kg * m/s^2 = Newtons
    Bt = THRUST_MAX/(w_max**2)
    def dwdt(w, t):
        # Linear internal torque minus quadratic drag; signs flip for ccw.
        if cw:
            return d*Gamma_int_max - d*Bm*w - Bd*(w**2)
        else:
            return -d*Gamma_int_max - d*Bm*w + Bd*(w**2)
    tf = 0.3 #s which we would like to be three exponential timescales
    N = 100
    time_s = np.linspace(0, tf, N)
    # Start from rest (the d=0 steady state) and integrate forward.
    w0 = w_ss(0, cw, Gamma_int_max, Bm, Bd)
    w = odeint(dwdt, w0, t = time_s)
    ws = w_ss(d, cw, Gamma_int_max, Bm, Bd) * np.ones(len(time_s))
    if not thrust:
        plt.plot(time_s, w, "k-")
        plt.plot(time_s, ws, 'r--')
    else:
        plt.plot(time_s, Bt*w*w, 'k-')
if __name__ == "__main__":
    # Sweep duty cycles 0..1 and overlay either frequency or thrust curves
    # on a single figure, then display it.
    thrust = True
    cw = True
    if not thrust:
        ds = np.linspace(0,1,10)
        for d in ds:
            sim(d, cw, thrust)
        plt.xlabel("Time (s)")
        plt.ylabel("w (rad/s)")
        plt.title("Propellor Frequency for a DC Brushed Motor\nVarying PWM Duty Cycle with Linear Torque, Quadratic Drag\nGm ~ w_max^2 Bd; Bm ~ 10 Bd; Bd ~ Jp/120\nyield a t~100ms timescale for d varying in [0.1,1]")
        plt.show()
    else:
        ds = np.linspace(0,1,10)
        for d in ds:
            sim(d, cw, thrust)
        plt.xlabel("Time (s)")
        plt.ylabel("Thrust (N)")
        plt.title("Thrust from a DC Brushed Motor\n")
        plt.show()
| gpl-3.0 |
hagabbar/pycbc_copy | pycbc/results/legacy_grb.py | 1 | 22775 | #!/usr/bin/env python
# Copyright (C) 2015 Andrew R. Williamson
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# =============================================================================
# Preamble
# =============================================================================
from __future__ import division
import re
import os
from argparse import ArgumentParser
import matplotlib
# Only if a backend is not already set ... This should really *not* be done
# here, but in the executables you should set matplotlib.use()
# This matches the check that matplotlib does internally, but this *may* be
# version dependenant. If this is a problem then remove this and control from
# the executables directly.
import sys
if not 'matplotlib.backends' in sys.modules:
matplotlib.use('agg')
import matplotlib.pyplot as plt
from pycbc_glue import markup, segments
from lal.gpstime import gps_to_utc, LIGOTimeGPS
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
from matplotlib.ticker import ScalarFormatter
from pycbc.results.color import ifo_color
def initialize_page(title, style, script, header=None):
    """Return a markup.py page whose HTML head is pre-populated with the
    given title, stylesheet(s), script(s) and optional extra header."""
    html_page = markup.page(mode="strict_html")
    # Disable escaping so raw HTML snippets can be embedded verbatim.
    html_page._escape = False
    html_page.init(title=title, css=style, script=script, header=header)
    return html_page
def write_banner(title, text=' '):
    """Return a markup.py fragment holding the page banner: an <h1>
    title, an <h3> subtitle, two horizontal rules, and the opening of
    the main "container" div."""
    banner = markup.page(mode="strict_html")
    banner._escape = False
    banner.div(id="header")
    banner.h1()
    banner.add(title)
    banner.h1.close()
    banner.h3()
    banner.add(text)
    banner.h3.close()
    banner.hr(class_="short")
    banner.hr(class_="long")
    banner.div.close()
    # Deliberately left open: subsequent content is appended inside the
    # container div and it is closed elsewhere.
    banner.div(id="container")
    return banner
def write_table(page, headers, data, cl=''):
    """
    Write table in html.

    Parameters
    ----------
    page : markup.page
        Page the table is appended to.
    headers : list
        Column headings (or row headings when ``cl='list'``).
    data : list
        Either a single flat row or a list of rows; a single row is
        wrapped automatically.
    cl : str
        CSS class for the table; the special value ``'list'`` renders a
        two-column header/value listing instead of a grid.

    Returns
    -------
    markup.page
        The same page object, for chaining.
    """
    page.table(class_=cl)

    # list
    if cl=='list':
        for i in range(len(headers)):
            page.tr()
            page.th()
            page.add('%s' % headers[i])
            page.th.close()
            page.td()
            page.add('%s' % data[i])
            page.td.close()
            page.tr.close()
    else:
        page.tr()
        for n in headers:
            page.th()
            page.add('%s' % n)
            page.th.close()
        page.tr.close()

        # A single flat row may be passed directly; wrap it so the loop
        # below always iterates over rows.  (Replaces the fragile
        # re.search('list', str(type(data[0]))) type-name sniffing.)
        if data and not isinstance(data[0], list):
            data = [data]

        for row in data:
            page.tr()
            for item in row:
                page.td()
                page.add('%s' % item)
                page.td.close()
            page.tr.close()

    page.table.close()
    return page
def write_summary(page, args, ifos, skyError=None, ipn=False, ipnError=False):
    """
    Write summary of information to markup.page object page.

    For an IPN trigger the sky localisation is an error box read from a
    search file, and inter-detector time delays are tabulated for each
    sky point; otherwise a single (RA, DEC) row is written.
    """
    from pylal import antenna
    gps = args.start_time
    grbdate = gps_to_utc(LIGOTimeGPS(gps))\
                        .strftime("%B %d %Y, %H:%M:%S %ZUTC")
    page.h3()
    page.add('Basic information')
    page.h3.close()

    if ipn:
        ra = []
        dec = []
        td1 = []
        td2 = []
        td3 = []
        timedelay = {}
        # Sky points of the IPN error box, one "RA DEC" pair per line.
        search_file = '../../../S5IPN_GRB%s_search_180deg.txt' % args.grb_name
        for line in open(search_file):
            ra.append(line.split()[0])
            dec.append(line.split()[1])
        th1 = [ 'GPS', 'Date', 'Error Box (sq.deg.)', 'IFOs' ]
        td1 = [ gps, grbdate, ipnError, ifos ]
        th2 = [ 'RA', 'DEC' ]
        th3 = ['Timedelays (ms)', '', '' ]
        for ra_i,dec_i in zip(ra,dec):
            td_i = [ ra_i, dec_i ]
            td2.append(td_i)
        # Split the concatenated IFO string (e.g. 'H1L1V1') into pairs.
        ifo_list = [ ifos[i*2:(i*2)+2] for i in range(int(len(ifos)/2)) ]
        for j in td2:
            for p in range(0, len(ifo_list)):
                for q in range(0, len(ifo_list)):
                    pairs = [ifo_list[p], ifo_list[q]]
                    ifo_pairs = "".join(pairs)
                    timedelay[ifo_pairs] = antenna.timeDelay(int(gps),
                            float(j[0]), float(j[1]), 'degree', ifo_list[p],
                            ifo_list[q])
                    timedelay[ifo_pairs]="%.4f" % timedelay[ifo_pairs]
        if ifos == 'H1H2L1':
            td3.append(['H1L1: %f' % float(timedelay['H1L1'])])
        if ifos == 'H1H2L1V1':
            td3.append(['H1L1: %f' % float(timedelay['H1L1']),
                        'H1V1: %f' % float(timedelay['H1V1']),
                        'L1V1: %f' % float(timedelay['L1V1'])])
        if ifos == 'L1V1':
            td3.append(['L1V1: %f' % float(timedelay['L1V1'])])
        page = write_table(page, th1, td1)
        page = write_table(page, th2, td2)
        page = write_table(page, th3, td3)
    else:
        ra = args.ra
        dec = args.dec
        if skyError:
            th = [ 'GPS', 'Date', 'RA', 'DEC', 'Sky Error', 'IFOs' ]
            td = [ gps, grbdate, ra, dec, skyError, ifos ]
        else:
            th = [ 'GPS', 'Date', 'RA', 'DEC', 'IFOs' ]
            td = [ gps, grbdate, ra, dec, ifos ]
        page = write_table(page, th, td)

    return page
def write_antenna(page, args, seg_plot=None, grid=False, ipn=False):
    """
    Write antenna factors to merkup.page object page and generate John's
    detector response plot.

    For IPN triggers the antenna factor F_q is tabulated for every sky
    point of the error box; otherwise one value per detector at the
    trigger's (RA, DEC) is written.  Optionally embeds the science
    segment plot and the sky-grid thumbnail.
    """
    from pylal import antenna

    page.h3()
    page.add('Antenna factors and sky locations')
    page.h3.close()

    th = []
    td = []
    th2 = []
    td2 = []
    ifos = [args.ifo_tag[i:i+2] for i in range(0, len(args.ifo_tag), 2)]

    if ipn:
        antenna_ifo = {}
        ra = []
        dec = []
        # FIXME: Remove hardcoding here and show this in all cases
        search_file = open('../../../S5IPN_GRB%s_search_180deg.txt'
                           % args.grb_name)
        for line in search_file:
            ra.append(line.split()[0])
            dec.append(line.split()[1])
        for ifo in ifos:
            antenna_ifo[ifo] = []
            for k, l in zip(ra, dec):
                # Fourth return value is the combined antenna factor F_q.
                _, _, _, f_q = antenna.response(args.start_time, float(k),
                                                float(l), 0.0, 0.0, 'degree',
                                                ifo)
                antenna_ifo[ifo].append(round(f_q,3))
        dectKeys = antenna_ifo.keys()

        for elements in range(len(antenna_ifo.values()[0])):
            newDict={}

            for detectors in range(len(antenna_ifo.keys())):
                newDict[dectKeys[detectors]] = antenna_ifo[\
                                               dectKeys[detectors]][elements]
            for key in newDict.keys():
                th.append(key)
            td.append(newDict.values())
        page = write_table(page, list(set(th)), td)
    for ifo in ifos:
        _, _, _, f_q = antenna.response(args.start_time, args.ra, args.dec,
                                        0.0, 0.0, 'degree',ifo)
        th.append(ifo)
        td.append(round(f_q, 3))

    #FIXME: Work out a way to make these external calls safely
    #cmmnd = 'projectedDetectorTensor --gps-sec %d --ra-deg %f --dec-deg %f' \
    #         % (args.start_time,args.ra, args.dec)
    #for ifo in ifos:
    #    if ifo == 'H1':
    #        cmmnd += ' --display-lho'
    #    elif ifo == 'L1':
    #        cmmnd += ' --display-llo'
    #    elif ifo == 'V1':
    #        cmmnd += ' --display-virgo'
    #status = make_external_call(cmmnd)

    page = write_table(page, th, td)

#    plot = markup.page()
#    p = "projtens.png"
#    plot.a(href=p, title="Detector response and polarization")
#    plot.img(src=p)
#    plot.a.close()
#    th2 = ['Response Diagram']
#    td2 = [plot() ]

        # FIXME: Add these in!!
#    plot = markup.page()
#    p = "ALL_TIMES/plots_clustered/GRB%s_search.png"\
#        % args.grb_name
#    plot.a(href=p, title="Error Box Search")
#    plot.img(src=p)
#    plot.a.close()
#    th2.append('Error Box Search')
#    td2.append(plot())

#    plot = markup.page()
#    p = "ALL_TIMES/plots_clustered/GRB%s_simulations.png"\
#        % args.grb_name
#    plot.a(href=p, title="Error Box Simulations")
#    plot.img(src=p)
#    plot.a.close()
#    th2.append('Error Box Simulations')
#    td2.append(plot())

    if seg_plot is not None:
        plot = markup.page()
        p = os.path.basename(seg_plot)
        plot.a(href=p, title="Science Segments")
        plot.img(src=p)
        plot.a.close()
        th2.append('Science Segments')
        td2.append(plot())

    plot = markup.page()
    p = "ALL_TIMES/plots_clustered/GRB%s_sky_grid.png"\
        % args.grb_name
    plot.a(href=p, title="Sky Grid")
    plot.img(src=p)
    plot.a.close()
    th2.append('Sky Grid')
    td2.append(plot())

#    plot = markup.page()
#    p = "GRB%s_inspiral_horizon_distance.png"\
#        % args.grb_name
#    plot.a(href=p, title="Inspiral Horizon Distance")
#    plot.img(src=p)
#    plot.a.close()
#    th2.append('Inspiral Horizon Distance')
#    td2.append(plot())

    page = write_table(page, th2, td2)

    return page
def write_offsource(page, args, grbtag, onsource=False):
    """
    Write offsource SNR versus time plots to markup.page object page.
    """
    def linked_image(path, title):
        # Thumbnail image wrapped in a link to the full-size file.
        img = markup.page()
        img.a(href=path, title=title)
        img.img(src=path)
        img.a.close()
        return img()

    # onsource pages show the full analysis span; otherwise offsource only.
    epoch_dir = 'ALL_TIMES' if onsource else 'OFFSOURCE'

    th = ['Re-weighted SNR', 'Coherent SNR']
    td = [linked_image("%s/plots_clustered/GRB%s_bestnr_vs_time_noinj.png"
                       % (epoch_dir, grbtag), "Coherent SNR versus time")]
    td.append(linked_image("%s/plots_clustered/GRB%s_triggers_vs_time_noinj.png"
                           % (epoch_dir, grbtag), "Coherent SNR versus time"))

    ifos = [args.ifo_tag[i:i+2] for i in range(0, len(args.ifo_tag), 2)]
    for ifo in ifos:
        th.append('%s SNR' % ifo)
        td.append(linked_image(
            "%s/plots_clustered/GRB%s_%s_triggers_vs_time_noinj.png"
            % (epoch_dir, grbtag, ifo), "%s SNR versus time" % ifo))

    page = write_table(page, th, td)
    return page
def write_chisq(page, injList, grbtag):
    """
    Write injection chisq plots to markup.page object page.

    One table row per signal-consistency test (bank veto, auto veto,
    chi-square, mchirp), one column per injection set plus the offsource
    background.
    """
    # Normalise the set of columns once.  Previously, when injList was
    # empty it was rebound to ['OFFSOURCE'] and the loop over
    # `injList + ['OFFSOURCE']` then appended a *second* OFFSOURCE cell,
    # producing a duplicated column that did not match the 2-column header.
    if injList:
        columns = injList + ['OFFSOURCE']
    else:
        columns = ['OFFSOURCE']
    th = [''] + columns
    td = []

    plots = ['bank_veto','auto_veto','chi_square', 'mchirp']

    for test in plots:
        pTag = test.replace('_',' ').title()
        d = [pTag]
        for inj in columns:
            plot = markup.page()
            p = "%s/plots_clustered/GRB%s_%s_vs_snr_zoom.png" % (inj, grbtag,
                                                                 test)
            plot.a(href=p, title="%s %s versus SNR" % (inj, pTag))
            plot.img(src=p)
            plot.a.close()
            d.append(plot())
        td.append(d)

    page = write_table(page, th, td)
    return page
def write_inj_snrs(page, ifos, injList, grbtag):
    """
    Write injection SNR plots to markup.page object page.

    One row for the null statistic and one per-detector SNR row, with one
    column per injection set plus the offsource background.
    """
    # Normalise the set of columns once.  Previously, when injList was
    # empty it was rebound to ['OFFSOURCE'] and the loop over
    # `injList + ['OFFSOURCE']` duplicated the OFFSOURCE column relative
    # to the 2-column header (same defect as in write_chisq).
    if injList:
        columns = injList + ['OFFSOURCE']
    else:
        columns = ['OFFSOURCE']
    th = [''] + columns
    td = []

    ifos = [ifos[i:i+2] for i in range(0, len(ifos), 2)]
    plots = ['null_stat2']+['%s_snr' % ifo for ifo in ifos]

    for row in plots:
        pTag = row.replace('_',' ').title()
        d = [pTag]
        for inj in columns:
            plot = markup.page()
            p = "%s/plots_clustered/GRB%s_%s_vs_snr_zoom.png" % (inj, grbtag,
                                                                 row)
            plot.a(href=p, title="%s %s versus SNR" % (inj, pTag))
            plot.img(src=p)
            plot.a.close()
            d.append(plot())
        td.append(d)

    page = write_table(page, th, td)
    return page
def write_found_missed(page, args, injList):
    """
    Write injection found/missed plots to markup.page object page.

    One row per distance/effective-distance plot type, one column per
    injection set, plus a final row of links to the "quiet found
    triggers" pages.
    """
    th = ['']+injList
    td = []
    #FIXME: Work out a way to make externals calls safely
    #d = ['Number of injections']
    #for inj in injList:
    #    cmmnd = 'lwtprint ../*' + inj + '*MISSED*xml -t sim_inspiral | wc -l'
    #    output,status = make_external_call(cmmnd, shell=True)
    #    numInjs = int(output)
    #    cmmnd = 'lwtprint ../*' + inj + '*FOUND*xml -t sim_inspiral | wc -l'
    #    output,status = make_external_call(cmmnd, shell=True)
    #    numInjs += int(output)
    #    d.append(str(numInjs))
    #td.append(d)
    plots = []
    text = {}
    ifos = [args.ifo_tag[i:i+2] for i in range(0, len(args.ifo_tag), 2)]
    plots.extend(['dist', 'dist_time'])
    text['dist'] = 'Dist vs Mchirp'
    text['dist_time'] = 'Dist vs Time'
    # One effective-distance plot pair per detector (keyed by site letter).
    for ifo in ifos:
        plots.extend(['effdist_%s' % ifo[0].lower(),\
                      'effdist_time_%s' % ifo[0].lower()])
        text['effdist_%s' % ifo[0].lower()] = 'Eff. dist. %s vs Mchirp' % ifo
        text['effdist_time_%s' % ifo[0].lower()] = 'Eff. dist %s vs Time' % ifo

    for row in plots:
        pTag = text[row]
        d = [pTag]
        for inj in injList:
            plot = markup.page()
            p = "%s/efficiency_OFFTRIAL_1/found_missed_injections_%s.png"\
                % (inj, row)
            plot.a(href=p, title=pTag)
            plot.img(src=p)
            plot.a.close()
            d.append(plot())
        td.append(d)

    td.append(['Close injections without FAP = 0']+\
              ['<a href="%s/efficiency_OFFTRIAL_1/quiet_found_triggers.html"> '
               'here</a>' % inj for inj in injList])

    page = write_table(page, th, td)
    return page
def write_recovery(page, injList):
    """
    Write injection recovery plots to markup.page object page.

    One row per sky-error plot type (vs time, mchirp, distance), one
    column per injection set.
    """
    th = ['']+injList
    td = []

    plots = ['sky_error_time','sky_error_mchirp','sky_error_distance']
    text = { 'sky_error_time':'Sky error vs time',\
             'sky_error_mchirp':'Sky error vs mchirp',\
             'sky_error_distance':'Sky error vs distance' }

    for row in plots:
        pTag = text[row]
        d = [pTag]
        for inj in injList:
            # (A redundant duplicated `plot = markup.page()` construction
            # was removed here.)
            plot = markup.page()
            p = "%s/efficiency_OFFTRIAL_1/found_%s.png" % (inj, row)
            plot.a(href=p, title=pTag)
            plot.img(src=p)
            plot.a.close()
            d.append(plot())
        td.append(d)

    page = write_table(page, th, td)
    return page
def write_loudest_events(page, bins, onsource=False):
    """
    Write loudest-event FAP plots to markup.page object page.

    One row per statistic (BestNR, SNR, SNR-after-cuts), one column per
    chirp-mass bin.  Links to the detailed loudest-events page are
    appended below the table.
    """
    th = ['']+['Mchirp %s - %s' % tuple(bin) for bin in bins]
    td = []

    plots = ['BestNR','SNR']

    if onsource:
        trial = 'ONSOURCE'
    else:
        trial = 'OFFTRIAL_1'

    for pTag in plots:
        row = pTag.lower()
        d = [pTag]
        for bin in bins:
            b = '%s_%s' % tuple(bin)
            plot = markup.page()
            p = "%s/efficiency/%s_vs_fap_%s.png" % (trial, row, b)
            plot.a(href=p, title="FAP versus %s" % pTag)
            plot.img(src=p)
            plot.a.close()
            d.append(plot())
        td.append(d)

    row = 'snruncut'
    d = ['SNR after cuts <br> have been applied']
    for bin in bins:
        b = '%s_%s' % tuple(bin)
        plot = markup.page()
        p = "%s/efficiency/%s_vs_fap_%s.png" % (trial, row, b)
        # NOTE(review): pTag here is stale from the previous loop (always
        # 'SNR'); presumably the title should describe the after-cuts SNR
        # -- confirm intended wording before changing.
        plot.a(href=p, title="FAP versus %s" % pTag)
        plot.img(src=p)
        plot.a.close()
        d.append(plot())
    td.append(d)

    page = write_table(page, th, td)
    page.add('For more details on the loudest offsource events see')
    page.a(href='%s/efficiency/loudest_offsource_trigs.html' % (trial))
    page.add('here.')
    page.a.close()

    return page
def write_exclusion_distances(page, trial, injList, massbins, reduced=False,
                              onsource=False):
    """
    Write FAPs, efficiency/exclusion plots and exclusion distances for
    each chirp-mass bin to markup.page object page.

    Files are now read via context managers (the originals used
    open/close pairs and shadowed the builtin ``file``).
    """
    # FAP of the loudest event per mass bin; "-2" encodes "no event".
    with open('%s/efficiency/loud_numbers.txt' % (trial), 'r') as loud_file:
        FAPS = []
        for line in loud_file:
            line = line.replace('\n','')
            if line == "-2":
                FAPS.append('No event')
            else:
                FAPS.append(line)

    th = ['']+['Mchirp %s - %s' % tuple(bin) for bin in massbins]
    td = ['FAP']+FAPS
    page = write_table(page, th, td)
    page.add('For more details on the loudest onsource events see')
    page.a(href='%s/efficiency/loudest_events.html' % (trial))
    page.add('here.')
    page.a.close()

    # Reduced pages (or runs with no injections) stop after the FAP table.
    if reduced or not injList:
        return page

    page.h3()
    page.add('Detection efficiency plots - injections louder than loudest '
             'background trigger')
    page.h3.close()
    th = injList
    td = []
    d = []
    for inj in injList:
        plot = markup.page()
        p = "%s/efficiency_%s/BestNR_max_efficiency.png" % (inj, trial)
        plot.a(href=p, title="Detection efficiency")
        plot.img(src=p)
        plot.a.close()
        d.append(plot())
    td.append(d)
    page = write_table(page, th, td)

    page.h3()
    page.add('Exclusion distance plots - injections louder than loudest '
             'foreground trigger')
    page.h3.close()
    th = injList
    td = []
    d = []
    for inj in injList:
        plot = markup.page()
        p = "%s/efficiency_%s/BestNR_on_efficiency.png" % (inj, trial)
        plot.a(href=p, title="Exclusion efficiency")
        plot.img(src=p)
        plot.a.close()
        d.append(plot())
    td.append(d)
    page = write_table(page, th, td)

    for percentile in [90, 50]:
        page.h3()
        page.add('%d%% confidence exclusion distances (Mpc)' % percentile)
        th = injList
        td = []
        d = []
        for inj in injList:
            with open('%s/efficiency_%s/exclusion_distance_%d.txt'
                      % (inj, trial, percentile), 'r') as excl_file:
                for line in excl_file:
                    line = line.replace('\n','')
                    excl_dist = float(line)
                    d.append(excl_dist)
            # NOTE(review): td collects a reference to the *same* list d on
            # every iteration (and inside the inj loop) -- looks like it
            # should append one fresh row per injection set; behaviour kept
            # unchanged pending confirmation.
            td.append(d)
        page = write_table(page, th, td)
        page.h3.close()

    return page
def make_grb_segments_plot(wkflow, science_segs, trigger_time, trigger_name,
                           out_dir, coherent_seg=None, fail_criterion=None):
    """Plot per-detector science segments around a GRB trigger and save the
    figure to ``out_dir``.

    Each detector gets its own horizontal band of coloured rectangles; the
    trigger time, the coherent analysis segment and an optional failure
    criterion window are overlaid.  Returns
    ``[ifos, plot_name, extent, plot_url]``.
    """
    ifos = wkflow.ifos
    if len(science_segs.keys()) == 0:
        # No science data at all: fall back to the configured workflow span.
        extent = segments.segment(int(wkflow.cp.get("workflow", "start-time")),
                                  int(wkflow.cp.get("workflow", "end-time")))
    else:
        # Pad symmetrically so the trigger time sits inside the plotted span.
        pltpad = [science_segs.extent_all()[1] - trigger_time,
                  trigger_time - science_segs.extent_all()[0]]
        extent = segments.segmentlist([science_segs.extent_all(),
                                       segments.segment(trigger_time
                                                        - pltpad[0],
                                                        trigger_time
                                                        + pltpad[1])]).extent()

    ifo_colors = {}
    for ifo in ifos:
        ifo_colors[ifo] = ifo_color(ifo)
        if ifo not in science_segs.keys():
            science_segs[ifo] = segments.segmentlist([])

    # Make plot
    fig, subs = plt.subplots(len(ifos), sharey=True)
    plt.xticks(rotation=20, ha='right')
    for sub, ifo in zip(subs, ifos):
        for seg in science_segs[ifo]:
            sub.add_patch(Rectangle((seg[0], 0.1), abs(seg), 0.8,
                                    facecolor=ifo_colors[ifo],
                                    edgecolor='none'))
        if coherent_seg:
            # Solid marker/shading when this detector participates in the
            # coherent segment; dashed/dotted otherwise.
            if len(science_segs[ifo]) > 0 and \
                    coherent_seg in science_segs[ifo]:
                sub.plot([trigger_time, trigger_time], [0, 1], '-',
                         c='orange')
                sub.add_patch(Rectangle((coherent_seg[0], 0),
                                        abs(coherent_seg), 1, alpha=0.5,
                                        facecolor='orange', edgecolor='none'))
            else:
                sub.plot([trigger_time, trigger_time], [0, 1], ':',
                         c='orange')
                sub.plot([coherent_seg[0], coherent_seg[0]], [0, 1], '--',
                         c='orange', alpha=0.5)
                sub.plot([coherent_seg[1], coherent_seg[1]], [0, 1], '--',
                         c='orange', alpha=0.5)
        else:
            sub.plot([trigger_time, trigger_time], [0, 1], ':k')
        if fail_criterion:
            # Dashed when the detector has data, solid when it has none.
            if len(science_segs[ifo]) > 0:
                style_str = '--'
            else:
                style_str = '-'
            sub.plot([fail_criterion[0], fail_criterion[0]], [0, 1], style_str,
                     c='black', alpha=0.5)
            sub.plot([fail_criterion[1], fail_criterion[1]], [0, 1], style_str,
                     c='black', alpha=0.5)
        sub.set_frame_on(False)
        sub.set_yticks([])
        sub.set_ylabel(ifo, rotation=45)
        sub.set_ylim([0, 1])
        sub.set_xlim([float(extent[0]), float(extent[1])])
        sub.get_xaxis().get_major_formatter().set_useOffset(False)
        sub.get_xaxis().get_major_formatter().set_scientific(False)
        sub.get_xaxis().tick_bottom()
        # Only the bottom panel shows tick labels; the others share its axis.
        if sub is subs[-1]:
            sub.tick_params(labelsize=10, pad=1)
        else:
            sub.get_xaxis().set_ticks([])
            sub.get_xaxis().set_ticklabels([])

    xmin, xmax = fig.axes[-1].get_xaxis().get_view_interval()
    ymin, _ = fig.axes[-1].get_yaxis().get_view_interval()
    # Draw a shared baseline under the bottom panel.
    fig.axes[-1].add_artist(Line2D((xmin, xmax), (ymin, ymin), color='black',
                                   linewidth=2))
    fig.axes[-1].set_xlabel('GPS Time')

    fig.axes[0].set_title('Science Segments for GRB%s' % trigger_name)
    plt.tight_layout()
    fig.subplots_adjust(hspace=0)

    plot_name = 'GRB%s_segments.png' % trigger_name
    plot_url = 'file://localhost%s/%s' % (out_dir, plot_name)
    fig.savefig('%s/%s' % (out_dir, plot_name))

    return [ifos, plot_name, extent, plot_url]
| gpl-3.0 |
ebolyen/q2d2 | q2d2/__init__.py | 2 | 16324 | #!/usr/bin/env python
__version__ = "0.0.0-dev"
import random
import io
import itertools
from collections import defaultdict, namedtuple
import hashlib
import os
import shutil
import glob
import math
from functools import partial
import numpy as np
import pandas as pd
from IPython.html import widgets
from IPython.html.widgets import interactive, fixed, IntSlider
from IPython.display import display
from scipy.optimize import minimize_scalar
import skbio
from skbio.diversity.beta import pw_distances
import skbio.diversity.alpha
from skbio.stats.ordination import pcoa
from skbio.stats import subsample_counts
from skbio.util import safe_md5
from q2d2.wui import metadata_controls
# Maps each logical data type to the hidden filename used to store it inside
# a study directory.
data_type_to_study_filename = {'sample_metadata': '.sample-md',
                               'otu_metadata': '.otu-md',
                               'unrarefied_biom': '.biom',
                               'rarefied_biom': '.rarefied-biom',
                               'tree': '.tree'}

# this may make sense as a database schema. can we use an existing schema, e.g. Qiita?
# A workflow category groups workflows by the kind of BIOM table they need.
WorkflowCategory = namedtuple('WorkflowCategory', ['title'])
# A workflow declares the data types it consumes (inputs) and produces (outputs).
Workflow = namedtuple('Workflow', ['title', 'inputs', 'outputs', 'category_id'])

workflow_categories = {
    'no-biom': WorkflowCategory('No BIOM table'),
    'raw-biom': WorkflowCategory('Raw (unnormalized) BIOM table'),
    'normalized-biom': WorkflowCategory('Normalized BIOM table')
}

# Registry of the analyses q2d2 can run; keys are workflow ids used throughout
# this module (see get_workflow_info / get_study_state).
workflows = {
    'rarefy-biom': Workflow(
        'Rarefy BIOM table', {'unrarefied_biom'}, {'rarefied_biom'}, 'raw-biom'),
    'biom-to-taxa-plots': Workflow(
        'Taxonomy plots', {'unrarefied_biom', 'sample_metadata', 'otu_metadata'}, {}, 'raw-biom'),
    'biom-to-adiv': Workflow(
        'Alpha diversity', {'rarefied_biom', 'sample_metadata'}, {}, 'normalized-biom'),
    'biom-to-bdiv': Workflow(
        'Beta diversity', {'rarefied_biom', 'sample_metadata'}, {}, 'normalized-biom')
}
def get_data_info(study_id):
    """Return (data_type, filename, exists) triples for every known data type."""
    present = get_existing_data_types(study_id)
    return [(dtype, fname, dtype in present)
            for dtype, fname in data_type_to_study_filename.items()]
def get_workflow_info(workflow_id):
    """Describe a registered workflow as a JSON-serializable dict."""
    wf = workflows[workflow_id]
    info = {'workflow-id': workflow_id}
    info['title'] = wf.title
    info['inputs'] = list(wf.inputs)
    info['outputs'] = list(wf.outputs)
    info['category-id'] = wf.category_id
    return info
def get_workflow_category_info(category_id):
    """Describe a workflow category as a JSON-serializable dict."""
    category = workflow_categories[category_id]
    return {'category-id': category_id,
            'title': category.title}
def get_study_state(study_id):
    """Summarize a study: which workflows are runnable, plus MD5s of its data files."""
    available = get_existing_data_types(study_id)
    runnable, blocked = [], []
    for wf_id, wf in workflows.items():
        # A workflow is executable when all of its declared inputs exist.
        (runnable if wf.inputs.issubset(available) else blocked).append(wf_id)
    checksums = {}
    for dtype in available:
        path = get_data_filepath(dtype, study_id)
        # should we be using sha256 instead?
        with open(path, 'rb') as fh:
            checksums[path] = safe_md5(fh).hexdigest()
    return {'study-id': study_id,
            'workflow': {'exe': runnable, 'nexe': blocked},
            'data': checksums}
def get_system_info():
    """Report q2d2 system details (currently just the package version)."""
    # what other info goes here? dependencies?
    return dict(version=__version__)
def get_existing_data_types(study_id):
    """Return the set of data types whose files are present for *study_id*."""
    present = set()
    for dtype in data_type_to_study_filename:
        try:
            get_data_filepath(dtype, study_id)
        except FileNotFoundError:
            continue
        present.add(dtype)
    return present
def create_index(study_id, command):
    """Write the study's index.md, generated from the markdown files present.

    Parameters
    ----------
    study_id : str
        Study directory name.
    command : str
        Command line recorded in the generated index.
    """
    markdown_s = get_index_markdown(study_id, command)
    output_filepath = os.path.join(study_id, 'index.md')
    # Fixed: use a context manager so the handle is flushed and closed
    # deterministically (the original open(...).write(...) leaked it).
    with open(output_filepath, 'w') as index_file:
        index_file.write(markdown_s)
def get_data_filepath(data_type, study_id):
    """Return the path of *data_type* for *study_id*; raise FileNotFoundError if absent."""
    path = os.path.join(study_id, data_type_to_study_filename[data_type])
    if os.path.exists(path):
        return path
    raise FileNotFoundError(path)
def create_input_files(study_id, **kwargs):
    """Copy user-supplied files into the study directory under canonical names.

    Keyword names must be keys of data_type_to_study_filename; values are
    source file paths.
    """
    for input_type, src in kwargs.items():
        dest = os.path.join(study_id, data_type_to_study_filename[input_type])
        shutil.copy(src, dest)
def load_table(rarefied=False):
    """Load the (optionally rarefied) BIOM table as a pandas DataFrame of counts."""
    key = 'rarefied_biom' if rarefied else 'unrarefied_biom'
    # First line is a provenance comment written by store_table, hence skiprows=1.
    table = pd.read_csv(data_type_to_study_filename[key], sep='\t',
                        skiprows=1, index_col=0)
    table.index = table.index.astype(str)
    # Taxonomy strings belong in the OTU metadata, not the count table.
    if 'taxonomy' in table:
        table.drop('taxonomy', axis=1, inplace=True)
    return table
def store_table(table, rarefied=False):
    """Write *table* to the study's BIOM file as TSV with a provenance header."""
    key = 'rarefied_biom' if rarefied else 'unrarefied_biom'
    with open(data_type_to_study_filename[key], 'w') as out:
        out.write('# Constructed by [q2d2](github.com/gregcaporaso/q2d2)\n')
        table.to_csv(out, index_label="#OTU ID", sep='\t')
# Convenience aliases bound to the rarefied table file.
load_rarefied_table = partial(load_table, rarefied=True)
store_rarefied_table = partial(store_table, rarefied=True)
def load_tree():
    """Load the study's phylogenetic tree from its newick file."""
    return skbio.TreeNode.read(data_type_to_study_filename['tree'], format='newick')
def load_sample_metadata():
    """Load the sample metadata TSV, indexed by sample id (first column)."""
    return pd.read_csv(data_type_to_study_filename['sample_metadata'], sep='\t', index_col=0)
def load_otu_metadata():
    """Load OTU metadata: first two TSV columns become OTU id -> taxonomy."""
    return pd.read_csv(data_type_to_study_filename['otu_metadata'], sep='\t', names=['OTU ID', 'taxonomy'],
                       index_col=0, usecols=[0, 1], dtype=object)
def biom_to_adiv(metric, biom, tree=None):
    """Compute alpha diversity *metric* per sample column of *biom*.

    *tree* is consulted only for the 'faith_pd' metric.
    Returns a pd.Series indexed by sample id.
    """
    metric_f = getattr(skbio.diversity.alpha, metric)
    values = []
    for sample_id in biom.columns:
        counts = biom[sample_id]
        if metric == 'faith_pd':
            values.append(metric_f(counts, biom.index, tree))
        else:
            values.append(metric_f(counts))
    return pd.Series(values, index=biom.columns)
def compute_alphas(otu_table, tree=None,
                   metrics=('chao1',
                            'faith_pd',
                            'observed_otus')):
    """Compute several alpha diversity metrics for *otu_table*.

    Parameters
    ----------
    otu_table : pd.DataFrame
        Count table with samples as columns.
    tree : optional
        Passed through to biom_to_adiv; used only by 'faith_pd'.
    metrics : sequence of str
        Metric names resolvable in skbio.diversity.alpha.

    Returns
    -------
    dict mapping metric name to a per-sample pd.Series.
    """
    # Fixed: tuple default replaces the original mutable list default
    # (shared across calls); same values, so callers are unaffected.
    return {metric: biom_to_adiv(metric, otu_table, tree)
            for metric in metrics}
def biom_to_dm(metric, biom, tree=None):
    """Compute a pairwise distance matrix between samples (columns of *biom*).

    NOTE(review): *tree* is accepted but not forwarded here — presumably only
    non-phylogenetic metrics are intended; confirm against callers.
    """
    return pw_distances(metric=metric, counts=biom.T, ids=biom.columns)
def dm_to_pcoa(dm, sample_md, category):
    """Run PCoA on *dm* and plot the ordination colored by *category*."""
    ordination = pcoa(dm)
    ordination.plot(df=sample_md,
                    column=category,
                    axis_labels=['PC 1', 'PC 2', 'PC 3'],
                    title="Samples colored by %s." % category,
                    s=35)
def table_summary(df):
    """Print sample/observation counts and a per-sample sequence count summary."""
    per_sample_counts = df.sum()
    print("Samples: ", len(df.columns))
    print("Observations: ", len(df.index))
    print("Sequence/sample count detail:")
    print(per_sample_counts.describe())
def get_workflow_template_filepath(workflow_id):
    """Return the packaged markdown template path for *workflow_id*."""
    package_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(package_dir, "markdown", "%s.md" % workflow_id)
def get_workflow_filepath(workflow_id, study_id):
    """Return the path of a workflow's markdown file inside the study directory."""
    filename = "%s.md" % workflow_id
    return os.path.join(study_id, filename)
def create_workflow(workflow_id, study_id):
    """Instantiate a workflow's markdown in the study; no-op if already present.

    Returns the workflow file path either way.
    """
    src = get_workflow_template_filepath(workflow_id)
    dst = get_workflow_filepath(workflow_id, study_id)
    if not os.path.exists(dst):
        shutil.copy(src, dst)
    return dst
def delete_workflow(workflow_id, study_id):
    """Remove a workflow's markdown file from the study directory."""
    os.remove(get_workflow_filepath(workflow_id, study_id))
def get_index_markdown(study_id, command):
    """Render the study index page from its template and the study's *.md files.

    Builds a table of contents from every markdown file in the study directory
    (sorted by filename) and substitutes it, along with the study id, version,
    and the creating command, into the 'index' template.
    """
    # Fixed: close the template file (the original open(...).read() leaked
    # the handle).
    with open(get_workflow_template_filepath('index')) as template_file:
        index_md_template = template_file.read()
    md_fps = sorted(glob.glob(os.path.join(study_id, '*.md')))
    toc = []
    for md_fp in md_fps:
        md_fn = os.path.split(md_fp)[1]
        # 'biom-to-adiv.md' -> 'Biom To Adiv'
        title = os.path.splitext(md_fn)[0].replace('-', ' ').title()
        toc.append(' * [%s](%s)' % (title, md_fn))
    return index_md_template.format('\n'.join(toc), study_id, __version__, command)
def _summarize_even_sampling_depth(even_sampling_depth, counts):
samples_retained = (counts >= even_sampling_depth)
num_samples_retained = samples_retained.sum()
num_sequences_retained = num_samples_retained * even_sampling_depth
return samples_retained, num_samples_retained, num_sequences_retained
def _get_depth_for_max_sequence_count(counts):
    """Find the even sampling depth that retains the most sequences."""
    summary = counts.describe()

    def neg_sequences_retained(depth):
        # Minimizing the negative maximizes sequences retained.
        return -1 * _summarize_even_sampling_depth(depth, counts)[2]

    opt = minimize_scalar(neg_sequences_retained,
                          bounds=(summary['min'], summary['max']),
                          method='bounded')
    return int(np.floor(opt.x))
def get_default_even_sampling_depth(biom):
    """Default rarefaction depth: the depth maximizing retained sequences."""
    return _get_depth_for_max_sequence_count(biom.sum())
def explore_sampling_depth(biom):
    """Interactive IPython widget for choosing a rarefaction depth.

    Shows a histogram of per-sample sequence counts with two reference lines:
    the depth maximizing retained sequences (dashed) and the slider's current
    depth (solid).  A reset button restores the default depth.
    """
    import seaborn as sns
    counts = biom.sum()
    count_summary = counts.describe()
    total_num_samples = len(counts)
    total_num_sequences = counts.sum()
    depth_for_max_sequence_count = _get_depth_for_max_sequence_count(counts)
    # Slider step is two orders of magnitude below the maximum count.
    sampling_depth_slider = IntSlider(min=count_summary['min'],
                                      max=count_summary['max'],
                                      step=10 ** (math.log(count_summary['max'], 10) - 2),
                                      value=depth_for_max_sequence_count)
    default_samples_retained, default_num_samples_retained, default_num_sequences_retained = \
        _summarize_even_sampling_depth(depth_for_max_sequence_count, counts)

    default_percent_samples_retained = default_num_samples_retained * 100 / total_num_samples
    default_percent_sequences_retained = default_num_sequences_retained * 100 / total_num_sequences

    label_s = "Depth {0}: {1:.2f}% of sequences and {2:.2f}% of samples retained."

    def f(even_sampling_depth):
        # Redraw the histogram plus both reference lines for the current depth.
        samples_retained, num_samples_retained, num_sequences_retained = \
            _summarize_even_sampling_depth(even_sampling_depth, counts)
        percent_samples_retained = num_samples_retained * 100 / total_num_samples
        percent_sequences_retained = num_sequences_retained * 100 / total_num_sequences
        ax = sns.distplot(counts)
        ax.set_xlabel("Number of sequences per sample")
        ax.set_ylabel("Frequency")
        line_label = label_s.format(depth_for_max_sequence_count,
                                    default_percent_sequences_retained,
                                    default_percent_samples_retained)
        ax.plot([depth_for_max_sequence_count, depth_for_max_sequence_count], ax.get_ylim(),
                'k--', label=line_label)
        line_label = label_s.format(even_sampling_depth,
                                    percent_sequences_retained,
                                    percent_samples_retained)
        ax.plot([even_sampling_depth, even_sampling_depth], ax.get_ylim(),
                'k-', label=line_label)
        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))

    def reset_depth(_):
        # Button callback: snap the slider back to the computed default.
        sampling_depth_slider.value = depth_for_max_sequence_count

    reset = widgets.Button(icon='fa-refresh')
    reset.on_click(reset_depth)

    w = interactive(f, even_sampling_depth=sampling_depth_slider)
    display(widgets.HBox(children=[w, reset]))
def rarify(biom, even_sampling_depth):
    """Rarefy *biom* to *even_sampling_depth*, dropping too-shallow samples.

    Samples whose total count is below the depth are excluded; the rest are
    randomly subsampled to exactly the depth.
    """
    kept_ids = []
    subsampled_columns = []
    for sample_id in biom.columns:
        counts = biom[sample_id]
        if counts.sum() >= even_sampling_depth:
            kept_ids.append(sample_id)
            subsampled_columns.append(
                subsample_counts(counts.astype(int), even_sampling_depth))
    return pd.DataFrame(np.asarray(subsampled_columns).T,
                        index=biom.index, columns=kept_ids)
def filter_dm_and_map(dm, map_df):
    """Restrict *dm* and *map_df* to the sample ids they have in common."""
    shared_ids = set(dm.ids) & set(map_df.index.values)
    filtered_dm = dm.filter(shared_ids)
    filtered_map = map_df.loc[shared_ids]
    return filtered_dm, filtered_map
def get_within_between_distances(map_df, dm, col):
    """Label every pairwise distance as 'Within' or 'Between' the *col* groups.

    Parameters
    ----------
    map_df : pd.DataFrame
        Sample metadata indexed by sample id.
    dm : distance matrix
        Pairwise distances, indexable by (id_1, id_2) tuples.
    col : str
        Metadata column defining group membership.

    Returns
    -------
    pd.DataFrame with columns 'Groups' ('Within'/'Between') and 'Distance'.
    """
    filtered_dm, filtered_map = filter_dm_and_map(dm, map_df)
    group_of = filtered_map[col].to_dict()
    # Fixed: removed an unused `row = []` accumulator and the parallel-list
    # zip; rows are built directly as (label, distance) tuples.
    rows = []
    for id_1, id_2 in itertools.combinations(filtered_map.index.tolist(), 2):
        label = 'Within' if group_of[id_1] == group_of[id_2] else 'Between'
        rows.append((label, filtered_dm[(id_1, id_2)]))
    return pd.DataFrame(data=rows, columns=['Groups', 'Distance'])
def distance_histogram(dm, category, metadata, metric='Distance', order=['Within', 'Between']):
    """Violin plot of within- vs between-group distances for *category*."""
    import seaborn as sns
    grouped = get_within_between_distances(metadata, dm, category)
    axes = sns.violinplot(x='Groups', y='Distance', data=grouped, order=order, orient='v')
    axes.set_xlabel(category)
    axes.set_ylabel(metric)
def interactive_distance_histograms(dm, sample_metadata):
    """Widget UI around distance_histogram: pick a category and which of the
    within/between groups to display."""
    def on_update(category, metadata, check_within, check_between):
        # Build the violin plot ordering from the two checkboxes.
        order = []
        if check_within:
            order.append('Within')
        if check_between:
            order.append('Between')
        distance_histogram(dm, category, metadata, order=order)

    check_within = widgets.Checkbox(description='Show within category', value=True)
    check_between = widgets.Checkbox(description='Show between category', value=True)
    extras = widgets.VBox(children=[check_within, check_between])

    return metadata_controls(sample_metadata, on_update, extras)
def distance_violinplots(dm, category, metadata, metric=None, order=['Within', 'Between']):
    """Violin plot of within/between distances; returns the matplotlib Axes."""
    import seaborn as sns
    grouped = get_within_between_distances(metadata, dm, category)
    axes = sns.violinplot(x='Groups', y='Distance', data=grouped, order=order, orient='v')
    axes.set_xlabel(category)
    axes.set_ylabel(metric)
    return axes
def interactive_distance_violinplots(dms, sample_metadata):
    """Widget UI around distance_violinplots: pick a metric (from the *dms*
    dict), a category, and which of the within/between groups to display."""
    def on_update(category, metadata, metric, check_within, check_between):
        # Build the violin plot ordering from the two checkboxes.
        order = []
        if check_within:
            order.append('Within')
        if check_between:
            order.append('Between')
        dm = dms[metric]
        distance_violinplots(dm, category, metadata, metric, order=order)

    check_within = widgets.Checkbox(description='Show within category', value=True)
    check_between = widgets.Checkbox(description='Show between category', value=True)
    metric_but = widgets.Dropdown(options=list(dms.keys()), description='Metrics')
    extras = widgets.VBox(children=[metric_but, check_within, check_between])

    return metadata_controls(sample_metadata, on_update, extras)
def compute_distance_matrices(
        otu_table,
        tree=None,
        metrics=('weighted_unifrac', 'unweighted_unifrac', 'braycurtis', 'jaccard')):
    """Compute a beta diversity distance matrix per metric.

    Parameters
    ----------
    otu_table : pd.DataFrame
        Count table, samples as columns, OTUs as rows.
    tree : optional
        Phylogenetic tree, required by the unifrac metrics.
    metrics : sequence of str
        Metric names understood by pw_distances.

    Returns
    -------
    dict mapping metric name to its distance matrix.
    """
    # Fixed: tuple default replaces the original mutable list default
    # (shared across calls); the values are unchanged.
    dms = {}
    for metric in metrics:
        dms[metric] = pw_distances(metric, otu_table.T.values,
                                   otu_table.columns.tolist(),
                                   tree=tree, otu_ids=otu_table.index.tolist())
    return dms
def interactive_plot_pcoa(metadata, dms):
    """Widget UI: PCoA scatter for a chosen metric, colored by a chosen category."""
    def on_update(category, metadata, metric):
        # Recompute the ordination on the samples shared by dm and metadata.
        dm = dms[metric]
        filtered_dm, _ = filter_dm_and_map(dm, metadata)
        pc = pcoa(filtered_dm)
        pc.plot(df=metadata,
                column=category,
                axis_labels=['PC 1', 'PC 2', 'PC 3'],
                s=35).set_size_inches(12, 9)

    metric_but = widgets.Dropdown(options=list(dms.keys()), description='Metrics')
    extras = widgets.VBox(children=[metric_but])
    return metadata_controls(metadata, on_update, extras)
wzbozon/statsmodels | statsmodels/stats/tests/test_diagnostic.py | 21 | 40146 | # -*- coding: utf-8 -*-
"""Tests for Regression Diagnostics and Specification Tests
Created on Thu Feb 09 13:19:47 2012
Author: Josef Perktold
License: BSD-3
currently all tests are against R
"""
#import warnings
#warnings.simplefilter("default")
# ResourceWarning doesn't exist in python 2
#warnings.simplefilter("ignore", ResourceWarning)
import os
import numpy as np
from numpy.testing import (assert_, assert_almost_equal, assert_equal,
assert_approx_equal, assert_allclose)
from nose import SkipTest
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import json
#import statsmodels.sandbox.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
cur_dir = os.path.abspath(os.path.dirname(__file__))
def compare_t_est(sp, sp_dict, decimal=(14, 14)):
    """Check a (statistic, pvalue) pair *sp* against the reference *sp_dict*.

    *decimal* gives the comparison precision for the statistic and the pvalue
    respectively.
    """
    stat_decimal, pval_decimal = decimal
    assert_almost_equal(sp[0], sp_dict['statistic'], decimal=stat_decimal)
    assert_almost_equal(sp[1], sp_dict['pvalue'], decimal=pval_decimal)
def notyet_atst():
    """Scratchpad of R reference values not yet wired into a test.

    The name apparently avoids test-runner collection.  Reference numbers were
    produced by the R commands quoted in the comments; keep them verbatim.
    """
    d = macrodata.load().data

    realinv = d['realinv']
    realgdp = d['realgdp']
    realint = d['realint']
    endog = realinv
    exog = add_constant(np.c_[realgdp, realint])
    res_ols1 = OLS(endog, exog).fit()

    # growth rates
    gs_l_realinv = 400 * np.diff(np.log(d['realinv']))
    gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))
    lint = d['realint'][:-1]
    tbilrate = d['tbilrate'][:-1]

    endogg = gs_l_realinv
    exogg = add_constant(np.c_[gs_l_realgdp, lint])
    exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate])

    res_ols = OLS(endogg, exogg).fit()
    res_ols2 = OLS(endogg, exogg2).fit()

    # the following were done accidentally with res_ols1 in R,
    # with original Greene data
    params = np.array([-272.3986041341653, 0.1779455206941112,
                       0.2149432424658157])
    cov_hac_4 = np.array([1321.569466333051, -0.2318836566017612,
        37.01280466875694, -0.2318836566017614, 4.602339488102263e-05,
        -0.0104687835998635, 37.012804668757, -0.0104687835998635,
        21.16037144168061]).reshape(3,3, order='F')
    cov_hac_10 = np.array([2027.356101193361, -0.3507514463299015,
        54.81079621448568, -0.350751446329901, 6.953380432635583e-05,
        -0.01268990195095196, 54.81079621448564, -0.01268990195095195,
        22.92512402151113]).reshape(3,3, order='F')

    # goldfeld-quandt reference values from R's lmtest::gqtest
    het_gq_greater = dict(statistic=13.20512768685082, df1=99, df2=98,
                          pvalue=1.246141976112324e-30, distr='f')
    het_gq_less = dict(statistic=13.20512768685082, df1=99, df2=98, pvalue=1.)
    het_gq_2sided = dict(statistic=13.20512768685082, df1=99, df2=98,
                         pvalue=1.246141976112324e-30, distr='f')

    # goldfeld-quandt, fraction = 0.5
    het_gq_greater_2 = dict(statistic=87.1328934692124, df1=48, df2=47,
                            pvalue=2.154956842194898e-33, distr='f')

    gq = smsdia.het_goldfeldquandt(endog, exog, split=0.5)
    compare_t_est(gq, het_gq_greater, decimal=(13, 14))
    assert_equal(gq[-1], 'increasing')

    harvey_collier = dict(stat=2.28042114041313, df=199,
                          pvalue=0.02364236161988260, distr='t')

    # hc = harvtest(fm, order.by=ggdp , data = list())
    harvey_collier_2 = dict(stat=0.7516918462158783, df=199,
                            pvalue=0.4531244858006127, distr='t')
##################################
class TestDiagnosticG(object):
def __init__(self):
d = macrodata.load().data
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv']))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))
lint = d['realint'][:-1]
tbilrate = d['tbilrate'][:-1]
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, lint])
exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate])
exogg3 = add_constant(np.c_[gs_l_realgdp])
res_ols = OLS(endogg, exogg).fit()
res_ols2 = OLS(endogg, exogg2).fit()
res_ols3 = OLS(endogg, exogg3).fit()
self.res = res_ols
self.res2 = res_ols2
self.res3 = res_ols3
self.endog = self.res.model.endog
self.exog = self.res.model.exog
def test_basic(self):
#mainly to check I got the right regression
#> mkarray(fm$coefficients, "params")
params = np.array([-9.48167277465485, 4.3742216647032,
-0.613996969478989])
assert_almost_equal(self.res.params, params, decimal=12)
    def test_hac(self):
        """HAC (Newey-West) covariance against R's sandwich::NeweyWest."""
        res = self.res
        #> nw = NeweyWest(fm, lag = 4, prewhite = FALSE, verbose=TRUE)
        #> nw2 = NeweyWest(fm, lag=10, prewhite = FALSE, verbose=TRUE)

        #> mkarray(nw, "cov_hac_4")
        cov_hac_4 = np.array([1.385551290884014, -0.3133096102522685,
            -0.0597207976835705, -0.3133096102522685, 0.1081011690351306,
            0.000389440793564336, -0.0597207976835705, 0.000389440793564339,
            0.0862118527405036]).reshape(3,3, order='F')

        #> mkarray(nw2, "cov_hac_10")
        cov_hac_10 = np.array([1.257386180080192, -0.2871560199899846,
            -0.03958300024627573, -0.2871560199899845, 0.1049107028987101,
            0.0003896205316866944, -0.03958300024627578, 0.0003896205316866961,
            0.0985539340694839]).reshape(3,3, order='F')

        cov = sw.cov_hac_simple(res, nlags=4, use_correction=False)
        bse_hac = sw.se_cov(cov)
        assert_almost_equal(cov, cov_hac_4, decimal=14)
        assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14)

        cov = sw.cov_hac_simple(res, nlags=10, use_correction=False)
        bse_hac = sw.se_cov(cov)
        assert_almost_equal(cov, cov_hac_10, decimal=14)
        assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14)
    def test_het_goldfeldquandt(self):
        """Goldfeld-Quandt heteroscedasticity test against R's lmtest::gqtest."""
        #TODO: test options missing

        #> gq = gqtest(fm, alternative='greater')
        #> mkhtest_f(gq, 'het_gq_greater', 'f')
        het_gq_greater = dict(statistic=0.5313259064778423,
                              pvalue=0.9990217851193723,
                              parameters=(98, 98), distr='f')

        #> gq = gqtest(fm, alternative='less')
        #> mkhtest_f(gq, 'het_gq_less', 'f')
        het_gq_less = dict(statistic=0.5313259064778423,
                           pvalue=0.000978214880627621,
                           parameters=(98, 98), distr='f')

        #> gq = gqtest(fm, alternative='two.sided')
        #> mkhtest_f(gq, 'het_gq_two_sided', 'f')
        het_gq_two_sided = dict(statistic=0.5313259064778423,
                                pvalue=0.001956429761255241,
                                parameters=(98, 98), distr='f')

        #> gq = gqtest(fm, fraction=0.1, alternative='two.sided')
        #> mkhtest_f(gq, 'het_gq_two_sided_01', 'f')
        het_gq_two_sided_01 = dict(statistic=0.5006976835928314,
                                   pvalue=0.001387126702579789,
                                   parameters=(88, 87), distr='f')

        #> gq = gqtest(fm, fraction=0.5, alternative='two.sided')
        #> mkhtest_f(gq, 'het_gq_two_sided_05', 'f')
        het_gq_two_sided_05 = dict(statistic=0.434815645134117,
                                   pvalue=0.004799321242905568,
                                   parameters=(48, 47), distr='f')

        endogg, exogg = self.endog, self.exog
        #tests
        gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5)
        compare_t_est(gq, het_gq_greater, decimal=(14, 14))
        assert_equal(gq[-1], 'increasing')

        gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5,
                                       alternative='decreasing')
        compare_t_est(gq, het_gq_less, decimal=(14, 14))
        assert_equal(gq[-1], 'decreasing')

        gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5,
                                       alternative='two-sided')
        compare_t_est(gq, het_gq_two_sided, decimal=(14, 14))
        assert_equal(gq[-1], 'two-sided')

        #TODO: forcing the same split as R 202-90-90-1=21
        gq = smsdia.het_goldfeldquandt(endogg, exogg, split=90, drop=21,
                                       alternative='two-sided')
        compare_t_est(gq, het_gq_two_sided_01, decimal=(14, 14))
        assert_equal(gq[-1], 'two-sided')
        #TODO other options ???
def test_het_breusch_pagan(self):
res = self.res
bptest = dict(statistic=0.709924388395087, pvalue=0.701199952134347,
parameters=(2,), distr='f')
bp = smsdia.het_breuschpagan(res.resid, res.model.exog)
compare_t_est(bp, bptest, decimal=(12, 12))
def test_het_white(self):
res = self.res
#TODO: regressiontest, compare with Greene or Gretl or Stata
hw = smsdia.het_white(res.resid, res.model.exog)
hw_values = (33.503722896538441, 2.9887960597830259e-06,
7.7945101228430946, 1.0354575277704231e-06)
assert_almost_equal(hw, hw_values)
    def test_het_arch(self):
        """ARCH test (and indirectly het_lm) against R's FinTS::ArchTest."""
        #test het_arch and indirectly het_lm against R
        #> library(FinTS)
        #> at = ArchTest(residuals(fm), lags=4)
        #> mkhtest(at, 'archtest_4', 'chi2')
        archtest_4 = dict(statistic=3.43473400836259,
                          pvalue=0.487871315392619, parameters=(4,),
                          distr='chi2')

        #> at = ArchTest(residuals(fm), lags=12)
        #> mkhtest(at, 'archtest_12', 'chi2')
        archtest_12 = dict(statistic=8.648320999014171,
                           pvalue=0.732638635007718, parameters=(12,),
                           distr='chi2')

        at4 = smsdia.het_arch(self.res.resid, maxlag=4)
        at12 = smsdia.het_arch(self.res.resid, maxlag=12)
        compare_t_est(at4[:2], archtest_4, decimal=(12, 13))
        compare_t_est(at12[:2], archtest_12, decimal=(12, 13))
def test_het_arch2(self):
#test autolag options, this also test het_lm
#unfortunately optimal lag=1 for this data
resid = self.res.resid
res1 = smsdia.het_arch(resid, maxlag=1, autolag=None, store=True)
rs1 = res1[-1]
res2 = smsdia.het_arch(resid, maxlag=5, autolag='aic', store=True)
rs2 = res2[-1]
assert_almost_equal(rs2.resols.params, rs1.resols.params, decimal=13)
assert_almost_equal(res2[:4], res1[:4], decimal=13)
#test that smallest lag, maxlag=1 works
res3 = smsdia.het_arch(resid, maxlag=1, autolag='aic')
assert_almost_equal(res3[:4], res1[:4], decimal=13)
    def test_acorr_breusch_godfrey(self):
        """Breusch-Godfrey autocorrelation test against R's lmtest::bgtest."""
        res = self.res

        #bgf = bgtest(fm, order = 4, type="F")
        breuschgodfrey_f = dict(statistic=1.179280833676792,
                                pvalue=0.321197487261203,
                                parameters=(4,195,), distr='f')

        #> bgc = bgtest(fm, order = 4, type="Chisq")
        #> mkhtest(bgc, "breuschpagan_c", "chi2")
        breuschgodfrey_c = dict(statistic=4.771042651230007,
                                pvalue=0.3116067133066697,
                                parameters=(4,), distr='chi2')

        bg = smsdia.acorr_breusch_godfrey(res, nlags=4)
        bg_r = [breuschgodfrey_c['statistic'], breuschgodfrey_c['pvalue'],
                breuschgodfrey_f['statistic'], breuschgodfrey_f['pvalue']]
        assert_almost_equal(bg, bg_r, decimal=13)

        # check that lag choice works
        bg2 = smsdia.acorr_breusch_godfrey(res, nlags=None)
        bg3 = smsdia.acorr_breusch_godfrey(res, nlags=14)
        assert_almost_equal(bg2, bg3, decimal=13)
    def test_acorr_ljung_box(self):
        """Ljung-Box / Box-Pierce autocorrelation tests against R's Box.test."""
        res = self.res

        #> bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box")
        #> mkhtest(bt, "ljung_box_4", "chi2")
        ljung_box_4 = dict(statistic=5.23587172795227, pvalue=0.263940335284713,
                           parameters=(4,), distr='chi2')

        #> bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce")
        #> mkhtest(bt, "ljung_box_bp_4", "chi2")
        ljung_box_bp_4 = dict(statistic=5.12462932741681,
                              pvalue=0.2747471266820692,
                              parameters=(4,), distr='chi2')

        #ddof correction for fitted parameters in ARMA(p,q) fitdf=p+q
        #> bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box", fitdf=2)
        #> mkhtest(bt, "ljung_box_4df2", "chi2")
        ljung_box_4df2 = dict(statistic=5.23587172795227,
                              pvalue=0.0729532930400377,
                              parameters=(2,), distr='chi2')

        #> bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce", fitdf=2)
        #> mkhtest(bt, "ljung_box_bp_4df2", "chi2")
        ljung_box_bp_4df2 = dict(statistic=5.12462932741681,
                                 pvalue=0.0771260128929921,
                                 parameters=(2,), distr='chi2')

        lb, lbpval, bp, bppval = smsdia.acorr_ljungbox(res.resid, 4,
                                                       boxpierce=True)
        compare_t_est([lb[-1], lbpval[-1]], ljung_box_4, decimal=(13, 14))
        compare_t_est([bp[-1], bppval[-1]], ljung_box_bp_4, decimal=(13, 14))
    def test_harvey_collier(self):
        """Harvey-Collier linearity test against R's lmtest::harvtest."""
        #> hc = harvtest(fm, order.by = NULL, data = list())
        #> mkhtest_f(hc, 'harvey_collier', 't')
        harvey_collier = dict(statistic=0.494432160939874,
                              pvalue=0.6215491310408242,
                              parameters=(198), distr='t')

        #> hc2 = harvtest(fm, order.by=ggdp , data = list())
        #> mkhtest_f(hc2, 'harvey_collier_2', 't')
        # NOTE: the order.by variant is recorded but not exercised below.
        harvey_collier_2 = dict(statistic=1.42104628340473,
                                pvalue=0.1568762892441689,
                                parameters=(198), distr='t')

        hc = smsdia.linear_harvey_collier(self.res)
        compare_t_est(hc, harvey_collier, decimal=(12, 12))
    def test_rainbow(self):
        """Rainbow linearity test against R's lmtest::raintest."""
        #rainbow test
        #> rt = raintest(fm)
        #> mkhtest_f(rt, 'raintest', 'f')
        raintest = dict(statistic=0.6809600116739604, pvalue=0.971832843583418,
                        parameters=(101, 98), distr='f')

        #> rt = raintest(fm, center=0.4)
        #> mkhtest_f(rt, 'raintest_center_04', 'f')
        # NOTE: recorded but not exercised below.
        raintest_center_04 = dict(statistic=0.682635074191527,
                                  pvalue=0.971040230422121,
                                  parameters=(101, 98), distr='f')

        #> rt = raintest(fm, fraction=0.4)
        #> mkhtest_f(rt, 'raintest_fraction_04', 'f')
        raintest_fraction_04 = dict(statistic=0.565551237772662,
                                    pvalue=0.997592305968473,
                                    parameters=(122, 77), distr='f')

        #> rt = raintest(fm, order.by=ggdp)
        #Warning message:
        #In if (order.by == "mahalanobis") { :
        #  the condition has length > 1 and only the first element will be used
        #> mkhtest_f(rt, 'raintest_order_gdp', 'f')
        # NOTE: recorded but not exercised below.
        raintest_order_gdp = dict(statistic=1.749346160513353,
                                  pvalue=0.002896131042494884,
                                  parameters=(101, 98), distr='f')

        rb = smsdia.linear_rainbow(self.res)
        compare_t_est(rb, raintest, decimal=(13, 14))

        rb = smsdia.linear_rainbow(self.res, frac=0.4)
        compare_t_est(rb, raintest_fraction_04, decimal=(13, 14))
    def test_compare_lr(self):
        """Likelihood-ratio and Wald F tests of nested models against R's lrtest."""
        res = self.res
        res3 = self.res3  #nested within res

        #lrtest
        #lrt = lrtest(fm, fm2)
        #Model 1: ginv ~ ggdp + lint
        #Model 2: ginv ~ ggdp
        lrtest = dict(loglike1=-763.9752181602237, loglike2=-766.3091902020184,
                      chi2value=4.66794408358942, pvalue=0.03073069384028677,
                      df=(4,3,1))
        lrt = res.compare_lr_test(res3)
        assert_almost_equal(lrt[0], lrtest['chi2value'], decimal=11)
        assert_almost_equal(lrt[1], lrtest['pvalue'], decimal=11)

        waldtest = dict(fvalue=4.65216373312492, pvalue=0.03221346195239025,
                        df=(199,200,1))

        wt = res.compare_f_test(res3)
        assert_almost_equal(wt[0], waldtest['fvalue'], decimal=11)
        assert_almost_equal(wt[1], waldtest['pvalue'], decimal=11)
    def test_compare_nonnested(self):
        """J and Cox tests of non-nested models against R's lmtest jtest/coxtest.

        The encomptest and petest tables are recorded for reference only.
        """
        res = self.res
        res2 = self.res2

        #jt = jtest(fm, lm(ginv ~ ggdp + tbilrate))
        #Estimate         Std. Error t value Pr(>|t|)
        jtest = [('M1 + fitted(M2)', 1.591505670785873, 0.7384552861695823,
                  2.155182176352370, 0.032354572525314450, '*'),
                 ('M2 + fitted(M1)', 1.305687653016899, 0.4808385176653064,
                  2.715438978051544, 0.007203854534057954, '**')]

        jt1 = smsdia.compare_j(res2, res)
        assert_almost_equal(jt1, jtest[0][3:5], decimal=13)

        jt2 = smsdia.compare_j(res, res2)
        assert_almost_equal(jt2, jtest[1][3:5], decimal=14)

        #Estimate        Std. Error  z value   Pr(>|z|)
        coxtest = [('fitted(M1) ~ M2', -0.782030488930356, 0.599696502782265,
                    -1.304043770977755, 1.922186587840554e-01, ' '),
                   ('fitted(M2) ~ M1', -2.248817107408537, 0.392656854330139,
                    -5.727181590258883, 1.021128495098556e-08, '***')]

        ct1 = smsdia.compare_cox(res, res2)
        assert_almost_equal(ct1, coxtest[0][3:5], decimal=13)

        ct2 = smsdia.compare_cox(res2, res)
        assert_almost_equal(ct2, coxtest[1][3:5], decimal=12)
        #TODO should be approx

        #        Res.Df Df       F    Pr(>F)
        encomptest = [('M1 vs. ME', 198, -1, 4.644810213266983,
                       0.032354572525313666, '*'),
                      ('M2 vs. ME', 198, -1, 7.373608843521585,
                       0.007203854534058054, '**')]

        #        Estimate          Std. Error  t value
        petest = [('M1 + log(fit(M1))-fit(M2)', -229.281878354594596,
                   44.5087822087058598, -5.15139, 6.201281252449979e-07),
                  ('M2 + fit(M1)-exp(fit(M2))', 0.000634664704814,
                   0.0000462387010349, 13.72583, 1.319536115230356e-30)]
    def test_cusum_ols(self):
        """OLS-CUSUM structural break test against R's strucchange::sctest."""
        #R library(strucchange)
        #> sc = sctest(ginv ~ ggdp + lint, type="OLS-CUSUM")
        #> mkhtest(sc, 'cusum_ols', 'BB')
        cusum_ols = dict(statistic=1.055750610401214, pvalue=0.2149567397376543,
                         parameters=(), distr='BB') #Brownian Bridge

        # ddof accounts for the three estimated regression parameters
        k_vars=3
        cs_ols = smsdia.breaks_cusumolsresid(self.res.resid, ddof=k_vars) #
        compare_t_est(cs_ols, cusum_ols, decimal=(12, 12))
    def test_breaks_hansen(self):
        """Nyblom-Hansen stability test statistic against R's strucchange::sctest."""
        #> sc = sctest(ginv ~ ggdp + lint, type="Nyblom-Hansen")
        #> mkhtest(sc, 'breaks_nyblom_hansen', 'BB')
        breaks_nyblom_hansen = dict(statistic=1.0300792740544484,
                                    pvalue=0.1136087530212015,
                                    parameters=(), distr='BB')

        bh = smsdia.breaks_hansen(self.res)
        assert_almost_equal(bh[0], breaks_nyblom_hansen['statistic'],
                            decimal=13)
        #TODO: breaks_hansen doesn't return pvalues
    def test_recursive_residuals(self):
        """Recursive OLS residuals: standardized cumsum, bounds, and explicit
        per-step OLS cross-check (reference values visually checked with gretl)."""
        reccumres_standardize = np.array([-2.151, -3.748, -3.114, -3.096,
        -1.865, -2.230, -1.194, -3.500, -3.638, -4.447, -4.602, -4.631, -3.999,
        -4.830, -5.429, -5.435, -6.554, -8.093, -8.567, -7.532, -7.079, -8.468,
        -9.320, -12.256, -11.932, -11.454, -11.690, -11.318, -12.665, -12.842,
        -11.693, -10.803, -12.113, -12.109, -13.002, -11.897, -10.787, -10.159,
        -9.038, -9.007, -8.634, -7.552, -7.153, -6.447, -5.183, -3.794, -3.511,
        -3.979, -3.236, -3.793, -3.699, -5.056, -5.724, -4.888, -4.309, -3.688,
        -3.918, -3.735, -3.452, -2.086, -6.520, -7.959, -6.760, -6.855, -6.032,
        -4.405, -4.123, -4.075, -3.235, -3.115, -3.131, -2.986, -1.813, -4.824,
        -4.424, -4.796, -4.000, -3.390, -4.485, -4.669, -4.560, -3.834, -5.507,
        -3.792, -2.427, -1.756, -0.354, 1.150, 0.586, 0.643, 1.773, -0.830,
        -0.388, 0.517, 0.819, 2.240, 3.791, 3.187, 3.409, 2.431, 0.668, 0.957,
        -0.928, 0.327, -0.285, -0.625, -2.316, -1.986, -0.744, -1.396, -1.728,
        -0.646, -2.602, -2.741, -2.289, -2.897, -1.934, -2.532, -3.175, -2.806,
        -3.099, -2.658, -2.487, -2.515, -2.224, -2.416, -1.141, 0.650, -0.947,
        0.725, 0.439, 0.885, 2.419, 2.642, 2.745, 3.506, 4.491, 5.377, 4.624,
        5.523, 6.488, 6.097, 5.390, 6.299, 6.656, 6.735, 8.151, 7.260, 7.846,
        8.771, 8.400, 8.717, 9.916, 9.008, 8.910, 8.294, 8.982, 8.540, 8.395,
        7.782, 7.794, 8.142, 8.362, 8.400, 7.850, 7.643, 8.228, 6.408, 7.218,
        7.699, 7.895, 8.725, 8.938, 8.781, 8.350, 9.136, 9.056, 10.365, 10.495,
        10.704, 10.784, 10.275, 10.389, 11.586, 11.033, 11.335, 11.661, 10.522,
        10.392, 10.521, 10.126, 9.428, 9.734, 8.954, 9.949, 10.595, 8.016,
        6.636, 6.975])

        rr = smsdia.recursive_olsresiduals(self.res, skip=3, alpha=0.95)
        assert_equal(np.round(rr[5][1:], 3), reccumres_standardize) #extra zero in front
        #assert_equal(np.round(rr[3][4:], 3), np.diff(reccumres_standardize))
        assert_almost_equal(rr[3][4:], np.diff(reccumres_standardize),3)
        assert_almost_equal(rr[4][3:].std(ddof=1), 10.7242, decimal=4)

        #regression number, visually checked with graph from gretl
        ub0 = np.array([ 13.37318571, 13.50758959, 13.64199346, 13.77639734,
            13.91080121])
        ub1 = np.array([ 39.44753774, 39.58194162, 39.7163455 , 39.85074937,
            39.98515325])
        lb, ub = rr[6]
        assert_almost_equal(ub[:5], ub0, decimal=7)
        assert_almost_equal(lb[:5], -ub0, decimal=7)
        assert_almost_equal(ub[-5:], ub1, decimal=7)
        assert_almost_equal(lb[-5:], -ub1, decimal=7)

        #test a few values with explicit OLS
        endog = self.res.model.endog
        exog = self.res.model.exog
        params = []
        ypred = []
        for i in range(3,10):
            resi = OLS(endog[:i], exog[:i]).fit()
            ypred.append(resi.model.predict(resi.params, exog[i]))
            params.append(resi.params)
        assert_almost_equal(rr[2][3:10], ypred, decimal=12)
        assert_almost_equal(rr[0][3:10], endog[3:10] - ypred, decimal=12)
        assert_almost_equal(rr[1][2:9], params, decimal=12)
def test_normality(self):
    """Check Lilliefors and Anderson-Darling normality tests against R.

    Reference values were generated with R's ``nortest`` package (see the
    inline ``#>`` R snippets); ``compare_t_est`` checks both the statistic
    and the p-value at the given decimal accuracy.
    """
    res = self.res

    #> library(nortest) #Lilliefors (Kolmogorov-Smirnov) normality test
    #> lt = lillie.test(residuals(fm))
    #> mkhtest(lt, "lilliefors", "-")
    lilliefors1 = dict(statistic=0.0723390908786589,
                       pvalue=0.01204113540102896, parameters=(), distr='-')

    #> lt = lillie.test(residuals(fm)**2)
    #> mkhtest(lt, "lilliefors", "-")
    lilliefors2 = dict(statistic=0.301311621898024,
                       pvalue=1.004305736618051e-51,
                       parameters=(), distr='-')

    #> lt = lillie.test(residuals(fm)[1:20])
    #> mkhtest(lt, "lilliefors", "-")
    lilliefors3 = dict(statistic=0.1333956004203103,
                       pvalue=0.4618672180799566, parameters=(), distr='-')

    lf1 = smsdia.lilliefors(res.resid)
    lf2 = smsdia.lilliefors(res.resid**2)
    lf3 = smsdia.lilliefors(res.resid[:20])

    compare_t_est(lf1, lilliefors1, decimal=(14, 14))
    compare_t_est(lf2, lilliefors2, decimal=(14, 14)) #pvalue very small
    # p-value is ~1e-51, so additionally check relative accuracy.
    assert_approx_equal(lf2[1], lilliefors2['pvalue'], significant=10)
    compare_t_est(lf3, lilliefors3, decimal=(14, 1))
    #R uses different approximation for pvalue in last case

    #> ad = ad.test(residuals(fm))
    #> mkhtest(ad, "ad3", "-")
    adr1 = dict(statistic=1.602209621518313, pvalue=0.0003937979149362316,
                parameters=(), distr='-')

    #> ad = ad.test(residuals(fm)**2)
    #> mkhtest(ad, "ad3", "-")
    adr2 = dict(statistic=np.inf, pvalue=np.nan, parameters=(), distr='-')

    #> ad = ad.test(residuals(fm)[1:20])
    #> mkhtest(ad, "ad3", "-")
    adr3 = dict(statistic=0.3017073732210775, pvalue=0.5443499281265933,
                parameters=(), distr='-')

    ad1 = smsdia.normal_ad(res.resid)
    compare_t_est(ad1, adr1, decimal=(11, 13))
    ad2 = smsdia.normal_ad(res.resid**2)
    # Statistic diverges for squared residuals (R also reports Inf/NaN),
    # so only the infinity is asserted here.
    assert_(np.isinf(ad2[0]))
    ad3 = smsdia.normal_ad(res.resid[:20])
    compare_t_est(ad3, adr3, decimal=(11, 12))
def test_influence(self):
    """Compare OLSInfluence diagnostics against R reference output.

    Reference values were produced in R by ``lsdiag`` (stored as JSON) and
    ``influence.measures`` (stored as CSV) for the same regression.
    """
    res = self.res

    #this test is slow
    infl = oi.OLSInfluence(res)

    # Use a context manager so the JSON file handle is closed even when a
    # comparison below fails (the original left the handle open).
    with open(os.path.join(cur_dir, "results/influence_lsdiag_R.json")) as fp:
        lsdiag = json.load(fp)

    #basic
    assert_almost_equal(np.array(lsdiag['cov.scaled']).reshape(3, 3),
                        res.cov_params(), decimal=14)
    assert_almost_equal(np.array(lsdiag['cov.unscaled']).reshape(3, 3),
                        res.normalized_cov_params, decimal=14)

    c0, c1 = infl.cooks_distance #TODO: what's c1
    assert_almost_equal(c0, lsdiag['cooks'], decimal=14)
    assert_almost_equal(infl.hat_matrix_diag, lsdiag['hat'], decimal=14)
    assert_almost_equal(infl.resid_studentized_internal,
                        lsdiag['std.res'], decimal=14)

    #slow:
    #infl._get_all_obs()  #slow, nobs estimation loop, called implicitly
    dffits, dffth = infl.dffits
    assert_almost_equal(dffits, lsdiag['dfits'], decimal=14)
    assert_almost_equal(infl.resid_studentized_external,
                        lsdiag['stud.res'], decimal=14)

    import pandas
    fn = os.path.join(cur_dir, "results/influence_measures_R.csv")
    infl_r = pandas.read_csv(fn, index_col=0)
    conv = lambda s: 1 if s == 'TRUE' else 0
    fn = os.path.join(cur_dir, "results/influence_measures_bool_R.csv")
    #not used yet:
    #infl_bool_r = pandas.read_csv(fn, index_col=0,
    #                              converters=dict(zip(lrange(7), [conv]*7)))
    infl_r2 = np.asarray(infl_r)
    assert_almost_equal(infl.dfbetas, infl_r2[:, :3], decimal=13)
    assert_almost_equal(infl.cov_ratio, infl_r2[:, 4], decimal=14)
    #duplicates
    assert_almost_equal(dffits, infl_r2[:, 3], decimal=14)
    assert_almost_equal(c0, infl_r2[:, 5], decimal=14)
    assert_almost_equal(infl.hat_matrix_diag, infl_r2[:, 6], decimal=14)

    #Note: for dffits, R uses a threshold around 0.36, mine: dffits[1]=0.24373
    #TODO: finish and check thresholds and pvalues
    '''
    R has
    >>> np.nonzero(np.asarray(infl_bool_r["dffit"]))[0]
    array([  6,  26,  63,  76,  90, 199])
    >>> np.nonzero(np.asarray(infl_bool_r["cov.r"]))[0]
    array([  4,  26,  59,  61,  63,  72,  76,  84,  91,  92,  94,  95, 108,
           197, 198])
    >>> np.nonzero(np.asarray(infl_bool_r["hat"]))[0]
    array([ 62,  76,  84,  90,  91,  92,  95, 108, 197, 199])
    '''
class TestDiagnosticGPandas(TestDiagnosticG):
    """Re-run every TestDiagnosticG check with pandas inputs.

    The fixtures are built from Series/DataFrames instead of plain ndarrays,
    so each inherited test also verifies that the diagnostics accept pandas
    objects.
    """

    def __init__(self):
        d = macrodata.load_pandas().data
        #growth rates
        # 400 * log-difference -- presumably an annualized quarterly growth
        # rate; matches the ndarray-based parent fixture.
        d['gs_l_realinv'] = 400 * np.log(d['realinv']).diff()
        d['gs_l_realgdp'] = 400 * np.log(d['realgdp']).diff()
        # One-quarter lags of the interest-rate series.
        d['lint'] = d['realint'].shift(1)
        d['tbilrate'] = d['tbilrate'].shift(1)

        # diff()/shift() introduce NaNs in the first row(s); drop them.
        d = d.dropna()
        self.d = d
        endogg = d['gs_l_realinv']
        exogg = add_constant(d[['gs_l_realgdp', 'lint']])
        exogg2 = add_constant(d[['gs_l_realgdp', 'tbilrate']])
        exogg3 = add_constant(d[['gs_l_realgdp']])

        res_ols = OLS(endogg, exogg).fit()
        res_ols2 = OLS(endogg, exogg2).fit()
        res_ols3 = OLS(endogg, exogg3).fit()

        self.res = res_ols
        self.res2 = res_ols2
        self.res3 = res_ols3
        self.endog = self.res.model.endog
        self.exog = self.res.model.exog
def grangertest():
    # NOTE(review): dead/unfinished code. This function only records the
    # reference output of R's grangertest() in a local dict (which shadows
    # the function's own name), performs no computation, makes no assertion
    # and returns nothing. Kept for the recorded R values until a real test
    # is written.
    #> gt = grangertest(ginv, ggdp, order=4)
    #> gt
    #Granger causality test
    #
    #Model 1: ggdp ~ Lags(ggdp, 1:4) + Lags(ginv, 1:4)
    #Model 2: ggdp ~ Lags(ggdp, 1:4)

    grangertest = dict(fvalue=1.589672703015157, pvalue=0.178717196987075,
                       df=(198,193))
def test_outlier_influence_funcs():
    """Smoke test: summary_table and OLSInfluence.summary_table must run."""
    # Random full-rank design with intercept and a matching response.
    design = add_constant(np.random.randn(10, 2))
    response = design.sum(1) + np.random.randn(10)

    fit_full = OLS(response, design).fit()
    oi.summary_table(fit_full, alpha=0.05)

    # Single-regressor fit (first column only) exercises the same path
    # without the added constant columns.
    fit_single = OLS(response, design[:, 0]).fit()
    oi.summary_table(fit_single, alpha=0.05)

    infl = fit_single.get_influence()
    infl.summary_table()
def test_influence_wrapped():
    """Influence diagnostics must work on pandas-wrapped (DataFrame) models.

    Uses the same R reference values as TestDiagnosticG.test_influence, but
    the model is built from pandas objects instead of ndarrays.
    """
    from pandas import DataFrame
    from pandas.util.testing import assert_series_equal

    d = macrodata.load_pandas().data
    #growth rates
    gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
    gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
    lint = d['realint'][:-1]

    # re-index these because they won't conform to lint
    gs_l_realgdp.index = lint.index
    gs_l_realinv.index = lint.index

    data = dict(const=np.ones_like(lint), lint=lint, lrealgdp=gs_l_realgdp)
    #order is important
    exog = DataFrame(data, columns=['const','lrealgdp','lint'])

    res = OLS(gs_l_realinv, exog).fit()
    #basic
    # already tested
    #assert_almost_equal(lsdiag['cov.scaled'],
    #                    res.cov_params().values.ravel(), decimal=14)
    #assert_almost_equal(lsdiag['cov.unscaled'],
    #                    res.normalized_cov_params.values.ravel(), decimal=14)

    infl = oi.OLSInfluence(res)

    # smoke test just to make sure it works, results separately tested
    df = infl.summary_frame()
    assert_(isinstance(df, DataFrame))

    #this test is slow
    # Use a context manager so the JSON file handle is closed even when a
    # comparison below fails (the original left the handle open).
    with open(os.path.join(cur_dir, "results/influence_lsdiag_R.json")) as fp:
        lsdiag = json.load(fp)

    c0, c1 = infl.cooks_distance #TODO: what's c1, it's pvalues? -ss

    #NOTE: we get a hard-cored 5 decimals with pandas testing
    assert_almost_equal(c0, lsdiag['cooks'], 14)
    assert_almost_equal(infl.hat_matrix_diag, (lsdiag['hat']), 14)
    assert_almost_equal(infl.resid_studentized_internal,
                        lsdiag['std.res'], 14)

    #slow:
    dffits, dffth = infl.dffits
    assert_almost_equal(dffits, lsdiag['dfits'], 14)
    assert_almost_equal(infl.resid_studentized_external,
                        lsdiag['stud.res'], 14)

    import pandas
    fn = os.path.join(cur_dir, "results/influence_measures_R.csv")
    infl_r = pandas.read_csv(fn, index_col=0)
    conv = lambda s: 1 if s == 'TRUE' else 0
    fn = os.path.join(cur_dir, "results/influence_measures_bool_R.csv")
    #not used yet:
    #infl_bool_r = pandas.read_csv(fn, index_col=0,
    #                              converters=dict(zip(lrange(7), [conv]*7)))
    infl_r2 = np.asarray(infl_r)
    #TODO: finish wrapping this stuff
    assert_almost_equal(infl.dfbetas, infl_r2[:, :3], decimal=13)
    assert_almost_equal(infl.cov_ratio, infl_r2[:, 4], decimal=14)
def test_influence_dtype():
    """Influence measures must not depend on the integer/float dtype of endog.

    Regression test for gh-2148: cov_ratio differed when the endog array had
    an integer-like dtype.
    """
    np.random.seed(123)
    endog = np.ones(20)
    exog = np.random.randn(20, 3)

    cov_ratio_a = OLS(endog, exog).fit().get_influence().cov_ratio
    cov_ratio_b = OLS(endog * 1., exog).fit().get_influence().cov_ratio
    assert_allclose(cov_ratio_a, cov_ratio_b, rtol=1e-14)

    # regression test for values (frozen reference)
    expected = np.array(
        [ 1.22239215,  1.31551021,  1.52671069,  1.05003921,  0.89099323,
          1.57405066,  1.03230092,  0.95844196,  1.15531836,  1.21963623,
          0.87699564,  1.16707748,  1.10481391,  0.98839447,  1.08999334,
          1.35680102,  1.46227715,  1.45966708,  1.13659521,  1.22799038])
    assert_almost_equal(cov_ratio_a, expected, decimal=8)
def test_outlier_test():
# results from R with NA -> 1. Just testing interface here because
# outlier_test is just a wrapper
labels = ['accountant', 'pilot', 'architect', 'author', 'chemist',
'minister', 'professor', 'dentist', 'reporter', 'engineer',
'undertaker', 'lawyer', 'physician', 'welfare.worker', 'teacher',
'conductor', 'contractor', 'factory.owner', 'store.manager',
'banker', 'bookkeeper', 'mail.carrier', 'insurance.agent',
'store.clerk', 'carpenter', 'electrician', 'RR.engineer',
'machinist', 'auto.repairman', 'plumber', 'gas.stn.attendant',
'coal.miner', 'streetcar.motorman', 'taxi.driver',
'truck.driver', 'machine.operator', 'barber', 'bartender',
'shoe.shiner', 'cook', 'soda.clerk', 'watchman', 'janitor',
'policeman', 'waiter']
#Duncan's prestige data from car
exog = [[1.0, 62.0, 86.0], [1.0, 72.0, 76.0], [1.0, 75.0, 92.0],
[1.0, 55.0, 90.0], [1.0, 64.0, 86.0], [1.0, 21.0, 84.0],
[1.0, 64.0, 93.0], [1.0, 80.0, 100.0], [1.0, 67.0, 87.0],
[1.0, 72.0, 86.0], [1.0, 42.0, 74.0], [1.0, 76.0, 98.0],
[1.0, 76.0, 97.0], [1.0, 41.0, 84.0], [1.0, 48.0, 91.0],
[1.0, 76.0, 34.0], [1.0, 53.0, 45.0], [1.0, 60.0, 56.0],
[1.0, 42.0, 44.0], [1.0, 78.0, 82.0], [1.0, 29.0, 72.0],
[1.0, 48.0, 55.0], [1.0, 55.0, 71.0], [1.0, 29.0, 50.0],
[1.0, 21.0, 23.0], [1.0, 47.0, 39.0], [1.0, 81.0, 28.0],
[1.0, 36.0, 32.0], [1.0, 22.0, 22.0], [1.0, 44.0, 25.0],
[1.0, 15.0, 29.0], [1.0, 7.0, 7.0], [1.0, 42.0, 26.0],
[1.0, 9.0, 19.0], [1.0, 21.0, 15.0], [1.0, 21.0, 20.0],
[1.0, 16.0, 26.0], [1.0, 16.0, 28.0], [1.0, 9.0, 17.0],
[1.0, 14.0, 22.0], [1.0, 12.0, 30.0], [1.0, 17.0, 25.0],
[1.0, 7.0, 20.0], [1.0, 34.0, 47.0], [1.0, 8.0, 32.0]]
endog = [ 82., 83., 90., 76., 90., 87., 93., 90., 52., 88., 57.,
89., 97., 59., 73., 38., 76., 81., 45., 92., 39., 34.,
41., 16., 33., 53., 67., 57., 26., 29., 10., 15., 19.,
10., 13., 24., 20., 7., 3., 16., 6., 11., 8., 41.,
10.]
ndarray_mod = OLS(endog, exog).fit()
rstudent = [3.1345185839, -2.3970223990, 2.0438046359, -1.9309187757,
1.8870465798, -1.7604905300, -1.7040324156, 1.6024285876,
-1.4332485037, -1.1044851583, 1.0688582315, 1.0185271840,
-0.9024219332, -0.9023876471, -0.8830953936, 0.8265782334,
0.8089220547, 0.7682770197, 0.7319491074, -0.6665962829,
0.5227352794, -0.5135016547, 0.5083881518, 0.4999224372,
-0.4980818221, -0.4759717075, -0.4293565820, -0.4114056499,
-0.3779540862, 0.3556874030, 0.3409200462, 0.3062248646,
0.3038999429, -0.3030815773, -0.1873387893, 0.1738050251,
0.1424246593, -0.1292266025, 0.1272066463, -0.0798902878,
0.0788467222, 0.0722556991, 0.0505098280, 0.0233215136,
0.0007112055]
unadj_p = [0.003177202, 0.021170298, 0.047432955, 0.060427645, 0.066248120,
0.085783008, 0.095943909, 0.116738318, 0.159368890, 0.275822623,
0.291386358, 0.314400295, 0.372104049, 0.372122040, 0.382333561,
0.413260793, 0.423229432, 0.446725370, 0.468363101, 0.508764039,
0.603971990, 0.610356737, 0.613905871, 0.619802317, 0.621087703,
0.636621083, 0.669911674, 0.682917818, 0.707414459, 0.723898263,
0.734904667, 0.760983108, 0.762741124, 0.763360242, 0.852319039,
0.862874018, 0.887442197, 0.897810225, 0.899398691, 0.936713197,
0.937538115, 0.942749758, 0.959961394, 0.981506948, 0.999435989]
bonf_p = [0.1429741, 0.9526634, 2.1344830, 2.7192440, 2.9811654, 3.8602354,
4.3174759, 5.2532243, 7.1716001, 12.4120180, 13.1123861, 14.1480133,
16.7446822, 16.7454918, 17.2050103, 18.5967357, 19.0453245,
20.1026416, 21.0763395, 22.8943818, 27.1787396, 27.4660532,
27.6257642, 27.8911043, 27.9489466, 28.6479487, 30.1460253,
30.7313018, 31.8336506, 32.5754218, 33.0707100, 34.2442399,
34.3233506, 34.3512109, 38.3543568, 38.8293308, 39.9348989,
40.4014601, 40.4729411, 42.1520939, 42.1892152, 42.4237391,
43.1982627, 44.1678127, 44.9746195]
bonf_p = np.array(bonf_p)
bonf_p[bonf_p > 1] = 1
sorted_labels = ["minister", "reporter", "contractor", "insurance.agent",
"machinist", "store.clerk", "conductor", "factory.owner",
"mail.carrier", "streetcar.motorman", "carpenter", "coal.miner",
"bartender", "bookkeeper", "soda.clerk", "chemist", "RR.engineer",
"professor", "electrician", "gas.stn.attendant", "auto.repairman",
"watchman", "banker", "machine.operator", "dentist", "waiter",
"shoe.shiner", "welfare.worker", "plumber", "physician", "pilot",
"engineer", "accountant", "lawyer", "undertaker", "barber",
"store.manager", "truck.driver", "cook", "janitor", "policeman",
"architect", "teacher", "taxi.driver", "author"]
res2 = np.c_[rstudent, unadj_p, bonf_p]
res = oi.outlier_test(ndarray_mod, method='b', labels=labels, order=True)
np.testing.assert_almost_equal(res.values, res2, 7)
np.testing.assert_equal(res.index.tolist(), sorted_labels) # pylint: disable-msg=E1103
if __name__ == '__main__':
    import nose
    # -x stops at the first failure; exit=False keeps the interpreter alive
    # so the individual checks below can be re-enabled interactively.
    nose.runmodule(argv=[__file__, '-vvs', '-x'], exit=False)

    #t = TestDiagnosticG()
    #t.test_basic()
    #t.test_hac()
    #t.test_acorr_breusch_godfrey()
    #t.test_acorr_ljung_box()
    #t.test_het_goldfeldquandt()
    #t.test_het_breusch_pagan()
    #t.test_het_white()
    #t.test_compare_lr()
    #t.test_compare_nonnested()
    #t.test_influence()
##################################################
'''
J test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Estimate Std. Error t value Pr(>|t|)
M1 + fitted(M2) 1.591505670785873 0.7384552861695823 2.15518 0.0323546 *
M2 + fitted(M1) 1.305687653016899 0.4808385176653064 2.71544 0.0072039 **
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
= lm(ginv ~ ggdp + tbilrate)
> ct = coxtest(fm, fm3)
> ct
Cox test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Estimate Std. Error z value Pr(>|z|)
fitted(M1) ~ M2 -0.782030488930356 0.599696502782265 -1.30404 0.19222
fitted(M2) ~ M1 -2.248817107408537 0.392656854330139 -5.72718 1.0211e-08 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> et = encomptest(fm, fm3)
> et
Encompassing test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Model E: ginv ~ ggdp + lint + tbilrate
Res.Df Df F Pr(>F)
M1 vs. ME 198 -1 4.64481 0.0323546 *
M2 vs. ME 198 -1 7.37361 0.0072039 **
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> fm4 = lm(realinv ~ realgdp + realint, data=d)
> fm5 = lm(log(realinv) ~ realgdp + realint, data=d)
> pet = petest(fm4, fm5)
> pet
PE test
Model 1: realinv ~ realgdp + realint
Model 2: log(realinv) ~ realgdp + realint
Estimate Std. Error t value
M1 + log(fit(M1))-fit(M2) -229.281878354594596 44.5087822087058598 -5.15139
M2 + fit(M1)-exp(fit(M2)) 0.000634664704814 0.0000462387010349 13.72583
Pr(>|t|)
M1 + log(fit(M1))-fit(M2) 6.2013e-07 ***
M2 + fit(M1)-exp(fit(M2)) < 2.22e-16 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
'''
| bsd-3-clause |
FluVigilanciaBR/seasonality | methods/data_filter/delay_table_4Weeks.py | 1 | 5515 | __author__ = 'Marcelo Ferreira da Costa Gomes'
import pandas as pd
import numpy as np
import episem
import sys
import datetime
import calendar
import argparse
from argparse import RawDescriptionHelpFormatter
def readtable(fname):
    """Load the notification table, keeping only delay-related columns.

    Columns are renamed to short labels and rows with a missing UF
    (notification state) are discarded.
    """
    col_map = {
        'SG_UF_NOT': 'UF',
        'DT_SIN_PRI_epiyear': 'epiyear',
        'DT_SIN_PRI_epiweek': 'epiweek',
        'SinPri2Digita_DelayWeeks': 'DelayWeeks',
    }
    raw = pd.read_csv(fname, low_memory=False)
    df = raw[list(col_map)].rename(columns=col_map)
    # df.UF = df.UF.astype('int64')
    # Drop records without a notification state.
    return df.loc[~pd.isnull(df.UF), ]
def cumhistfull(x):
    """Return the cumulative delay distribution of *x* in percent, by week.

    Delays of 0..8 weeks are listed individually; anything longer is
    collapsed into a single '> 8' row pinned at 100%.

    Parameters
    ----------
    x : array-like of non-negative int
        Delay, in weeks, of each notification.

    Returns
    -------
    pandas.DataFrame with columns ['Weeks', 'Cumulative'].
    """
    xlim = x.max() + 1
    # Unit-width bins centred on the integers 0, 1, ..., so each bin counts
    # exactly one delay value.
    bins = np.arange(-0.5, xlim, 1)
    # `normed` was removed from numpy >= 1.24; `density=True` is the exact
    # equivalent for unit-width bins.
    h, xi = np.histogram(x, bins=bins, density=True)
    dx = xi[1] - xi[0]
    h = np.cumsum(h) * dx * 100
    dfcumhist = pd.DataFrame([[a, b] for a, b in zip(range(0, min(xlim, 9)), h[0:min(xlim, 9)])],
                             columns=['Weeks', 'Cumulative'])
    if xlim > 9:
        # DataFrame.append was removed in pandas 2.0; pd.concat with
        # sort=True reproduces the old behaviour (columns sorted).
        tail = pd.DataFrame([{'Weeks': '> 8', 'Cumulative': 100}])
        dfcumhist = pd.concat([dfcumhist, tail], sort=True)
    return dfcumhist
def cumsumdata(x):
    # NOTE(review): broken/unfinished -- `np.array()` requires at least one
    # argument, so calling this raises TypeError; the sorted copy is
    # immediately overwritten and nothing is returned. Not referenced
    # anywhere else in this module; remove or finish it.
    n = len(x)
    f = np.sort(x)
    f = np.array()
def createtable(df=pd.DataFrame(), testmonth=None, testyear=None):
    """Build the weekly notification-delay table for one reference month.

    Parameters
    ----------
    df : pandas.DataFrame
        Output of readtable(): columns UF, epiyear, epiweek, DelayWeeks.
        # NOTE(review): mutable default argument (pd.DataFrame()) is shared
        # across calls; harmless here since it is never mutated as default,
        # but `None` would be safer.
    testmonth, testyear : int or str, optional
        Reference month/year. When testmonth is None, the previous calendar
        month (relative to "now") is used.

    Returns
    -------
    (delay_table, testmonth, testyear) where delay_table has one row per
    (UF, epiyear, epiweek) with total notifications and per-delay-week
    counts d0..d26 plus their sum Notifications_6m.
    """
    now = datetime.datetime.now()
    if testmonth is None:
        if testyear is not None:
            exit('Year provided without corresponding month. Please provide (df, month, year) or (df) alone')
        month = now.month
        testyear = now.year
        # Default to the previous month, rolling back the year in January.
        if month == 1:
            testmonth = 12
            testyear -= 1
        else:
            testmonth = month - 1
    else:
        testmonth = int(testmonth)
        if testyear is None:
            testyear = now.year
        testyear = int(testyear)

    # Epidemiological weeks covering the first and last day of the month;
    # episem() returns strings like '2017W05'.
    epiweekstart = episem.episem('%s-%s-01' % (testyear, testmonth))
    epiyearstart = int(epiweekstart.split('W')[0])
    epiweekstart = int(epiweekstart.split('W')[1])
    lastday = calendar.monthrange(testyear, testmonth)[1]
    epiweekend = episem.episem('%s-%s-%s' % (testyear, testmonth, lastday))
    epiyearend = int(epiweekend.split('W')[0])
    epiweekend = int(epiweekend.split('W')[1])

    # Filter data keeping only SinPriations within desired window:
    if epiyearend > epiyearstart:
        # Window straddles a year boundary (e.g. December -> January).
        df = df[((df.epiweek <= epiweekend) & (df.epiyear == epiyearend)) | (df.epiyear < epiyearend)]
        df = df[(df.epiyear == epiyearend) | ((df.epiweek >= epiweekstart) & (df.epiyear == epiyearstart))]
    else:
        df = df[(df.epiyear == epiyearend) & (df.epiweek >= epiweekstart) & (df.epiweek <= epiweekend)]

    # Fill with all epiweeks:
    # Build a scaffold of every (UF, epiyear, epiweek) combination so that
    # weeks without notifications still appear (as zero rows) in the output.
    lastweek = df.epiweek[df.epiyear == epiyearend].max()
    firstweek = df.epiweek[df.epiyear == epiyearstart].min()
    uflist = list(df.UF.unique())
    tmpdict = []
    if epiyearstart == epiyearend:
        for week in range(firstweek, (lastweek + 1)):
            for uf in uflist:
                tmpdict.extend([{'UF': uf, 'epiyear': epiyearend, 'epiweek': week}])
    else:
        year = epiyearstart
        # Remaining weeks of the starting year, then weeks 1..lastweek of
        # the ending year.
        for week in range(firstweek, (int(episem.lastepiweek(year)) + 1)):
            for uf in uflist:
                tmpdict.extend([{'UF': uf, 'epiyear': year, 'epiweek': week}])
        tmpdict.extend([{'UF': uf, 'epiyear': epiyearend, 'epiweek': week} for week in
                        range(1, (lastweek + 1)) for uf in uflist])
    dftmp = pd.DataFrame(tmpdict)

    grp_cols = ['UF', 'epiyear', 'epiweek']
    # Total notifications per (UF, year, week) ...
    delay_table = df[grp_cols].groupby(grp_cols, as_index=False).size().reset_index()
    delay_table.rename(columns={0: 'Notifications'}, inplace=True)
    # ... plus one column d0..d26 with the count at each delay (in weeks).
    for k in range(0, 27):
        aux = df.loc[df.DelayWeeks == k,].groupby(grp_cols, as_index=False).size().reset_index()
        aux.rename(columns={0: 'd%s' % k}, inplace=True)
        delay_table = delay_table.merge(aux, how='left', on=grp_cols).fillna(0)
    # Left-merge onto the scaffold so empty weeks become all-zero rows.
    delay_table = dftmp.merge(delay_table, how='left', on=grp_cols).fillna(0)
    delay_table['Notifications_6m'] = delay_table[['d%s' % k for k in range(0, 27)]].sum(axis=1)
    return delay_table, testmonth, testyear
def histograms(df):
    """Aggregate per-UF delay histograms over all epidemiological weeks.

    Returns a pair of DataFrames: one restricted to delays of 0-4 weeks
    (columns d0..d4) and one covering the full 0-26 week range (d0..d26).
    """
    short_cols = ['UF'] + ['d%s' % w for w in range(5)]
    full_cols = ['UF'] + ['d%s' % w for w in range(27)]
    df4w = df[short_cols].groupby('UF', as_index=False).agg(sum)
    dffull = df[full_cols].groupby('UF', as_index=False).agg(sum)
    return df4w, dffull
def main(fname, testmonth=None, testyear=None):
    """Build the delay table for *fname* and write it next to the input file.

    The output filename embeds the resolved reference year and month.
    """
    notifications = readtable(fname)
    delay_table, month, year = createtable(notifications, testmonth=testmonth,
                                           testyear=testyear)
    # Histograms are computed but not persisted; only the table is written.
    dfhisto4w, dfhisto = histograms(delay_table)
    out_name = '%s_delay_table_%s_%s.csv' % (fname[:-4], year, month)
    delay_table.to_csv(out_name, index=False)
if __name__ == '__main__':
    # Command-line entry point. --month/--year default to None, in which
    # case createtable() falls back to the previous calendar month.
    parser = argparse.ArgumentParser(description="Exemple usage:\n" +
                                     "python3 delay_table_4Weeks.py --path \n" +
                                     "../../data/data/clean_data_epiweek-weekly-incidence_w_situation.csv\n" +
                                     '--month 1 --year 2017',
                                     formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--path', help='Path to data file')
    parser.add_argument('--year', help='Year')
    parser.add_argument('--month', help='Month')
    args = parser.parse_args()
    print(args)
    main(fname=args.path, testmonth=args.month, testyear=args.year)
| gpl-3.0 |
sarahgrogan/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np

from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection

from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA

n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()

similarities = euclidean_distances(X_true)

# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T  # symmetrize so the dissimilarity matrix stays valid
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0  # zero diagonal
similarities += noise

# Metric MDS on the precomputed (noisy) dissimilarities.
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_

# Non-metric MDS, warm-started from the metric solution.
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
                    dissimilarity="precomputed", random_state=seed, n_jobs=1,
                    n_init=1)
npos = nmds.fit_transform(similarities, init=pos)

# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())

# Rotate the data
# PCA aligns each embedding's principal axes so the scatters are comparable.
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)

pos = clf.fit_transform(pos)

npos = clf.fit_transform(npos)

fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])

plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')

# Invert so large dissimilarity -> thin/cool edge in the plot below.
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0

# Plot the edges
# NOTE(review): start_idx/end_idx are never used below -- candidate for
# removal.
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
#            linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
            for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.hot_r,
                    norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)

plt.show()
embeddedarm/android_external_chromium_org | chrome/browser/nacl_host/test/gdb_rsp.py | 99 | 2431 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is based on gdb_rsp.py file from NaCl repository.
import re
import socket
import time
def RspChecksum(data):
  """Return the GDB RSP checksum of *data*: sum of byte values modulo 256."""
  return sum(ord(ch) for ch in data) % 0x100
class GdbRspConnection(object):
  """Client connection to a GDB Remote Serial Protocol (RSP) debug stub.

  Packets are framed as '$<body>#<two-hex-digit checksum>' and acknowledged
  with '+'.
  """

  def __init__(self, addr):
    # addr is a (host, port) pair for sel_ldr's debug stub.
    self._socket = self._Connect(addr)

  def _Connect(self, addr):
    # Returns a connected socket, polling until the stub starts listening;
    # raises after ~10 seconds of failed attempts.
    # We have to poll because we do not know when sel_ldr has
    # successfully done bind() on the TCP port.  This is inherently
    # unreliable.
    # TODO(mseaborn): Add a more reliable connection mechanism to
    # sel_ldr's debug stub.
    timeout_in_seconds = 10
    poll_time_in_seconds = 0.1
    for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
      # On Mac OS X, we have to create a new socket FD for each retry.
      sock = socket.socket()
      try:
        sock.connect(addr)
      except socket.error:
        # Retry after a delay.
        time.sleep(poll_time_in_seconds)
      else:
        return sock
    raise Exception('Could not connect to sel_ldr\'s debug stub in %i seconds'
                    % timeout_in_seconds)

  def _GetReply(self):
    # Reads one complete packet ('#' marks the end of the body), verifies
    # the framing and checksum, acks with '+', and returns the bare body.
    reply = ''
    while True:
      data = self._socket.recv(1024)
      if len(data) == 0:
        raise AssertionError('EOF on socket reached with '
                             'incomplete reply message: %r' % reply)
      reply += data
      if '#' in data:
        break
    match = re.match('\+\$([^#]*)#([0-9a-fA-F]{2})$', reply)
    if match is None:
      raise AssertionError('Unexpected reply message: %r' % reply)
    reply_body = match.group(1)
    checksum = match.group(2)
    expected_checksum = '%02x' % RspChecksum(reply_body)
    if checksum != expected_checksum:
      raise AssertionError('Bad RSP checksum: %r != %r' %
                           (checksum, expected_checksum))
    # Send acknowledgement.
    self._socket.send('+')
    return reply_body

  # Send an rsp message, but don't wait for or expect a reply.
  def RspSendOnly(self, data):
    # NOTE(review): uses send(), which may transmit only part of the
    # message; sendall() would be safer (but changes the return value).
    msg = '$%s#%02x' % (data, RspChecksum(data))
    return self._socket.send(msg)

  def RspRequest(self, data):
    # Sends *data* as a packet and returns the stub's reply body.
    self.RspSendOnly(data)
    return self._GetReply()

  def RspInterrupt(self):
    # 0x03 (Ctrl-C) is the RSP interrupt byte; it is sent unframed.
    self._socket.send('\x03')
    return self._GetReply()
| bsd-3-clause |
endolith/scipy | scipy/integrate/odepack.py | 21 | 10740 | # Author: Travis Oliphant
__all__ = ['odeint']
import numpy as np
from . import _odepack
from copy import copy
import warnings
class ODEintWarning(Warning):
pass
_msgs = {2: "Integration successful.",
1: "Nothing was done; the integration time was 0.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error).",
-8: "Run terminated (internal error)."
}
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
mxords=5, printmessg=0, tfirst=False):
"""
Integrate a system of ordinary differential equations.
.. note:: For new code, use `scipy.integrate.solve_ivp` to solve a
differential equation.
Solve a system of ordinary differential equations using lsoda from the
FORTRAN library odepack.
Solves the initial value problem for stiff or non-stiff systems
of first order ode-s::
dy/dt = func(y, t, ...) [or func(t, y, ...)]
where y can be a vector.
.. note:: By default, the required order of the first two arguments of
`func` are in the opposite order of the arguments in the system
definition function used by the `scipy.integrate.ode` class and
the function `scipy.integrate.solve_ivp`. To use a function with
the signature ``func(t, y, ...)``, the argument `tfirst` must be
set to ``True``.
Parameters
----------
func : callable(y, t, ...) or callable(t, y, ...)
Computes the derivative of y at t.
If the signature is ``callable(t, y, ...)``, then the argument
`tfirst` must be set ``True``.
y0 : array
Initial condition on y (can be a vector).
t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
This sequence must be monotonically increasing or monotonically
decreasing; repeated values are allowed.
args : tuple, optional
Extra arguments to pass to function.
Dfun : callable(y, t, ...) or callable(t, y, ...)
Gradient (Jacobian) of `func`.
If the signature is ``callable(t, y, ...)``, then the argument
`tfirst` must be set ``True``.
col_deriv : bool, optional
True if `Dfun` defines derivatives down columns (faster),
otherwise `Dfun` should define derivatives across rows.
full_output : bool, optional
True if to return a dictionary of optional outputs as the second output
printmessg : bool, optional
Whether to print the convergence message
tfirst: bool, optional
If True, the first two arguments of `func` (and `Dfun`, if given)
must ``t, y`` instead of the default ``y, t``.
.. versionadded:: 1.1.0
Returns
-------
y : array, shape (len(t), len(y0))
Array containing the value of y for each desired time in t,
with the initial value `y0` in the first row.
infodict : dict, only returned if full_output == True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'hu' vector of step sizes successfully used for each time step
'tcur' vector with the value of t reached for each time step
(will always be at least as large as the input times)
'tolsf' vector of tolerance scale factors, greater than 1.0,
computed when a request for too much accuracy was detected
'tsw' value of t at the time of the last method switch
(given for each time step)
'nst' cumulative number of time steps
'nfe' cumulative number of function evaluations for each time step
'nje' cumulative number of jacobian evaluations for each time step
'nqu' a vector of method orders for each successful step
'imxer' index of the component of largest magnitude in the
weighted local error vector (e / ewt) on an error return, -1
otherwise
'lenrw' the length of the double work array required
'leniw' the length of integer work array required
'mused' a vector of method indicators for each successful time step:
1: adams (nonstiff), 2: bdf (stiff)
======= ============================================================
Other Parameters
----------------
ml, mu : int, optional
If either of these are not None or non-negative, then the
Jacobian is assumed to be banded. These give the number of
lower and upper non-zero diagonals in this banded matrix.
For the banded case, `Dfun` should return a matrix whose
rows contain the non-zero bands (starting with the lowest diagonal).
Thus, the return matrix `jac` from `Dfun` should have shape
``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
The data in `jac` must be stored such that ``jac[i - j + mu, j]``
holds the derivative of the `i`th equation with respect to the `j`th
state variable. If `col_deriv` is True, the transpose of this
`jac` must be returned.
rtol, atol : float, optional
The input parameters `rtol` and `atol` determine the error
control performed by the solver. The solver will control the
vector, e, of estimated local errors in y, according to an
inequality of the form ``max-norm of (e / ewt) <= 1``,
where ewt is a vector of positive error weights computed as
``ewt = rtol * abs(y) + atol``.
rtol and atol can be either vectors the same length as y or scalars.
Defaults to 1.49012e-8.
tcrit : ndarray, optional
Vector of critical points (e.g., singularities) where integration
care should be taken.
h0 : float, (0: solver-determined), optional
The step size to be attempted on the first step.
hmax : float, (0: solver-determined), optional
The maximum absolute step size allowed.
hmin : float, (0: solver-determined), optional
The minimum absolute step size allowed.
ixpr : bool, optional
Whether to generate extra printing at method switches.
mxstep : int, (0: solver-determined), optional
Maximum number of (internally defined) steps allowed for each
integration point in t.
mxhnil : int, (0: solver-determined), optional
Maximum number of messages printed.
mxordn : int, (0: solver-determined), optional
Maximum order to be allowed for the non-stiff (Adams) method.
mxords : int, (0: solver-determined), optional
Maximum order to be allowed for the stiff (BDF) method.
See Also
--------
solve_ivp : solve an initial value problem for a system of ODEs
ode : a more object-oriented integrator based on VODE
quad : for finding the area under a curve
Examples
--------
The second order differential equation for the angle `theta` of a
pendulum acted on by gravity with friction can be written::
theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
where `b` and `c` are positive constants, and a prime (') denotes a
derivative. To solve this equation with `odeint`, we must first convert
it to a system of first order equations. By defining the angular
velocity ``omega(t) = theta'(t)``, we obtain the system::
theta'(t) = omega(t)
omega'(t) = -b*omega(t) - c*sin(theta(t))
Let `y` be the vector [`theta`, `omega`]. We implement this system
in Python as:
>>> def pend(y, t, b, c):
... theta, omega = y
... dydt = [omega, -b*omega - c*np.sin(theta)]
... return dydt
...
We assume the constants are `b` = 0.25 and `c` = 5.0:
>>> b = 0.25
>>> c = 5.0
For initial conditions, we assume the pendulum is nearly vertical
with `theta(0)` = `pi` - 0.1, and is initially at rest, so
`omega(0)` = 0. Then the vector of initial conditions is
>>> y0 = [np.pi - 0.1, 0.0]
We will generate a solution at 101 evenly spaced samples in the interval
0 <= `t` <= 10. So our array of times is:
>>> t = np.linspace(0, 10, 101)
Call `odeint` to generate the solution. To pass the parameters
`b` and `c` to `pend`, we give them to `odeint` using the `args`
argument.
>>> from scipy.integrate import odeint
>>> sol = odeint(pend, y0, t, args=(b, c))
The solution is an array with shape (101, 2). The first column
is `theta(t)`, and the second is `omega(t)`. The following code
plots both components.
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
>>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
>>> plt.legend(loc='best')
>>> plt.xlabel('t')
>>> plt.grid()
>>> plt.show()
"""
if ml is None:
ml = -1 # changed to zero inside function call
if mu is None:
mu = -1 # changed to zero inside function call
dt = np.diff(t)
if not((dt >= 0).all() or (dt <= 0).all()):
raise ValueError("The values in t must be monotonically increasing "
"or monotonically decreasing; repeated values are "
"allowed.")
t = copy(t)
y0 = copy(y0)
output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
full_output, rtol, atol, tcrit, h0, hmax, hmin,
ixpr, mxstep, mxhnil, mxordn, mxords,
int(bool(tfirst)))
if output[-1] < 0:
warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
warnings.warn(warning_msg, ODEintWarning)
elif printmessg:
warning_msg = _msgs[output[-1]]
warnings.warn(warning_msg, ODEintWarning)
if full_output:
output[1]['message'] = _msgs[output[-1]]
output = output[:-1]
if len(output) == 1:
return output[0]
else:
return output
| bsd-3-clause |
alvarofierroclavero/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 162 | 7103 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
    """Tokenize *doc*, mapping every number-like token to a placeholder.

    For many applications, tokens that begin with a number are not directly
    useful, but the fact that such a token exists can be relevant.  Collapsing
    them all onto one "#NUMBER" token is a cheap form of dimensionality
    reduction.
    """
    word_re = re.compile(u'(?u)\\b\\w\\w+\\b')
    return ["#NUMBER" if tok[0] in "0123456789_" else tok
            for tok in word_re.findall(doc)]
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
              'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
              'comp.windows.x', 'misc.forsale', 'rec.autos',
              'rec.motorcycles', 'rec.sport.baseball',
              'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
              'sci.med', 'sci.space', 'soc.religion.christian',
              'talk.politics.guns', 'talk.politics.mideast',
              'talk.politics.misc', 'talk.religion.misc']
# NOTE(review): fetch_20newsgroups downloads the corpus on first use, so
# this statement needs network access or a populated data home.
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target

# TF-IDF features; min_df=5 drops very rare words and the custom tokenizer
# collapses number-like tokens onto a single "#NUMBER" feature.
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
                             tokenizer=number_aware_tokenizer)
# One bicluster per category; fixed seed for reproducibility.
cocluster = SpectralCoclustering(n_clusters=len(categories),
                                 svd_method='arpack', random_state=0)
# MiniBatchKMeans baseline with the same number of clusters.
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
                         random_state=0)

print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)

print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
# Compare the document (row) cluster labels against the true newsgroups.
print("Done in {:.2f}s. V-measure: {:.4f}".format(
    time() - start_time,
    v_measure_score(y_cocluster, y_true)))

print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
    time() - start_time,
    v_measure_score(y_kmeans, y_true)))

feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
    """Return the normalized cut of bicluster ``i`` (smaller is better).

    Reads the module-level ``cocluster`` model and TF-IDF matrix ``X``.
    A degenerate bicluster (empty row or column set) gets the largest
    representable float so it sorts last.
    """
    rows, cols = cocluster.get_indices(i)
    if not (np.any(rows) and np.any(cols)):
        import sys
        return sys.float_info.max
    outside_rows = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
    outside_cols = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
    # Total weight inside the bicluster ...
    weight = X[rows[:, np.newaxis], cols].sum()
    # ... and the weight crossing its boundary.
    cut = (X[outside_rows[:, np.newaxis], cols].sum() +
           X[rows[:, np.newaxis], outside_cols].sum())
    return cut / weight
def most_common(d):
    """Items of a defaultdict(int) with the highest values.

    Like Counter.most_common in Python >=2.7.
    """
    by_value = operator.itemgetter(1)
    return sorted(iteritems(d), key=by_value, reverse=True)
# Rank all biclusters by normalized cut and report the five best.
bicluster_ncuts = list(bicluster_ncut(i)
                       for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]

print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
    n_rows, n_cols = cocluster.get_shape(cluster)
    cluster_docs, cluster_words = cocluster.get_indices(cluster)
    # Skip degenerate biclusters with no documents or no words.
    if not len(cluster_docs) or not len(cluster_words):
        continue
    # categories: percentage of each newsgroup among the cluster's documents
    counter = defaultdict(int)
    for i in cluster_docs:
        counter[document_names[i]] += 1
    cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
                           for name, c in most_common(counter)[:3])
    # words: score each word by its weight inside the bicluster's documents
    # minus its weight in all other documents, restricted to this bicluster's
    # word columns.
    out_of_cluster_docs = cocluster.row_labels_ != cluster
    out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
    word_col = X[:, cluster_words]
    word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
                           word_col[out_of_cluster_docs, :].sum(axis=0))
    word_scores = word_scores.ravel()
    # Ten highest-scoring words, best first.
    important_words = list(feature_names[cluster_words[i]]
                           for i in word_scores.argsort()[:-11:-1])
    print("bicluster {} : {} documents, {} words".format(
        idx, n_rows, n_cols))
    print("categories : {}".format(cat_string))
    print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
grlee77/pywt | demo/plot_wavelets.py | 8 | 2656 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Plot scaling and wavelet functions for db, sym, coif, bior and rbio families
import itertools
import matplotlib.pyplot as plt
import pywt
# Families whose wavelets have a single (phi, psi) pair, together with the
# (rows, cols) subplot grid used for that family's figure.
plot_data = [('db', (4, 3)),
             ('sym', (4, 3)),
             ('coif', (3, 2))]

for family, (rows, cols) in plot_data:
    fig = plt.figure()
    fig.subplots_adjust(hspace=0.2, wspace=0.2, bottom=.02, left=.06,
                        right=.97, top=.94)
    colors = itertools.cycle('bgrcmyk')

    wnames = pywt.wavelist(family)
    i = iter(wnames)
    for col in range(cols):
        for row in range(rows):
            try:
                wavelet = pywt.Wavelet(next(i))
            except StopIteration:
                # Fewer wavelets than grid cells: leave the rest empty.
                break
            # phi: scaling function, psi: wavelet function, x: support grid
            phi, psi, x = wavelet.wavefun(level=5)

            color = next(colors)
            # Two side-by-side axes per wavelet: phi on the left ...
            ax = fig.add_subplot(rows, 2 * cols, 1 + 2 * (col + row * cols))
            ax.set_title(wavelet.name + " phi")
            ax.plot(x, phi, color)
            ax.set_xlim(min(x), max(x))

            # ... and psi on the right.
            ax = fig.add_subplot(rows, 2*cols, 1 + 2*(col + row*cols) + 1)
            ax.set_title(wavelet.name + " psi")
            ax.plot(x, psi, color)
            ax.set_xlim(min(x), max(x))
# Biorthogonal families have separate decomposition (phi, psi) and
# reconstruction (phi_r, psi_r) functions, so each wavelet occupies two
# subplot rows: decomposition pair on top, reconstruction pair below.
for family, (rows, cols) in [('bior', (4, 3)), ('rbio', (4, 3))]:
    fig = plt.figure()
    fig.subplots_adjust(hspace=0.5, wspace=0.2, bottom=.02, left=.06,
                        right=.97, top=.94)
    colors = itertools.cycle('bgrcmyk')

    wnames = pywt.wavelist(family)
    i = iter(wnames)
    for col in range(cols):
        for row in range(rows):
            try:
                wavelet = pywt.Wavelet(next(i))
            except StopIteration:
                # Fewer wavelets than grid cells: leave the rest empty.
                break
            phi, psi, phi_r, psi_r, x = wavelet.wavefun(level=5)
            row *= 2  # decomposition pair goes on the upper of the two rows

            color = next(colors)
            ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols))
            ax.set_title(wavelet.name + " phi")
            ax.plot(x, phi, color)
            ax.set_xlim(min(x), max(x))

            ax = fig.add_subplot(2*rows, 2*cols, 2*(1 + col + row*cols))
            ax.set_title(wavelet.name + " psi")
            ax.plot(x, psi, color)
            ax.set_xlim(min(x), max(x))

            row += 1  # reconstruction pair goes on the row below
            ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols))
            ax.set_title(wavelet.name + " phi_r")
            ax.plot(x, phi_r, color)
            ax.set_xlim(min(x), max(x))

            ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols) + 1)
            ax.set_title(wavelet.name + " psi_r")
            ax.plot(x, psi_r, color)
            ax.set_xlim(min(x), max(x))

plt.show()
| mit |
MMKrell/pyspace | pySPACE/missions/operations/comp_analysis.py | 4 | 36892 | """ Creates various comparing plots on several levels for a :class:`~pySPACE.resources.dataset_defs.performance_result.PerformanceResultSummary`
This module contains implementations of an operation and
a process for analyzing data contained in a csv file (typically the result
of a Weka Classification Operation).
A *CompAnalysisProcess* consists of evaluating the effect of several parameter
on a set of metrics. For each numeric parameter, each pair of numeric parameters
and each nominal parameter, one plot is created for each metric.
Furthermore, for each value of each parameter, the rows of the data where
the specific parameter takes on the specific value are selected and the same
analysis is done for this subset recursively.
This is useful for large experiments where several parameters are varied.
For instance, if one wants to analyze how the performance behaves for certain
settings of certain parameters, one can find all plots in the respective
subdirectories. Similarly, if one is interested only in the performance
of one classifier, one can go into the subdirectory of the respective classifier.
The "Comp Analysis Operation" is similar to the "Analysis Operation"; the only
difference is that plots corresponding to the same parameters but different
metrics are created in one single pdf file. This ensures better comparability
and lower computation time, as less files are created.
.. todo:: Unification with analysis operation to prevent from doubled code
.. todo:: correct parameter documentation
"""
import sys
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
import processing
else:
import multiprocessing as processing
import pylab
import numpy
import os
import itertools
import matplotlib.font_manager
from collections import defaultdict
from pySPACE.tools.filesystem import create_directory
from pySPACE.resources.dataset_defs.base import BaseDataset
import pySPACE
from pySPACE.missions.operations.base import Operation, Process
class CompAnalysisOperation(Operation):
    """ Operation for analyzing and plotting data in compressed format

    A *CompAnalysisOperation* is similar to a *AnalysisOperation*:
    An *AnalysisOperation* loads the data from a csv-file (typically the
    result of a Weka Classification Operation) and evaluates the effect of
    various parameters on several metrics.
    """
    def __init__(self, processes, operation_spec, result_directory,
                 number_processes, create_process=None):
        super(CompAnalysisOperation, self).__init__(processes,
                                                    operation_spec,
                                                    result_directory)
        self.operation_spec = operation_spec
        self.create_process = create_process
        self.number_processes = number_processes

    @classmethod
    def create(cls, operation_spec, result_directory, debug=False,
               input_paths=[]):
        """
        A factory method that creates an Analysis operation based on the
        information given in the operation specification operation_spec.

        If debug is True the creation of the Analysis Processes will not
        be in a separated thread.
        """
        assert(operation_spec["type"] == "comp_analysis")
        input_path = operation_spec["input_path"]
        summary = BaseDataset.load(os.path.join(pySPACE.configuration.storage,
                                                input_path))
        data_dict = summary.data
        # Determine the parameters that should be analyzed
        parameters = operation_spec["parameters"]
        # Determine dependent parameters, which don't get extra resolution
        try:
            dep_par = operation_spec["dep_par"]
        except KeyError:
            dep_par = []
        # Determine the metrics that should be plotted.  Import warnings
        # unconditionally: the original only imported it in the "unknown
        # metric" branch, so the "no metric available" warning below raised
        # a NameError when *all* requested metrics were missing.
        import warnings
        spec_metrics = operation_spec["metrics"]
        metrics = []
        for metric in spec_metrics:
            # membership test instead of Python-2-only dict.has_key
            if metric in data_dict:
                metrics.append(metric)
            else:
                warnings.warn('The metric "' + metric +
                              '" is not contained in the results csv file.')
        if len(metrics) == 0:
            warnings.warn('No metric available from spec file, '
                          'default to first dict entry.')
            metrics.append(data_dict.keys()[0])
        # Determine how many processes will be created
        number_parameter_values = [len(set(data_dict[param]))
                                   for param in parameters]
        number_processes = cls._numberOfProcesses(
            0, number_parameter_values) + 1
        # Optional plotting tweaks from the spec file
        logscale = False
        if 'logscale' in operation_spec:
            logscale = operation_spec['logscale']
        markertype = 'x'
        if 'markertype' in operation_spec:
            markertype = operation_spec['markertype']
        if debug:
            # To better debug creation of processes we don't limit the queue
            # and create all processes before executing them
            processes = processing.Queue()
            cls._createProcesses(processes, result_directory, data_dict,
                                 parameters, dep_par, metrics, logscale,
                                 markertype, True)
            return cls(processes, operation_spec, result_directory,
                       number_processes)
        else:
            # Create all plot processes by calling a recursive helper method
            # in another thread so that already created processes can be
            # executed although creation of processes is not finished yet.
            # Therefore a queue is used whose size is limited to guarantee
            # that not too many objects are created (since this costs
            # memory). However, the actual number of 100 is arbitrary and
            # might be reviewed.
            processes = processing.Queue(100)
            create_process = processing.Process(
                target=cls._createProcesses,
                args=(processes, result_directory, data_dict, parameters,
                      dep_par, metrics, logscale, markertype, True))
            create_process.start()
            # create and return the comp_analysis operation object
            return cls(processes, operation_spec, result_directory,
                       number_processes, create_process)

    @classmethod
    def _numberOfProcesses(cls, number_of_processes,
                           number_of_parameter_values):
        """Recursive function to determine the number of processes that
        will be created for the given *number_of_parameter_values*
        """
        if len(number_of_parameter_values) < 3:
            number_of_processes += sum(number_of_parameter_values)
            return number_of_processes
        else:
            for i in range(len(number_of_parameter_values)):
                number_of_processes += number_of_parameter_values[i] * \
                    cls._numberOfProcesses(0,
                        [number_of_parameter_values[j] for j in
                         range(len(number_of_parameter_values)) if j != i]) + \
                    number_of_parameter_values[i]
            return number_of_processes

    @classmethod
    def _createProcesses(cls, processes, result_dir, data_dict, parameters,
                         dep_par, metrics, logscale, markertype, top_level):
        """Recursive function that is used to create the analysis processes

        Each process creates one plot for each numeric parameter, each pair
        of numeric parameters, and each nominal parameter based on the data
        contained in the *data_dict*. The results are stored in
        *result_dir*. The method calls itself recursively for each value of
        each parameter.
        """
        # Create the analysis process for the given parameters and the
        # given data and put it in the executing-queue
        process = CompAnalysisProcess(result_dir, data_dict, parameters,
                                      metrics, logscale, markertype)
        processes.put(process)
        # If we have less than two parameters it does not make sense to
        # split further
        if len(parameters) < 2 or len(parameters) == len(dep_par):
            # If we have only one parameter to visualize,
            # we don't need to create any further processes,
            # and we have to finish the creating process.
            return
        # For each parameter
        for proj_parameter in parameters:
            if proj_parameter in dep_par:
                continue
            # We split the data based on the values of this parameter
            remaining_parameters = [parameter for parameter in parameters
                                    if parameter != proj_parameter]
            # For each value the respective projection parameter can take on
            for value in set(data_dict[proj_parameter]):
                # Project the result dict onto the rows where the respective
                # parameter takes on the given value
                projected_dict = defaultdict(list)
                entries_added = False
                # BUGFIX: iterate over the projection parameter's own column.
                # The original indexed data_dict[parameter], relying on the
                # loop variable leaking out of the list comprehension above;
                # that only works in Python 2 and raises NameError in
                # Python 3.
                for i in range(len(data_dict[proj_parameter])):
                    if data_dict[proj_parameter][i] == value:
                        entries_added = True
                        for column_key in data_dict.keys():
                            if column_key == proj_parameter:
                                continue
                            projected_dict[column_key].append(
                                data_dict[column_key][i])
                # If the projected_dict is empty we continue
                if not entries_added:
                    continue
                # Create result_dir and do the recursive call for the
                # projected data
                proj_result_dir = result_dir + os.sep + "%s#%s" % (
                    proj_parameter, value)
                create_directory(proj_result_dir)
                cls._createProcesses(processes, proj_result_dir,
                                     projected_dict, remaining_parameters,
                                     dep_par, metrics, logscale, markertype,
                                     False)
        if top_level:
            # give executing process the sign that creation is now finished
            processes.put(False)

    def consolidate(self):
        # Nothing to consolidate: each process writes its plots directly.
        pass
class CompAnalysisProcess(Process):
""" Process for analyzing and plotting data
A *CompAnalysisProcess* is quite similar to a *AnalysisProcess*:
An *CompAnalysisProcess* consists of evaluating the effect of several
*parameters* on a set of *metrics*. For each numeric parameter,
each pair of numeric parameters and each nominal parameter,
one plot is created for all metrics (instead of one plot for each metric as
in *AnalysisProcess*).
**Expected parameters**
*result_dir* : The directory in which the actual results are stored
*data_dict* : A dictionary containing all the data. The dictionary \
contains a mapping from an attribute (e.g. accuracy) \
to a list of values taken by an attribute. An entry is the\
entirety of all i-th values over all dict-values
*parameters* : The parameters which have been varied during the \
experiment and whose effect on the *metrics* should be \
investigated. These must be keys of the *data_dict*.
*metrics*: The metrics the should be evaluated. Must be keys of the \
*data_dict*.
*logscale*: Boolean, numeric x-axis will be scaled log'ly if true.
*markertype*: A string like '.' defining the marker type for certain \
plots. Default is 'x'.
"""
def __init__(self, result_dir, data_dict, parameters, metrics, logscale,
             markertype):
    """Store the plotting configuration and normalize *metrics*."""
    super(CompAnalysisProcess, self).__init__()
    self.result_dir = result_dir
    self.data_dict = data_dict
    self.parameters = parameters
    self.logscale = logscale
    self.markertype = markertype
    # Usually the value of a metric for a certain situation is just a scalar
    # value. However, for certain metrics the value can be a sequence
    # (typically the change of some measure over time). These cases must be
    # indicated externally by the usage of ("metric_name", "sequence")
    # instead of just "metric_name"; plain strings are tagged as scalar here.
    self.metrics = [(metric, "scalar") if isinstance(metric, basestring)
                    else metric for metric in metrics]
def __call__(self):
    """
    Executes this process on the respective modality

    Builds one figure containing one subplot per (parameter, metric)
    combination and saves it as ``plot.pdf`` in ``self.result_dir``.
    """
    # Restore configuration
    pySPACE.configuration = self.configuration
    ############## Prepare benchmarking ##############
    super(CompAnalysisProcess, self).pre_benchmarking()
    # Split parameters into nominal and numeric parameters
    nominal_parameters = []
    numeric_parameters = []
    for parameter in self.parameters:
        try:
            # Try to create a float of the first value of the parameter
            float(self.data_dict[parameter][0])
            # No exception thus a numeric attribute
            numeric_parameters.append(parameter)
        except ValueError:
            # This is not a numeric parameter, treat it as nominal
            nominal_parameters.append(parameter)
        except KeyError:
            # Inform the user about wrong parameters in the YAML file.
            import warnings
            warnings.warn('The parameter "' + parameter + '" is not contained in the performance results.')
        except IndexError:
            # This exception informs the user about wrong parameters in his YAML file.
            import warnings
            warnings.warn('The parameter "' + parameter + '" could not be found.')
    # TODO: Better exception treatment! The Program should ignore unknown
    # parameters and go on after giving information on the wrong parameter.
    # Create the figure and initialize ...
    fig1 = pylab.figure()
    ax = []  # ... a list of matplotlib axes objects
    im = []  # ... and images. This is needed for the color bars of
             # contour plots
    # For all performance measures:
    # pass figure and axes to the plotting helpers, let them add their
    # plots, then take back fig and axes.
    for metric in self.metrics:
        if metric[1] == 'scalar':
            fig1, ax, im = self._scalar_metric(metric[0], numeric_parameters,
                                               nominal_parameters, fig1, ax,
                                               im)
        else:
            im.append([])  # Don't need the image in this case
            # sequence metrics carry extra keyword args in metric[2]
            fig1, ax = self._sequence_metric(metric[0], numeric_parameters,
                                             nominal_parameters, fig1, ax,
                                             **metric[2])
    nmetrics = self.metrics.__len__()  # number of metrics
    naxes = ax.__len__()               # and of axes
    # And compute how many subplot columns are needed
    plot_cols = numpy.int(numpy.ceil(numpy.float(naxes)
                                     / numpy.float(nmetrics)))
    # Now loop the axes
    for i in range(naxes):
        # And move every single one to its desired location in the grid
        ax[i].change_geometry(nmetrics, plot_cols, i + 1)
        # in case of a contour plot (labels ending in 'C', see
        # _plot_numeric_vs_numeric) also add a corresponding colorbar...
        if ax[i]._label[-1] == 'C':
            fig1.sca(ax[i])
            pylab.colorbar(im[i])  # ... by using the correct image
    # enlarge canvas so that nothing overlaps
    fig1.set_size_inches((10 * plot_cols, 7 * nmetrics))
    # save...
    fig1.savefig("%s%splot.pdf" % (self.result_dir, os.sep),
                 bbox_inches="tight")
    # and clean up.
    del fig1
    pylab.gca().clear()
    pylab.close("all")
    ############## Clean up after benchmarking ##############
    super(CompAnalysisProcess, self).post_benchmarking()
def _scalar_metric(self, metric, numeric_parameters, nominal_parameters,
                   fig1, ax, im):
    """ Creates the plots for a scalar metric

    Adds to *fig1* one subplot per numeric parameter, per pair of numeric
    parameters (contour plot), per numeric/nominal combination, and per
    nominal parameter; returns the updated figure, axes list and image
    list.
    """
    # For all numeric parameters
    for index, parameter1 in enumerate(numeric_parameters):
        # need not to save image in these cases, as no colorbar is needed
        im.append([])
        fig1, ax = self._plot_numeric(self.data_dict, self.result_dir,
                                      fig1, ax,
                                      x_key=parameter1,
                                      y_key=metric,
                                      one_figure=False,
                                      show_errors=True)
        # For all combinations of two numeric parameters
        for parameter2 in numeric_parameters[index + 1:]:
            axis_keys = [parameter1, parameter2]
            fig1, ax, im = self._plot_numeric_vs_numeric(
                self.data_dict, self.result_dir, fig1, ax, im,
                axis_keys=axis_keys, value_key=metric)
        # For all combinations of a numeric and a nominal parameter
        for parameter2 in nominal_parameters:
            im.append([])
            axis_keys = [parameter1, parameter2]
            fig1, ax = self._plot_numeric_vs_nominal(
                self.data_dict, self.result_dir, fig1, ax,
                numeric_key=parameter1, nominal_key=parameter2,
                value_key=metric)
    # For all nominal parameters:
    for index, parameter1 in enumerate(nominal_parameters):
        im.append([])
        fig1, ax = self._plot_nominal(self.data_dict, self.result_dir,
                                      fig1, ax, x_key=parameter1,
                                      y_key=metric)
    return fig1, ax, im
def _sequence_metric(self, metric, numeric_parameters, nominal_parameters,
                     fig1, ax, mwa_window_length):
    """ Creates the plots for a sequence metric

    For each parameter, one subplot is added that shows the smoothed
    (moving-window-averaged) metric sequence, averaged over all runs, with
    one curve per parameter value.

    .. todo:: Do not distinguish nominal and numeric parameters for the moment
    """
    parameters = list(numeric_parameters)
    parameters.extend(nominal_parameters)
    # NOTE(review): the stored sequences are evaluated with eval(); this
    # assumes the result files are trusted input.
    metric_values = map(eval, self.data_dict[metric])
    # Sometimes, the number of values are not identical, so we cut all to
    # the same minimal length
    num_values = min(map(len, metric_values))
    metric_values = map(lambda l: l[0:num_values], metric_values)
    # Moving window average of the metric values
    mwa_metric_values = []
    for sequence in metric_values:
        mwa_metric_values.append([])
        for index in range(len(sequence)):
            # Chop window such that it does not go beyond the range of
            # available values
            window_width = min(index, len(sequence) - index - 1,
                               mwa_window_length / 2)
            subrange = (index - window_width, index + window_width)
            mwa_metric_values[-1].append(
                numpy.mean(sequence[subrange[0]:subrange[1]]))
    # For each parameter
    for parameter in parameters:
        # Split the data according to the values the parameter takes on
        curves = defaultdict(list)
        for row in range(len(self.data_dict[parameter])):
            curves[self.data_dict[parameter][row]].append(
                mwa_metric_values[row])
        # Plot the mean curve over all runs for this parameter setting
        ax.append(fig1.add_subplot(111, label="%d" % (ax.__len__() + 1)))
        fig1.sca(ax[-1])
        for parameter_value, curve in curves.iteritems():
            # Create a simple plot
            pylab.plot(range(len(metric_values[0])), numpy.mean(curve, 0),
                       label=parameter_value)
        # Semi-transparent legend so it does not hide the curves
        lg = pylab.legend(loc=0, fancybox=True)
        lg.get_frame().set_facecolor('0.90')
        lg.get_frame().set_alpha(.3)
        pylab.xlabel("Step")
        pylab.ylabel(metric)
    return fig1, ax
def _plot_numeric(self, data, result_dir, fig1, ax, x_key, y_key,
                  conditions=[], one_figure=False, show_errors=False):
    """ Creates a plot of the y_keys for the given numeric parameter x_key.

    A method that allows to create a plot that visualizes the effect
    of differing one variable onto a second one (e.g. the effect of
    differing the number of features onto the accuracy).

    **Expected parameters**

    *data* : A dictionary, that contains a mapping from an attribute \
             (e.g. accuracy) to a list of values taken by an attribute. \
             An entry is the entirety of all i-th values over all dict-values

    *result_dir* : The directory in which the plots will be saved.

    *x_key* : The key of the dictionary whose values should be used as \
              values for the x-axis (the independent variables)

    *y_key* : The key of the dictionary whose values should be used as\
              values for the y-axis, i.e. the dependent variables

    *conditions* : A list of functions that need to be fulfilled in order to \
                   use one entry in the plot. Each function has to take two \
                   arguments: The data dictionary containing all entries and \
                   the index of the entry that should be checked. Each condition \
                   must return a boolean value.

    *one_figure* : If true, all curves are plotted in the same figure.
                   Otherwise, for each value of curve_key, a new figure\
                   is generated (currently ignored)

    *show_errors* : If true, error bars are plotted
    """
    ax.append(fig1.add_subplot(111, label="%d" % (ax.__len__() + 1)))
    fig1.sca(ax[-1])
    pylab.xlabel(x_key)
    # Partition the y values by x value: y_key -> (x_value -> [y values])
    curves = defaultdict(lambda: defaultdict(list))
    for i in range(len(data[x_key])):
        # Check if this particular entry should be used
        if not all(condition(data, i) for condition in conditions):
            continue
        # Get the value of the independent variable for this entry
        x_value = float(data[x_key][i])
        # Attach the corresponding value to the respective partition
        y_value = float(data[y_key][i])
        curves[y_key][x_value].append(y_value)
    for y_key, curve in curves.iteritems():
        curve_x = []
        curve_y = []
        # sorted() keeps the curve monotone in x
        for x_value, y_values in sorted(curve.iteritems()):
            curve_x.append(x_value)
            curve_y.append(y_values)
        # create the actual plot
        if show_errors:
            # Create an error bar plot (mean +- std per x value)
            if self.markertype == '':
                linestyle = '-'
            else:
                linestyle = '--'
            pylab.errorbar(x=curve_x,
                           y=map(numpy.mean, curve_y),
                           yerr=map(numpy.std, curve_y),
                           elinewidth=1,
                           capsize=5,
                           label=y_key,
                           fmt=linestyle,
                           marker=self.markertype)
        else:
            # Create a simple plot of the means
            pylab.plot(curve_x,
                       map(numpy.mean, curve_y),
                       label=y_key)
    # Semi-transparent legend so it does not hide the curves
    lg = pylab.legend(loc=0, fancybox=True)
    lg.get_frame().set_facecolor('0.90')
    lg.get_frame().set_alpha(.3)
    pylab.ylabel(y_key)
    if self.logscale:
        fig1.gca().set_xscale('log')
        # Pad the x limits by a twentieth of a decade on each side
        left_lim = min(curve_x)**(21.0/20.0) / max(curve_x)**(1.0/20.0)
        right_lim = max(curve_x)**(21.0/20.0) / min(curve_x)**(1.0/20.0)
        fig1.gca().set_xlim(left_lim, right_lim)
    else:
        # Linear axis: pad by 5% of the x range
        border = (max(curve_x) - min(curve_x))/20
        fig1.gca().set_xlim(min(curve_x)-border, max(curve_x)+border)
    return fig1, ax
def _plot_numeric_vs_numeric(self, data, result_dir, fig1, ax, im,
                             axis_keys, value_key):
    """ Contour plot of the value_keys for the two numeric parameters axis_keys.

    A method that allows to create a contour plot that visualizes the effect
    of differing two variables on a third one (e.g. the effect of differing
    the lower and upper cutoff frequency of a bandpass filter onto
    the accuracy).

    **Expected parameters**

    *data* : A dictionary that contains a mapping from an attribute \
             (e.g. accuracy) to a list of values taken by an attribute. \
             An entry is the entirety of all i-th values over all dict-values.

    *result_dir*: The directory in which the plots will be saved.

    *axis_keys*: The two keys of the dictionary that are assumed to have \
                 an effect on a third variable (the dependent variable)

    *value_key*: The dependent variables whose values determine the \
                 color of the contour plot
    """
    assert(len(axis_keys) == 2)
    # Determine a sorted list of the values taken on by the axis keys:
    x_values = set([float(value) for value in data[axis_keys[0]]])
    x_values = sorted(list(x_values))
    y_values = set([float(value) for value in data[axis_keys[1]]])
    y_values = sorted(list(y_values))
    # We cannot create a contour plot if one dimension is only 1d so we
    # return the unchanged figure
    if len(x_values) == 1 or len(y_values) == 1:
        return fig1, ax, im
    # The trailing 'C' in the label marks this axes as a contour plot so
    # that __call__ knows to attach a colorbar to it.
    ax.append(fig1.add_subplot(111, label="%dC" % (ax.__len__() + 1)))
    fig1.sca(ax[-1])
    # Create a meshgrid of them
    X, Y = pylab.meshgrid(x_values, y_values)
    # Determine the average value taken on by the dependent variable
    # for each combination of the two source variables
    Z = numpy.zeros((len(x_values), len(y_values)))
    counter = numpy.zeros((len(x_values), len(y_values)))
    for i in range(len(data[axis_keys[0]])):
        x_value = float(data[axis_keys[0]][i])
        y_value = float(data[axis_keys[1]][i])
        value = float(data[value_key][i])
        Z[x_values.index(x_value), y_values.index(y_value)] += value
        counter[x_values.index(x_value), y_values.index(y_value)] += 1
    Z = Z / counter
    # Create the filled contour plot; keep the image for the colorbar
    im.append(pylab.contourf(X, Y, Z.T, N=20))
    if self.logscale:
        fig1.gca().set_xscale('log')
        fig1.gca().set_yscale('log')
    pylab.xlim(min(x_values), max(x_values))
    pylab.ylim(min(y_values), max(y_values))
    pylab.xlabel(axis_keys[0])
    pylab.ylabel(axis_keys[1])
    pylab.title(value_key)
    # NOTE(review): the log scaling is applied a second time here; this is
    # redundant with the block above but harmless.
    if self.logscale:
        fig1.gca().set_xscale('log')
        fig1.gca().set_yscale('log')
    return fig1, ax, im
def _plot_numeric_vs_nominal(self, data, result_dir, fig1, ax, numeric_key,
                             nominal_key, value_key):
    """ Plot for comparison of several different values of a nominal parameter

    A method that allows to create a plot that visualizes the effect of
    varying one numeric parameter onto the performance for several
    different values of a nominal parameter.

    **Expected parameters**

    *data* : A dictionary that contains a mapping from an attribute \
             (e.g. accuracy) to a list of values taken by an attribute. \
             An entry is the entirety of all i-th values over all dict-values.

    *result_dir*: The directory in which the plots will be saved.

    *numeric_key*: The numeric parameter whose effect (together with the \
                   nominal parameter) onto the dependent variable should \
                   be investigated.

    *nominal_key*: The nominal parameter whose effect (together with the \
                   numeric parameter) onto the dependent variable should \
                   be investigated.

    *value_key* : The dependent variable; either a plain metric name or a \
                  weighted cost function encoded as "#w1#key1#w2#key2".
    """
    ax.append(fig1.add_subplot(111, label="%d" % (ax.__len__() + 1)))
    fig1.sca(ax[-1])
    # Determine a mapping from the value of the nominal value to a mapping
    # from the value of the numeric value to the achieved performance:
    # nominal -> (numeric -> performance)
    curves = defaultdict(lambda: defaultdict(list))
    for i in range(len(data[nominal_key])):
        curve_key = data[nominal_key][i]
        parameter_value = float(data[numeric_key][i])
        # BUGFIX: use "!=" instead of the original ``is not`` -- identity
        # comparison against a string literal only works by accident
        # (CPython interns short strings) and raises a SyntaxWarning on
        # newer Python versions.
        if value_key[0] != "#":
            performance_value = float(data[value_key][i])
        else:  # A weighted cost function "#w1#key1#w2#key2"
            weight1, value_key1, weight2, value_key2 = \
                value_key[1:].split("#")
            performance_value = float(weight1) * float(data[value_key1][i]) \
                + float(weight2) * float(data[value_key2][i])
        curves[curve_key][parameter_value].append(performance_value)
    # Cycle line styles so that more than seven curves remain discernible
    linecycler = itertools.cycle(['-']*7 + ['-.']*7 +
                                 [':']*7 + ['--']*7).next
    # Iterate over all values of the nominal parameter and create one curve
    # in the plot showing the mapping from numeric parameter to performance
    # for this particular value of the nominal parameter
    minmax_x = [pylab.inf, -pylab.inf]
    for curve_key, curve in curves.iteritems():
        x_values = []
        y_values = []
        for x_value, y_value in sorted(curve.iteritems()):
            x_values.append(x_value)
            # Plot the mean of all values of the performance for this
            # particular combination of nominal and numeric parameter
            y_values.append(pylab.mean(y_value))
        pylab.plot(x_values, y_values, marker=self.markertype,
                   label=curve_key, linestyle=linecycler())
        minmax_x = [min(minmax_x[0], min(x_values)),
                    max(minmax_x[1], max(x_values))]
    pylab.gca().set_xlabel(numeric_key.replace("_", " "))
    # Same fix as above: equality instead of identity comparison.
    if value_key[0] != "#":
        pylab.gca().set_ylabel(value_key.replace("_", " "))
    else:
        pylab.gca().set_ylabel("%s*%s+%s*%s"
                               % tuple(value_key[1:].split("#")))
    # Shrink legend if too many curves
    if len(curves) > 6:
        prop = matplotlib.font_manager.FontProperties(size='small')
        lg = pylab.legend(prop=prop, loc=0, ncol=2, fancybox=True)
    else:
        lg = pylab.legend(loc=0, fancybox=True)
    lg.get_frame().set_facecolor('0.90')
    lg.get_frame().set_alpha(.3)
    if self.logscale:
        fig1.gca().set_xscale('log')
        try:
            # Pad the limits by a twentieth of a decade on each side; this
            # can fail e.g. for non-positive x values, in which case the
            # autoscaled limits are kept.
            left_lim = minmax_x[0]**(21.0/20.0) / minmax_x[1]**(1.0/20.0)
            right_lim = minmax_x[1]**(21.0/20.0) / minmax_x[0]**(1.0/20.0)
            fig1.gca().set_xlim(left_lim, right_lim)
        except Exception:  # narrowed from a bare except
            pass
    else:
        # Linear axis: pad by 5% of the x range
        border = (minmax_x[1] - minmax_x[0]) / 20
        fig1.gca().set_xlim(minmax_x[0] - border, minmax_x[1] + border)
    return fig1, ax
def _plot_nominal(self, data, result_dir, fig1, ax, x_key, y_key):
    """ Creates a boxplot of *y_key* grouped by the nominal parameter *x_key*.

    A method that allows to create a plot that visualizes the effect
    of differing one nominal variable onto a second one (e.g. the effect of
    differing the classifier onto the accuracy).

    **Expected parameters**

      *data*: A dictionary that contains a mapping from an attribute \
              (e.g. accuracy) to a list of values taken by an attribute. \
              An entry is the entirety of all i-th values over all dict-values.

      *result_dir*: The directory in which the plots will be saved. \
                    (Accepted for interface symmetry; not used in this method.)

      *fig1*, *ax*: The figure and the list of axes to draw into.

      *x_key*: The key of the dictionary whose values should be used as \
               values for the x-axis (the independent variable).

      *y_key*: The key of the dictionary whose values should be used as \
               values for the y-axis, i.e. the dependent variable. A key \
               starting with "#" encodes a weighted cost function of the \
               form "#w1#key1#w2#key2".
    """
    ax.append(fig1.add_subplot(111, label="%d" % (ax.__len__() + 1)))
    fig1.sca(ax[-1])
    # Collect, for every value of the nominal parameter, all observed
    # values of the dependent variable.
    values = defaultdict(list)
    for i in range(len(data[x_key])):
        parameter_value = data[x_key][i]
        # BUGFIX: compare strings with "!=" instead of "is not"; identity
        # comparison against a literal only worked by accident of CPython
        # string interning.
        if y_key[0] != "#":
            performance_value = float(data[y_key][i])
        else:  # A weighted cost function "#w1#key1#w2#key2"
            weight1, y_key1, weight2, y_key2 = y_key[1:].split("#")
            performance_value = float(weight1) * float(data[y_key1][i]) \
                + float(weight2) * float(data[y_key2][i])
        values[parameter_value].append(performance_value)
    values = sorted(values.items())
    pylab.subplots_adjust(bottom=0.3,)  # the bottom of the subplots of the figure
    # Materialize the per-group value lists (list comprehension instead of
    # map(), so this also works under Python 3 where map() is lazy).
    b = pylab.boxplot([v for _, v in values])
    # Extract the median of every box so it can be printed below the box.
    # BUGFIX: build a list instead of mutating a range() object, which is
    # immutable under Python 3.
    medlines = b['medians']
    medians = [medline.get_ydata()[0] for medline in medlines]
    # create array with median labels with 2 decimal places of precision
    upperLabels = [str(numpy.round(m, 2)) for m in medians]
    pylab.gca().set_xticklabels([k for k, _ in values])
    pylab.setp(pylab.gca().get_xticklabels(), rotation=-90)
    pylab.setp(pylab.gca().get_xticklabels(), size='x-small')
    pylab.gca().set_xlabel(x_key.replace("_", " "))
    # Print each median value slightly above the lower plot border.
    bottom = pylab.gca().get_ylim()[0]
    for i in range(len(medians)):
        pylab.gca().text(i + 1, bottom + (bottom * 0.05), upperLabels[i],
                         horizontalalignment='center', size='x-small')
    if y_key[0] != "#":
        pylab.gca().set_ylabel(y_key.replace("_", " "))
    else:
        pylab.gca().set_ylabel("%s*%s+%s*%s" % tuple(y_key[1:].split("#")))
    return fig1, ax
| gpl-3.0 |
wrshoemaker/ffpopsim | tests/python_lowd.py | 2 | 1442 | # vim: fdm=indent
'''
author:     Fabio Zanini
date:       14/05/12
content:    Test script for the python bindings to the low-dimensional
            simulation
'''
# Import module
# NOTE: single-expression print statements are written with parentheses so
# this script parses under both Python 2 and Python 3.
import sys
sys.path.insert(0, '../pkg/python')

import numpy as np
import matplotlib.pyplot as plt
import FFPopSim as h

# Construct class: a haploid population with 4 loci.
pop = h.haploid_lowd(4)

# Test initialization
#pop.set_allele_frequencies([0,0.3,0.6,0.9], 1000)
pop.set_genotypes([1, 2], [400, 800])

# Test setting the recombination/mutation rates
pop.set_recombination_rates([0.01, 0.03, 0.02], h.SINGLE_CROSSOVER)
pop.set_mutation_rates([0.003, 0.002, 0.004, 0.005],
                       [0.006, 0.004, 0.008, 0.010])

# Test getting the mutation rate (forward and backward directions)
print(pop.get_mutation_rates(direction=0))
print(pop.get_mutation_rates(direction=1))

# Test setting / getting fitness
pop.set_fitness_additive([0.02, 0.03, 0.04, 0.01])
pop.get_fitnesses()

# Test allele frequency readout
print(pop.get_allele_frequencies())

# Test evolution and time how long it takes
gens = 100
from time import time as ti
t0 = ti()
pop.evolve(gens)
t1 = ti()
print('Time for evolving the population for ' + str(gens) +
      ' generations: {:1.1f} s'.format(t1 - t0))

# Print population size
print(pop.N)

# Test divergence / diversity statistics
print(pop.get_divergence_statistics())
print(pop.get_diversity_statistics())

# Plot histograms (interactive mode so the script does not block)
plt.ion()
pop.plot_fitness_histogram()
pop.plot_divergence_histogram(color='r')
pop.plot_diversity_histogram(color='g')
| gpl-3.0 |
ChanderG/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
                              AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier

# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02  # fine step width for decision surface contours
plot_step_coarser = 0.5  # step widths for coarse classifier guesses
RANDOM_SEED = 13  # fix the seed on each iteration

# Load data
iris = load_iris()

plot_idx = 1

# Prototype estimators; a fresh clone is fitted for each feature pair.
models = [DecisionTreeClassifier(max_depth=None),
          RandomForestClassifier(n_estimators=n_estimators),
          ExtraTreesClassifier(n_estimators=n_estimators),
          AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                             n_estimators=n_estimators)]

for pair in ([0, 1], [0, 2], [2, 3]):
    for model in models:
        # We only take the two corresponding features
        X = iris.data[:, pair]
        y = iris.target

        # Shuffle
        idx = np.arange(X.shape[0])
        np.random.seed(RANDOM_SEED)
        np.random.shuffle(idx)
        X = X[idx]
        y = y[idx]

        # Standardize
        mean = X.mean(axis=0)
        std = X.std(axis=0)
        X = (X - mean) / std

        # Train.
        # BUGFIX: the clone was previously created and then discarded,
        # because fit() was called on the shared prototype `model`; every
        # later use now consistently refers to the fitted clone `clf`.
        clf = clone(model)
        clf.fit(X, y)

        scores = clf.score(X, y)
        # Create a title for each column and the console by using str() and
        # slicing away useless parts of the string
        model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
        model_details = model_title
        if hasattr(clf, "estimators_"):
            model_details += " with {} estimators".format(len(clf.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)

        plt.subplot(3, 4, plot_idx)
        if plot_idx <= len(models):
            # Add a title at the top of each column
            plt.title(model_title)

        # Now plot the decision boundary using a fine mesh as input to a
        # filled contour plot
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                             np.arange(y_min, y_max, plot_step))

        # Plot either a single DecisionTreeClassifier or alpha blend the
        # decision surfaces of the ensemble of classifiers
        if isinstance(clf, DecisionTreeClassifier):
            Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            cs = plt.contourf(xx, yy, Z, cmap=cmap)
        else:
            # Choose alpha blend level with respect to the number of estimators
            # that are in use (noting that AdaBoost can use fewer estimators
            # than its maximum if it achieves a good enough fit early on)
            estimator_alpha = 1.0 / len(clf.estimators_)
            for tree in clf.estimators_:
                Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
                Z = Z.reshape(xx.shape)
                cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)

        # Build a coarser grid to plot a set of ensemble classifications
        # to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a
        # black outline.
        xx_coarser, yy_coarser = np.meshgrid(
            np.arange(x_min, x_max, plot_step_coarser),
            np.arange(y_min, y_max, plot_step_coarser))
        Z_points_coarser = clf.predict(np.c_[xx_coarser.ravel(),
                                             yy_coarser.ravel()]
                                       ).reshape(xx_coarser.shape)
        cs_points = plt.scatter(xx_coarser, yy_coarser, s=15,
                                c=Z_points_coarser, cmap=cmap,
                                edgecolors="none")

        # Plot the training points; these are clustered together and have a
        # black outline
        for i, c in zip(xrange(n_classes), plot_colors):
            idx = np.where(y == i)
            plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
                        cmap=cmap)

        plot_idx += 1  # move on to the next plot in sequence

plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")

plt.show()
| bsd-3-clause |
dsquareindia/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 38 | 11165 | # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
from sklearn.utils.testing import SkipTest, assert_equal, assert_true
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
def test_img_to_graph():
    """Gradient graphs of the x- and y-ramp images must mirror each other."""
    xs, ys = np.mgrid[:4, :4] - 10
    graph_x = img_to_graph(xs)
    graph_y = img_to_graph(ys)
    # Both sparse graphs must store the same number of entries.
    assert_equal(graph_x.nnz, graph_y.nnz)
    # Negative elements are the diagonal: the elements of the original
    # image. Positive elements are the values of the gradient; they
    # should all be equal between the two graphs.
    positive_x = graph_x.data[graph_x.data > 0]
    positive_y = graph_y.data[graph_y.data > 0]
    np.testing.assert_array_equal(positive_x, positive_y)
def test_grid_to_graph():
    """grid_to_graph handles edge-free masks, arbitrary mask dtypes, and an
    explicit output dtype."""
    # Checking that the function works with graphs containing no edges
    size = 2
    roi_size = 1
    # Generating two convex parts with one vertex
    # Thus, edges will be empty in _to_graph
    # BUGFIX: the deprecated aliases np.bool / np.int were removed in
    # NumPy 1.24; the builtins bool / int map to the same dtypes.
    mask = np.zeros((size, size), dtype=bool)
    mask[0:roi_size, 0:roi_size] = True
    mask[-roi_size:, -roi_size:] = True
    mask = mask.reshape(size ** 2)
    A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
    assert_true(connected_components(A)[0] == 2)

    # Checking that the function works whatever the type of mask is
    mask = np.ones((size, size), dtype=np.int16)
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
    assert_true(connected_components(A)[0] == 1)

    # Checking dtype of the graph
    mask = np.ones((size, size))
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=bool)
    assert_true(A.dtype == bool)
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=int)
    assert_true(A.dtype == int)
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
                      dtype=np.float64)
    assert_true(A.dtype == np.float64)
def test_connect_regions():
    """img_to_graph on a masked image yields one connected component per
    labelled region of the mask (cross-checked against scipy.ndimage)."""
    try:
        face = sp.face(gray=True)
    except AttributeError:
        # Newer versions of scipy have face in misc
        from scipy import misc
        face = misc.face(gray=True)
    for thr in (50, 150):
        # Threshold the image at two different levels to get two masks.
        mask = face > thr
        graph = img_to_graph(face, mask)
        # The graph's component count must match ndimage's region count.
        assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])


def test_connect_regions_with_grid():
    """grid_to_graph (pure pixel-grid connectivity) agrees with
    scipy.ndimage.label on the number of connected regions."""
    try:
        face = sp.face(gray=True)
    except AttributeError:
        # Newer versions of scipy have face in misc
        from scipy import misc
        face = misc.face(gray=True)
    mask = face > 50
    graph = grid_to_graph(*face.shape, mask=mask)
    assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])

    mask = face > 150
    # dtype=None keeps the mask's own dtype instead of the default.
    graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
    assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
    """Return the scipy sample face image (grayscale), downsampled 4x along
    each axis: each output pixel is the mean of a 4x4 input block."""
    try:
        face = sp.face(gray=True)
    except AttributeError:
        # Newer versions of scipy have face in misc
        from scipy import misc
        face = misc.face(gray=True)
    face = face.astype(np.float32)
    # Two successive 2x reductions, each summing the four 2x2 neighbours.
    face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
            + face[1::2, 1::2])
    face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
            + face[1::2, 1::2])
    face = face.astype(np.float32)
    # 16 input pixels were summed into every output pixel; normalize back.
    face /= 16.0
    return face


def _orange_face(face=None):
    """Build a 3-channel (RGB) orange-tinted version of the face image."""
    face = _downsampled_face() if face is None else face
    face_color = np.zeros(face.shape + (3,))
    face_color[:, :, 0] = 256 - face
    face_color[:, :, 1] = 256 - face / 2
    face_color[:, :, 2] = 256 - face / 4
    return face_color


def _make_images(face=None):
    """Stack three brightness-shifted copies of the face into one batch."""
    face = _downsampled_face() if face is None else face
    # make a collection of faces
    images = np.zeros((3,) + face.shape)
    images[0] = face
    images[1] = face + 1
    images[2] = face + 2
    return images


# Shared test fixtures, computed once at import time.
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
    """All overlapping patches of a grayscale image are extracted."""
    face = downsampled_face
    i_h, i_w = face.shape
    p_h, p_w = 16, 16
    # One patch per valid top-left corner position.
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w))
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w))


def test_extract_patches_all_color():
    """Color images keep their channel axis in the extracted patches."""
    face = orange_face
    i_h, i_w = face.shape[:2]
    p_h, p_w = 16, 16
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w))
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))


def test_extract_patches_all_rect():
    """Non-square patches extracted from a cropped (non-square) image."""
    face = downsampled_face
    face = face[:, 32:97]
    i_h, i_w = face.shape
    p_h, p_w = 16, 12
    expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w))
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w))


def test_extract_patches_max_patches():
    """max_patches caps the patch count: int = absolute, float = fraction;
    out-of-range values raise ValueError."""
    face = downsampled_face
    i_h, i_w = face.shape
    p_h, p_w = 16, 16

    patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
    assert_equal(patches.shape, (100, p_h, p_w))

    expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
    patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
    assert_equal(patches.shape, (expected_n_patches, p_h, p_w))

    # Fractions above 1.0 and negative values must be rejected.
    assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
                  max_patches=2.0)
    assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
                  max_patches=-1.0)
def test_reconstruct_patches_perfect():
    """Reassembling all overlapping patches reproduces the image exactly."""
    face = downsampled_face
    p_h, p_w = 16, 16

    patches = extract_patches_2d(face, (p_h, p_w))
    face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
    np.testing.assert_array_almost_equal(face, face_reconstructed)


def test_reconstruct_patches_perfect_color():
    """The extract/reconstruct round-trip also holds for 3-channel images."""
    face = orange_face
    p_h, p_w = 16, 16

    patches = extract_patches_2d(face, (p_h, p_w))
    face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
    np.testing.assert_array_almost_equal(face, face_reconstructed)


def test_patch_extractor_fit():
    """fit() must return the estimator itself (stateless transformer)."""
    faces = face_collection
    extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
    assert_true(extr == extr.fit(faces))


def test_patch_extractor_max_patches():
    """max_patches caps patches per image (int = absolute, float = fraction
    of all possible patches)."""
    faces = face_collection
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8

    max_patches = 100
    expected_n_patches = len(faces) * max_patches
    extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
                          random_state=0)
    patches = extr.transform(faces)
    assert_true(patches.shape == (expected_n_patches, p_h, p_w))

    max_patches = 0.5
    expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
                                          * max_patches)
    extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
                          random_state=0)
    patches = extr.transform(faces)
    assert_true(patches.shape == (expected_n_patches, p_h, p_w))


def test_patch_extractor_max_patches_default():
    """With no patch_size given, the extractor picks a default size
    (19x25 for this fixture)."""
    faces = face_collection
    extr = PatchExtractor(max_patches=100, random_state=0)
    patches = extr.transform(faces)
    assert_equal(patches.shape, (len(faces) * 100, 19, 25))


def test_patch_extractor_all_patches():
    """Without max_patches, every possible patch of every image is returned."""
    faces = face_collection
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
    patches = extr.transform(faces)
    assert_true(patches.shape == (expected_n_patches, p_h, p_w))


def test_patch_extractor_color():
    """PatchExtractor keeps the channel axis for color image batches."""
    faces = _make_images(orange_face)
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
    patches = extr.transform(faces)
    assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
    """extract_patches with explicit extraction steps, in 1D, 2D and 3D.

    Each case lists: image shape, patch size, extraction step, the expected
    shape of the patch grid, and the top-left corner of the last patch.
    """
    image_shapes_1D = [(10,), (10,), (11,), (10,)]
    patch_sizes_1D = [(1,), (2,), (3,), (8,)]
    patch_steps_1D = [(1,), (1,), (4,), (2,)]
    expected_views_1D = [(10,), (9,), (3,), (2,)]
    last_patch_1D = [(10,), (8,), (8,), (2,)]

    image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
    patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
    patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
    expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
    last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]

    image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
    patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
    patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
    expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
    last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]

    image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
    patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
    patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
    expected_views = expected_views_1D + expected_views_2D + expected_views_3D
    last_patches = last_patch_1D + last_patch_2D + last_patch_3D

    for (image_shape, patch_size, patch_step, expected_view,
         last_patch) in zip(image_shapes, patch_sizes, patch_steps,
                            expected_views, last_patches):
        image = np.arange(np.prod(image_shape)).reshape(image_shape)
        patches = extract_patches(image, patch_shape=patch_size,
                                  extraction_step=patch_step)

        ndim = len(image_shape)

        assert_true(patches.shape[:ndim] == expected_view)

        # BUGFIX: index with *tuples* of slices; indexing an ndarray with a
        # list of slices was deprecated in NumPy 1.15 and later removed.
        last_patch_slices = tuple(slice(i, i + j, None) for i, j in
                                  zip(last_patch, patch_size))
        assert_true((patches[(slice(-1, None, None),) * ndim] ==
                     image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
    # test same patch size for all dimensions
    """A scalar patch_shape applies the same size to every dimension."""
    face = downsampled_face
    i_h, i_w = face.shape
    p = 8
    expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
    patches = extract_patches(face, patch_shape=p)
    assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
                                  p, p))


def test_width_patch():
    # width and height of the patch should be less than the image
    """Patch dimensions exceeding the image dimensions raise ValueError."""
    x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    assert_raises(ValueError, extract_patches_2d, x, (4, 1))
    assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
kgullikson88/General | Expectations.py | 1 | 5027 | import pandas as pd
import numpy as np
import pysynphot
from scipy.optimize import leastsq
from astropy import units as u
import SpectralTypeRelations
import Mamajek_Table
# Spectral-type relations and the Mamajek lookup table, shared module-wide.
MS = SpectralTypeRelations.MainSequence()
MT = Mamajek_Table.MamajekTable()
# Stellar radius from L and T (Stefan-Boltzmann scaling, R ~ L^0.5 * T^-2);
# 3.762 is presumably log10(T_sun ~ 5778 K) -- TODO confirm against the
# Mamajek_Table column conventions.
MT.mam_df['radius'] = 10 ** (0.5 * MT.mam_df.logL - 2.0 * MT.mam_df.logT + 2.0 * 3.762)
# Interpolators between table columns.
teff2radius = MT.get_interpolator('Teff', 'radius')
spt2teff = MT.get_interpolator('SpTNum', 'Teff')
sptnum2mass = MT.get_interpolator('SpTNum', 'Msun')
mass2teff = MT.get_interpolator('Msun', 'Teff')
# Temperature range covered by the table (used for clamping in get_radius).
mam_Tmin = MT.mam_df.Teff.min()
mam_Tmax = MT.mam_df.Teff.max()
def get_info(row, N_sigma=2, N_pts=1e4):
if pd.notnull(row['sec_teff']):
return row['sec_teff']
# Get information from the dataframe
delta_m = row['sec_mag'] - row['pri_mag']
if pd.isnull(delta_m):
print 'delta mag is null!'
print row['sec_mag'], row['pri_mag']
return np.nan
delta_m_err = np.sqrt(row['sec_mag_err'] ** 2 + row['pri_mag_err'] ** 2)
if pd.isnull(delta_m_err) or delta_m_err < 1e-4:
delta_m_err = 0.1
teff = row['pri_teff']
filt_lam = row['filt_lam_normalized']
filt_fwhm = max(10.0, row['filt_fwhm_normalized'])
# Make the filter
filt_sigma = filt_fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))
N_sigma = int(min(N_sigma, (filt_lam - 100) / filt_sigma))
x = np.linspace(filt_lam - filt_sigma * N_sigma, filt_lam + filt_sigma * N_sigma, N_pts)
T = np.exp(-(x - filt_lam) ** 2 / (2.0 * filt_sigma ** 2))
filt = pysynphot.ArrayBandpass(x * u.nm.to(u.angstrom), T, name='Gaussian filter')
# Fit the temperature of the secondary
T_guess = 6000.0
errfcn = lambda T, args: lnlike(T, *args)
try:
result, success = leastsq(errfcn, T_guess, args=[teff, delta_m, delta_m_err, filt, False])
except:
print(delta_m, delta_m_err, filt_lam, filt_fwhm)
result, success = leastsq(errfcn, T_guess, args=[teff, delta_m, delta_m_err, filt, True])
sec_teff = np.abs(result[0])
if success > 3:
return np.nan
return sec_teff
def safe_fcn(row, N_sigma=5, N_pts=1e4):
    """Wrapper around get_info that returns NaN instead of raising KeyError.

    NOTE(review): only KeyError is caught; any other failure inside
    get_info still propagates -- confirm that is intended.
    """
    # T = get_info(row, N_sigma, N_pts)
    try:
        # Debug output: the primary temperature being processed.
        print row['pri_teff'].item()
        T = get_info(row, N_sigma, N_pts)
        return T
    except KeyError, e:
        print e
        return np.nan
def get_radius(T):
    """Return the Mamajek-table radius for temperature *T*, with *T* clamped
    to the range covered by the table."""
    clamped = min(max(T, mam_Tmin), mam_Tmax)
    return teff2radius(clamped)
def lnlike(Teff, Teff_prim, delta_mag, delta_mag_err, bandpass, debug):
    """
    A log-likelihood function for secondary star temperature.
    Assumes we know what the primary star temperature is, and the delta-magnitude in this band!

    Returns the chi-squared-like quantity
    (dm_model - delta_mag)**2 / delta_mag_err**2 (plus a penalty for
    unphysically cold trial temperatures); minimized by leastsq in get_info.
    """
    if debug:
        print('{}\t{}'.format(Teff, Teff_prim))
    penalty = 0
    # Clamp unphysically cold trial temperatures and add a penalty so the
    # optimizer is pushed back into the valid range.
    if Teff < 500:
        Teff = 500
        penalty = 100
    R1 = float(get_radius(Teff_prim))
    R2 = float(get_radius(Teff))
    # R1, R2 = 1.0, 1.0
    # Blackbody spectra scaled by the squared stellar radii, observed
    # through the given bandpass.
    bb_prim = pysynphot.BlackBody(Teff_prim) * R1 ** 2
    obs_prim = pysynphot.Observation(bb_prim, bandpass)
    bb_sec = pysynphot.BlackBody(Teff) * R2 ** 2
    obs_sec = pysynphot.Observation(bb_sec, bandpass)
    # Model magnitude difference (secondary minus primary) in AB magnitudes.
    dm = obs_sec.effstim('abmag') - obs_prim.effstim('abmag')
    return (dm - delta_mag) ** 2 / delta_mag_err ** 2 + penalty
def get_teff(pri_spt, magdiff, magdiff_err, filt_lam, filt_fwhm, pri_spt_err=1.0, N=300):
    """
    Get a probability distribution for the companion temperature from the magnitude difference.

    Draws N primary spectral types from a normal distribution (width
    pri_spt_err) around pri_spt, converts them to temperatures, and fits a
    secondary temperature for each sample via safe_fcn/get_info.

    Returns a DataFrame with columns 'pri_teff' and 'sec_teff'.
    """
    sptnum = MS.SpT_To_Number(pri_spt)
    # Monte-Carlo sample of the (uncertain) primary spectral type.
    sptnum_arr = np.random.normal(loc=sptnum, scale=pri_spt_err, size=N)
    df = pd.DataFrame(data={'pri_teff': spt2teff(sptnum_arr),
                            'sec_teff': np.nan,
                            'pri_mag': 0.0, 'pri_mag_err': 0.0,
                            'sec_mag': magdiff, 'sec_mag_err': magdiff_err,
                            'filt_lam_normalized': filt_lam,
                            'filt_fwhm_normalized': filt_fwhm})
    # Fit a secondary temperature for every sampled primary.
    df['sec_teff'] = df.apply(lambda r: safe_fcn(r, N_sigma=2), axis=1)
    return df[['pri_teff', 'sec_teff']]
"""
=============================================
SB9 Processing
=============================================
"""
def get_primary_mass(spt):
# if pd.isnull(spt):
# return np.nan
return sptnum2mass(spt)
def get_secondary_mass(M1, q):
    """Return the secondary-star mass implied by primary mass *M1* and mass
    ratio *q* (M2 = M1 * q); *M1* is coerced to float first."""
    return float(M1) * q
def get_teff_sb9(pri_spt, q, pri_spt_err=1.0, N=300):
    """
    Get a probability distribution for the companion temperature from the mass ratio.

    :param pri_spt: primary spectral type (string understood by MS.SpT_To_Number).
    :param q: mass ratio M2/M1.
    :param pri_spt_err: 1-sigma uncertainty on the spectral type, in subtypes.
    :param N: number of Monte-Carlo samples to draw.
    :return: DataFrame with columns 'M1', 'M2' and 'sec_teff'.
    """
    sptnum = MS.SpT_To_Number(pri_spt)
    # Monte-Carlo sample of the (uncertain) primary spectral type.
    sptnum_arr = np.random.normal(loc=sptnum, scale=pri_spt_err, size=N)
    primary_mass = get_primary_mass(sptnum_arr)
    secondary_mass = primary_mass * q
    # Map secondary masses back to effective temperatures via the table.
    secondary_teff = mass2teff(secondary_mass)
    return pd.DataFrame(data={'M1': primary_mass, 'M2': secondary_mass, 'sec_teff': secondary_teff})
| gpl-3.0 |
jeffmkw/DAT210x-Lab | Module5/assignment6.py | 1 | 9272 | import random, math
import pandas as pd
import numpy as np
import scipy.io
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# If you'd like to try this lab with PCA instead of Isomap for dimensionality
# reduction technique:
Test_PCA = False
matplotlib.style.use('ggplot') # Look Pretty
# Startng from here, the following code is for your convenience only;
# You can skip all the way down to the first 'TODO:' item.
def Plot2DBoundary(DTrain, LTrain, DTest, LTest):
    """Plot the classifier's decision surface in the reduced 2D space.

    Training samples are drawn as colored dots and testing samples as the
    original face thumbnails, on top of a filled contour of the model's
    predictions over a mesh grid.

    NOTE(review): relies on the module-level globals `model`, `df` and
    `num_pixels` being defined before the call -- confirm callers set
    them up first.
    """
    # The dots are training samples (img not drawn), and the pics are
    # testing samples (images drawn).
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_title('Transformed Boundary, Image Space -> 2D')
    padding = 0.1  # Zoom out
    resolution = 1  # Don't get too detailed; smaller values (finer rez) will take longer to compute
    colors = ['blue', 'green', 'orange', 'red']

    # Calculate the boundaries of the mesh grid. Each grid point is sent to
    # the classifier to predict its class, which is why KNeighbors has to be
    # trained against 2D data - so we can produce this contour.
    x_min, x_max = DTrain[:, 0].min(), DTrain[:, 0].max()
    y_min, y_max = DTrain[:, 1].min(), DTrain[:, 1].max()
    x_range = x_max - x_min
    y_range = y_max - y_min
    x_min -= x_range * padding
    y_min -= y_range * padding
    x_max += x_range * padding
    y_max += y_range * padding

    # Using the boundaries, actually make the 2D Grid Matrix:
    xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
                         np.arange(y_min, y_max, resolution))

    # What class does the classifier say about each spot on the chart?
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # Plot the mesh grid as a filled contour plot:
    plt.contourf(xx, yy, Z, cmap=plt.cm.terrain, z=-100)

    # When plotting the testing images, size them as 5% of the chart size.
    x_size = x_range * 0.05
    y_size = y_range * 0.05

    # First, plot the images in your TEST dataset
    img_num = 0
    for index in LTest.index:
        # DTest is a regular NDArray, so you'll iterate over that 1 at a time.
        x0, y0 = DTest[img_num, 0] - x_size / 2., DTest[img_num, 1] - y_size / 2.
        x1, y1 = DTest[img_num, 0] + x_size / 2., DTest[img_num, 1] + y_size / 2.
        # DTest = our images isomap-transformed into 2D. But we still want
        # to plot the original image, so we look to the original, untouched
        # dataset (at index) to get the pixels.
        # BUGFIX: Series.reshape was removed from pandas; reshape the
        # underlying ndarray (.values) instead.
        img = df.iloc[index, :].values.reshape(num_pixels, num_pixels)
        ax.imshow(img, aspect='auto', cmap=plt.cm.gray,
                  interpolation='nearest', zorder=100000,
                  extent=(x0, x1, y0, y1), alpha=0.8)
        img_num += 1

    # Plot your TRAINING points as well... as points rather than as images
    for label in range(len(np.unique(LTrain))):
        indices = np.where(LTrain == label)
        ax.scatter(DTrain[indices, 0], DTrain[indices, 1], c=colors[label],
                   alpha=0.8, marker='o')

    # Plot
    plt.show()
#
# TODO: Use the same code from Module4/assignment4.py to load up the
# face_data.mat file into a dataset called "df". Be sure to calculate
# the num_pixels value, and to rotate the images to being right-side-up
# instead of sideways. This was demonstrated in the M4/A4 code:
# https://github.com/authman/DAT210x/blob/master/Module4/assignment4.py#L31-L41
#
# Load the image data: each row of 'images' is one flattened face picture.
mat = scipy.io.loadmat('Datasets/face_data.mat')
df = pd.DataFrame(mat['images']).T

num_images, num_pixels = df.shape
num_pixels = int(math.sqrt(num_pixels))
print(df.shape)

# Rotate the pictures, so we don't have to crane our necks:
for i in range(num_images):
    # BUGFIX: Series.reshape was removed from pandas; reshape the
    # underlying ndarray (.values) instead.
    df.loc[i, :] = df.loc[i, :].values.reshape(num_pixels, num_pixels).T.reshape(-1)

# Load the labels as a Series (single column), so the underlying indices
# survive train_test_split and can be used to look images back up in df.
face_labels = pd.read_csv('Datasets/face_labels.csv')
face_labels = face_labels.iloc[:, 0]
print(len(df), len(face_labels))
# NOTE(review): face_labels.csv appears to be headerless, so read_csv
# consumed the first label as a column name; dropping the first image row
# below keeps images and labels aligned (both now start at the second
# sample). The cleaner fix would be read_csv(..., header=None) -- confirm
# against the raw CSV before changing, as it alters the dataset size.
df = df.iloc[1:, ]
print(len(df), len(face_labels))
#
# Split into train/test sets. random_state=7 for reproducibility; test_size
# is 0.15 (15%). Labels are passed in as a Series (not an NDArray) so their
# underlying indices survive the split; Plot2DBoundary uses those indices
# to look the original test images back up in `df`.
#
from sklearn.model_selection import train_test_split
data_train, data_test, label_train, label_test = train_test_split(df, face_labels, test_size = 0.15, random_state = 7)


if Test_PCA:
    # PCA is used *before* KNeighbors to simplify the high-dimensional image
    # samples down to just 2 principal components, so a 2D decision surface
    # can be visualized. A lot of variance is lost; in the wild you would
    # keep more dimensions for accuracy.
    #
    # The model is trained (fit) against the training data only; both
    # data_train and data_test are then transformed from the original
    # high-D image feature space down to 2D.
    from sklearn.decomposition import PCA
    pca = PCA(n_components=2)
    pca.fit(data_train)
    data_train = pca.transform(data_train)
    data_test = pca.transform(data_test)

else:
    # Isomap is used *before* KNeighbors for the same reason: reduce to 2
    # components so a 2D decision surface can be plotted. For non-linear
    # data lying on a 2D manifold this usually yields a far superior
    # representation for classification than PCA.
    #
    # Again: fit against training data only, transform both splits.
    from sklearn.manifold import Isomap
    iso = Isomap(n_neighbors=5, n_components=2)
    iso.fit(data_train)
    data_train = iso.transform(data_train)
    data_test = iso.transform(data_test)


#
# KNeighborsClassifier: sweep K from 1 through 19, fit on the (reduced)
# training data, and report the accuracy on the testing set for each K.
#
from sklearn.neighbors import KNeighborsClassifier
for number in range(1,20):
    model = KNeighborsClassifier(n_neighbors=number)
    model.fit(data_train,label_train)
    acc = model.score(data_test, label_test)
    print('K = ', number, ', accuracy = ', acc)


# Chart the combined decision boundary, the training data as 2D plots, and
# the testing data as small images so we can visually validate performance.
# NOTE(review): this uses the *last* model from the loop above (K=19) --
# confirm that is intended rather than the best-scoring K.
Plot2DBoundary(data_train, label_train, data_test, label_test)
#
# TODO:
# After submitting your answers, experiment with using PCA instead of
# ISOMap. Are the results what you expected? Also try tinkering around with
# the test/train split percentage from 10-20%. Notice anything?
#
| mit |
anirudhjayaraman/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
jakobworldpeace/scikit-learn | examples/svm/plot_custom_kernel.py | 93 | 1562 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired, edgecolors='k')
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
wazeerzulfikar/scikit-learn | sklearn/tests/test_metaestimators.py | 30 | 5040 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.utils.validation import check_is_fitted
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
from sklearn.exceptions import NotFittedError
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
check_is_fitted(self, 'coef_')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises a NotFittedError
assert_raises(NotFittedError, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
StongeEtienne/dipy | setup_helpers.py | 11 | 14073 | ''' Distutils / setuptools helpers
'''
import os
import sys
from os.path import join as pjoin, split as psplit, splitext, dirname, exists
import tempfile
import shutil
from distutils.version import LooseVersion
from distutils.command.install_scripts import install_scripts
from distutils.errors import CompileError, LinkError
from distutils import log
BAT_TEMPLATE = \
r"""@echo off
REM wrapper to use shebang first line of {FNAME}
set mypath=%~dp0
set pyscript="%mypath%{FNAME}"
set /p line1=<%pyscript%
if "%line1:~0,2%" == "#!" (goto :goodstart)
echo First line of %pyscript% does not start with "#!"
exit /b 1
:goodstart
set py_exe=%line1:~2%
REM quote exe in case of spaces in path name
set py_exe="%py_exe%"
call %py_exe% %pyscript% %*
"""
# Path of file to which to write C conditional vars from build-time checks
CONFIG_H = pjoin('build', 'config.h')
# File name (no directory) to which to write Python vars from build-time checks
CONFIG_PY = '__config__.py'
# Directory to which to write libraries for building
LIB_DIR_TMP = pjoin('build', 'extra_libs')
class install_scripts_bat(install_scripts):
""" Make scripts executable on Windows
Scripts are bare file names without extension on Unix, fitting (for example)
Debian rules. They identify as python scripts with the usual ``#!`` first
line. Unix recognizes and uses this first "shebang" line, but Windows does
not. So, on Windows only we add a ``.bat`` wrapper of name
``bare_script_name.bat`` to call ``bare_script_name`` using the python
interpreter from the #! first line of the script.
Notes
-----
See discussion at
http://matthew-brett.github.com/pydagogue/installing_scripts.html and
example at git://github.com/matthew-brett/myscripter.git for more
background.
"""
def run(self):
install_scripts.run(self)
if not os.name == "nt":
return
for filepath in self.get_outputs():
# If we can find an executable name in the #! top line of the script
# file, make .bat wrapper for script.
with open(filepath, 'rt') as fobj:
first_line = fobj.readline()
if not (first_line.startswith('#!') and
'python' in first_line.lower()):
log.info("No #!python executable found, skipping .bat "
"wrapper")
continue
pth, fname = psplit(filepath)
froot, ext = splitext(fname)
bat_file = pjoin(pth, froot + '.bat')
bat_contents = BAT_TEMPLATE.replace('{FNAME}', fname)
log.info("Making %s wrapper for %s" % (bat_file, filepath))
if self.dry_run:
continue
with open(bat_file, 'wt') as fobj:
fobj.write(bat_contents)
def add_flag_checking(build_ext_class, flag_defines, top_package_dir=''):
""" Override input `build_ext_class` to check compiler `flag_defines`
Parameters
----------
build_ext_class : class
Class implementing ``distutils.command.build_ext.build_ext`` interface,
with a ``build_extensions`` method.
flag_defines : sequence
A sequence of elements, where the elements are sequences of length 4
consisting of (``compile_flags``, ``link_flags``, ``code``,
``defvar``). ``compile_flags`` is a sequence of compiler flags;
``link_flags`` is a sequence of linker flags. We
check ``compile_flags`` to see whether a C source string ``code`` will
compile, and ``link_flags`` to see whether the resulting object file
will link. If both compile and link works, we add ``compile_flags`` to
``extra_compile_args`` and ``link_flags`` to ``extra_link_args`` of
each extension when we build the extensions. If ``defvar`` is not
None, it is the name of C variable to be defined in ``build/config.h``
with 1 if the combination of (``compile_flags``, ``link_flags``,
``code``) will compile and link, 0 otherwise. If None, do not write
variable.
top_package_dir : str
String giving name of top-level package, for writing Python file
containing configuration variables. If empty, do not write this file.
Variables written are the same as the Cython variables generated via
the `flag_defines` setting.
Returns
-------
checker_class : class
A class with similar interface to
``distutils.command.build_ext.build_ext``, that adds all working
``compile_flags`` values to the ``extra_compile_args`` and working
``link_flags`` to ``extra_link_args`` attributes of extensions, before
compiling.
"""
class Checker(build_ext_class):
flag_defs = tuple(flag_defines)
def can_compile_link(self, compile_flags, link_flags, code):
cc = self.compiler
fname = 'test.c'
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
with open(fname, 'wt') as fobj:
fobj.write(code)
try:
objects = cc.compile([fname],
extra_postargs=compile_flags)
except CompileError:
return False
try:
# Link shared lib rather then executable to avoid
# http://bugs.python.org/issue4431 with MSVC 10+
cc.link_shared_lib(objects, "testlib",
extra_postargs=link_flags)
except (LinkError, TypeError):
return False
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
return True
def build_extensions(self):
""" Hook into extension building to check compiler flags """
def_vars = []
good_compile_flags = []
good_link_flags = []
config_dir = dirname(CONFIG_H)
for compile_flags, link_flags, code, def_var in self.flag_defs:
compile_flags = list(compile_flags)
link_flags = list(link_flags)
flags_good = self.can_compile_link(compile_flags,
link_flags,
code)
if def_var:
def_vars.append((def_var, flags_good))
if flags_good:
good_compile_flags += compile_flags
good_link_flags += link_flags
else:
log.warn("Flags {0} omitted because of compile or link "
"error".format(compile_flags + link_flags))
if def_vars: # write config.h file
if not exists(config_dir):
self.mkpath(config_dir)
with open(CONFIG_H, 'wt') as fobj:
fobj.write('/* Automatically generated; do not edit\n')
fobj.write(' C defines from build-time checks */\n')
for v_name, v_value in def_vars:
fobj.write('int {0} = {1};\n'.format(
v_name, 1 if v_value else 0))
if def_vars and top_package_dir: # write __config__.py file
config_py_dir = (top_package_dir if self.inplace else
pjoin(self.build_lib, top_package_dir))
if not exists(config_py_dir):
self.mkpath(config_py_dir)
config_py = pjoin(config_py_dir, CONFIG_PY)
with open(config_py, 'wt') as fobj:
fobj.write('# Automatically generated; do not edit\n')
fobj.write('# Variables from compile checks\n')
for v_name, v_value in def_vars:
fobj.write('{0} = {1}\n'.format(v_name, v_value))
if def_vars or good_compile_flags or good_link_flags:
for ext in self.extensions:
ext.extra_compile_args += good_compile_flags
ext.extra_link_args += good_link_flags
if def_vars:
ext.include_dirs.append(config_dir)
build_ext_class.build_extensions(self)
return Checker
def get_pkg_version(pkg_name):
""" Return package version for `pkg_name` if installed
Returns
-------
pkg_version : str or None
Return None if package not importable. Return 'unknown' if standard
``__version__`` string not present. Otherwise return version string.
"""
try:
pkg = __import__(pkg_name)
except ImportError:
return None
try:
return pkg.__version__
except AttributeError:
return 'unknown'
def version_error_msg(pkg_name, found_ver, min_ver):
""" Return informative error message for version or None
"""
if found_ver is None:
return 'We need package {0}, but not importable'.format(pkg_name)
if found_ver == 'unknown':
return 'We need {0} version {1}, but cannot get version'.format(
pkg_name, min_ver)
if LooseVersion(found_ver) >= LooseVersion(min_ver):
return None
return 'We need {0} version {1}, but found version {2}'.format(
pkg_name, found_ver, min_ver)
class SetupDependency(object):
""" SetupDependency class
Parameters
----------
import_name : str
Name with which required package should be ``import``ed.
min_ver : str
Distutils version string giving minimum version for package.
req_type : {'install_requires', 'setup_requires'}, optional
Setuptools dependency type.
heavy : {False, True}, optional
If True, and package is already installed (importable), then do not add
to the setuptools dependency lists. This prevents setuptools
reinstalling big packages when the package was installed without using
setuptools, or this is an upgrade, and we want to avoid the pip default
behavior of upgrading all dependencies.
install_name : str, optional
Name identifying package to install from pypi etc, if different from
`import_name`.
"""
def __init__(self, import_name,
min_ver,
req_type='install_requires',
heavy=False,
install_name=None):
self.import_name = import_name
self.min_ver = min_ver
self.req_type = req_type
self.heavy = heavy
self.install_name = (import_name if install_name is None
else install_name)
def check_fill(self, setuptools_kwargs):
""" Process this dependency, maybe filling `setuptools_kwargs`
Run checks on this dependency. If not using setuptools, then raise
error for unmet dependencies. If using setuptools, add missing or
not-heavy dependencies to `setuptools_kwargs`.
A heavy dependency is one that is inconvenient to install
automatically, such as numpy or (particularly) scipy, matplotlib.
Parameters
----------
setuptools_kwargs : dict
Dictionary of setuptools keyword arguments that may be modified
in-place while checking dependencies.
"""
found_ver = get_pkg_version(self.import_name)
ver_err_msg = version_error_msg(self.import_name,
found_ver,
self.min_ver)
if not 'setuptools' in sys.modules:
# Not using setuptools; raise error for any unmet dependencies
if ver_err_msg is not None:
raise RuntimeError(ver_err_msg)
return
# Using setuptools; add packages to given section of
# setup/install_requires, unless it's a heavy dependency for which we
# already have an acceptable importable version.
if self.heavy and ver_err_msg is None:
return
new_req = '{0}>={1}'.format(self.import_name, self.min_ver)
old_reqs = setuptools_kwargs.get(self.req_type, [])
setuptools_kwargs[self.req_type] = old_reqs + [new_req]
class Bunch(object):
def __init__(self, vars):
for key, name in vars.items():
if key.startswith('__'):
continue
self.__dict__[key] = name
def read_vars_from(ver_file):
""" Read variables from Python text file
Parameters
----------
ver_file : str
Filename of file to read
Returns
-------
info_vars : Bunch instance
Bunch object where variables read from `ver_file` appear as
attributes
"""
# Use exec for compabibility with Python 3
ns = {}
with open(ver_file, 'rt') as fobj:
exec(fobj.read(), ns)
return Bunch(ns)
def make_np_ext_builder(build_ext_class):
""" Override input `build_ext_class` to add numpy includes to extension
This is useful to delay call of ``np.get_include`` until the extension is
being built.
Parameters
----------
build_ext_class : class
Class implementing ``distutils.command.build_ext.build_ext`` interface,
with a ``build_extensions`` method.
Returns
-------
np_build_ext_class : class
A class with similar interface to
``distutils.command.build_ext.build_ext``, that adds libraries in
``np.get_include()`` to include directories of extension.
"""
class NpExtBuilder(build_ext_class):
def build_extensions(self):
""" Hook into extension building to add np include dirs
"""
# Delay numpy import until last moment
import numpy as np
for ext in self.extensions:
ext.include_dirs.append(np.get_include())
build_ext_class.build_extensions(self)
return NpExtBuilder
| bsd-3-clause |
moutai/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 25 | 11187 | # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
dtype=np.float64)
assert_true(A.dtype == np.float64)
def test_connect_regions():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
for thr in (50, 150):
mask = face > thr
graph = img_to_graph(face, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
mask = face > 50
graph = grid_to_graph(*face.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = face > 150
graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
face = face.astype(np.float32)
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = face.astype(np.float32)
face /= 16.0
return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
face = orange_face
i_h, i_w = face.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
face = downsampled_face
face = face[:, 32:97]
i_h, i_w = face.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
face = downsampled_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_reconstruct_patches_perfect_color():
face = orange_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
faces = face_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(faces))
def test_patch_extractor_max_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(faces) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
faces = face_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(faces)
assert_equal(patches.shape, (len(faces) * 100, 19, 25))
def test_patch_extractor_all_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
faces = _make_images(orange_face)
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
face = downsampled_face
i_h, i_w = face.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(face, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
    """Patch height/width larger than the image must raise ValueError."""
    image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    for bad_shape in ((4, 1), (1, 4)):
        assert_raises(ValueError, extract_patches_2d, image, bad_shape)
| bsd-3-clause |
eike-welk/clair | src/clairweb/libclair/test/test_prices.py | 1 | 24394 | # -*- coding: utf-8 -*-
###############################################################################
# Clair - Project to discover prices on e-commerce sites. #
# #
# Copyright (C) 2013 by Eike Welk #
# eike.welk@gmx.net #
# #
# License: GPL Version 3 #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
"""
Price estimation algorithms.
"""
from __future__ import division
from __future__ import absolute_import
#import pytest #contains `skip`, `fail`, `raises`, `config`
import time
import os.path as path
import numpy as np
from numpy import array, dot, abs #sqrt, sum
from numpy.linalg import norm
import matplotlib.pylab as pl
#Set up logging for useful debug output, and time stamps in UTC.
import logging
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',
                    level=logging.DEBUG)
#Time stamps must be in UTC, so convert log record times with gmtime.
logging.Formatter.converter = time.gmtime
def relative(*path_comps):
    """Build an absolute path from components relative to this file's directory."""
    base_dir = path.dirname(__file__)
    return path.abspath(path.join(base_dir, *path_comps))
def test_PriceEstimator_find_observed_prices():
    "Test price computation for listings with only a single product."
    from clair.coredata import DataStore
    from clair.prices import PriceEstimator
    print "start"
    # Load the example data shipped with the repository and take a small
    # slice of listings as test input.
    data = DataStore()
    data.read_data(relative("../../example-data"))
    test_listings = data.listings.ix[0:20]
    print test_listings
    estimator = PriceEstimator()
    # Listings containing exactly one product directly "observe" that
    # product's price; this exercises that extraction path.
    prices = estimator.find_observed_prices(test_listings)
    print prices.to_string()
    #TODO: assertions
    print "finshed"
def test_PriceEstimator_compute_product_occurrence_matrix():
    "Test construction of matrix for linear least square algorithm."
    from clair.coredata import DataStore
    from clair.prices import PriceEstimator
    print "start"
    # Load example data; use a small slice of listings as test input.
    data = DataStore()
    data.read_data(relative("../../example-data"))
    test_listings = data.listings.ix[0:20]
    print test_listings
    print test_listings.to_string(columns=["products", "price"])
    # Restrict the matrix columns to a fixed set of known product IDs.
    product_ids = [u'nikon-d70', u'nikon-d90', u'nikon-sb-24', u'nikon-sb-26',
                   u'nikon-18-70-f/3.5-4.5--1', u'nikon-18-105-f/3.5-5.6--1',
                   u'nikon-28-85-f/3.5-4.5--1']
    estimator = PriceEstimator()
    # One matrix row per listing, one column per product; the rank printout
    # below shows whether the system is solvable for all products.
    matrix, prices, listing_ids, product_ids = \
        estimator.compute_product_occurrence_matrix(test_listings, product_ids)
    print
    print "matrix:\n", matrix
    print "matrix rank:", np.linalg.matrix_rank(matrix)
    print "number products:", len(product_ids)
    print "prices:\n", prices
    print "listing_ids:\n", listing_ids
    print "product_ids:\n", product_ids
    #TODO: assertions
    print "finshed"
def test_PriceEstimator_solve_prices_lstsq_1():
    "Test linear least square algorithm with real data."
    from clair.coredata import DataStore
    from clair.prices import PriceEstimator
    print "start"
    data = DataStore()
    data.read_data(relative("../../example-data"))
    #Take a small amount of test data.
    listings = data.listings.ix[0:50]
#    listings = data.listings
#    product_ids = [p.id for p in data.products]
    product_ids = [u'nikon-d70', u'nikon-d90', u'nikon-sb-24', u'nikon-sb-26',
                   u'nikon-18-70-f/3.5-4.5--1', u'nikon-18-105-f/3.5-5.6--1',
                   u'nikon-28-85-f/3.5-4.5--1']
    print listings
    print listings.to_string(columns=["products", "price"])
    estimator = PriceEstimator()
    #Create matrix and vectors for linear least square
    matrix, listing_prices, listing_ids, product_ids = \
        estimator.compute_product_occurrence_matrix(listings, product_ids)
    print
    print "matrix:\n", matrix
    print "matrix rank:", np.linalg.matrix_rank(matrix)
    print "number products:", len(product_ids)
    print "listing_prices:\n", listing_prices
    print "listing_ids:\n", listing_ids
    print "product_ids:\n", product_ids
    # Solve the (possibly rank-deficient) system for per-product prices.
    product_prices, good_rows, good_cols, problem_products = \
        estimator.solve_prices_lstsq(matrix, listing_prices,
                                     listing_ids, product_ids)
    # NOTE(review): the 0.7 factor presumably converts to a different price
    # basis (e.g. used-condition value) for display only -- confirm.
    print "product_prices:\n", product_prices * 0.7
    #TODO: assertions
    print "finshed"
def test_PriceEstimator_solve_prices_lstsq_2():
    "Test linear least square algorithm with artificial data."
    from clair.prices import PriceEstimator
    # Helper that dumps all locals of interest; reads the enclosing
    # function's (rebound) variables at call time.
    def print_vals():
        print "matrix:\n", matrix
        print "matrix rank:", np.linalg.matrix_rank(matrix)
        print "number products:", len(product_ids)
        print "listing_prices:\n", listing_prices
        print "listing_ids:\n", listing_ids
        print "product_ids:\n", product_ids
        print "product_prices:\n", product_prices
        print "real_prices:\n", real_prices
        print "good_rows:\n", good_rows
        print "good_cols:\n", good_cols
        print "problem_products:\n", problem_products
    print "start"
    estimator = PriceEstimator()
    #Listing IDs, unimportant in this test.
    listing_ids = array(["l1", "l2", "l3", "l4", "l5",
                         "l6", "l7", "l8", "l9", "l10"])
    #Product IDs, and "real" prices for checking errors
    product_ids = array(["a", "b", "c", "d", "e"])
    real_prices = array([500, 200, 100, 50., 5.])
    print "Matrix has full rank, no noise ---------------------------------"
    #Matrix that represents the listings, each row is a listing
    matrix = array([[ 1., 0., 0., 0., 0.,],
                    [ 1., 0., 0., 0., 0.,],
                    [ 0., 1., 0., 0., 0.,],
                    [ 0., 1., 0., 0., 0.,],
                    [ 1., 1., 0., 0., 0.,],
                    [ 1., 0., 1., 0., 0.,],
                    [ 0., 0., 1., 1., 0.,],
                    [ 0., 0., 1., 0., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 1., 1., 1., 1., 1.,],
                    ])
    #compute listing prices from the real prices
    listing_prices = dot(matrix, real_prices)
    #Compute the product prices
    product_prices, good_rows, good_cols, problem_products = \
        estimator.solve_prices_lstsq(matrix, listing_prices,
                                     listing_ids, product_ids)
    print_vals()
    # Full rank + exact prices: the solution must be exact.
    np.testing.assert_allclose(product_prices, real_prices)
    print "\nMatrix has full rank, with noise --------------------------------"
    #compute listing prices with noise
    listing_prices = dot(matrix, real_prices)
    listing_prices += np.random.normal(0, 0.1, (10,)) * listing_prices
    #Compute the product prices
    product_prices, good_rows, good_cols, problem_products = \
        estimator.solve_prices_lstsq(matrix, listing_prices,
                                     listing_ids, product_ids)
    print_vals()
    err_norm = norm(product_prices - real_prices)
    print "Error norm:", err_norm
    res_good = np.asarray(abs(product_prices - real_prices)
                          < real_prices * 0.2, dtype=int)
    print "Number of results exact to 20%:", sum(res_good)
    #This test might occasionally fail because current noise is too large.
    # NOTE(review): non-deterministic -- np.random is not seeded here.
    assert sum(res_good) >= 3
    print "\nMatrix has insufficient rank, no noise ---------------------------------"
    #Matrix that represents the listings, each row is a listing
    # Products "d" and "e" only ever appear together, so their individual
    # prices are not identifiable; only the first three are checked below.
    matrix = array([[ 1., 0., 0., 0., 0.,],
                    [ 1., 0., 0., 0., 0.,],
                    [ 0., 1., 0., 0., 0.,],
                    [ 0., 1., 0., 0., 0.,],
                    [ 1., 1., 0., 0., 0.,],
                    [ 1., 0., 1., 0., 0.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 1., 1., 1., 0., 0.,],
                    ])
    #compute listing prices from the real prices
    listing_prices = dot(matrix, real_prices)
    #Compute the product prices
    product_prices, good_rows, good_cols, problem_products = \
        estimator.solve_prices_lstsq(matrix, listing_prices,
                                     listing_ids, product_ids)
    print_vals()
    np.testing.assert_allclose(product_prices[0:3], real_prices[0:3])
    print "\nMatrix has insufficient rank, no noise ---------------------------------"
    #Pathological case for the current algorithm
    matrix = array([[ 1., 0., 0., 1., 1.,],
                    [ 0., 1., 0., 1., 1.,],
                    [ 0., 0., 1., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,], ])
    #compute listing prices from the real prices
    listing_prices = dot(matrix, real_prices)
    #Compute the product prices
    product_prices, good_rows, good_cols, problem_products = \
        estimator.solve_prices_lstsq(matrix, listing_prices,
                                     listing_ids, product_ids)
    print_vals()
    np.testing.assert_allclose(product_prices[0:3], real_prices[0:3])
    print "Matrix is 1*2 ------------------------"
    #Listing IDs, unimportant in this test.
    listing_ids = array(["l1"])
    #Product IDs, and "real" prices for checking errors
    product_ids = array(["a", "b"])
    real_prices = array([500, 200.])
    #Matrix that represents the listings, each row is a listing
    matrix = array([[0.7, 0]])
    #compute listing prices from the real prices
    listing_prices = dot(matrix, real_prices)
    #Compute the product prices
    product_prices, good_rows, good_cols, problem_products = \
        estimator.solve_prices_lstsq(matrix, listing_prices,
                                     listing_ids, product_ids)
    print_vals()
    # Only the identifiable columns are required to match.
    np.testing.assert_allclose(product_prices[good_cols],
                               real_prices[good_cols])
    print "Matrix is 1*1 (but has full rank, no noise) ------------------------"
    #Listing IDs, unimportant in this test.
    listing_ids = array(["l1"])
    #Product IDs, and "real" prices for checking errors
    product_ids = array(["a"])
    real_prices = array([500])
    #Matrix that represents the listings, each row is a listing
    matrix = array([[0.7]])
    #compute listing prices from the real prices
    listing_prices = dot(matrix, real_prices)
    #Compute the product prices
    product_prices, good_rows, good_cols, problem_products = \
        estimator.solve_prices_lstsq(matrix, listing_prices,
                                     listing_ids, product_ids)
    print_vals()
    np.testing.assert_allclose(product_prices[good_cols],
                               real_prices[good_cols])
    print "finshed"
def test_PriceEstimator_find_problems_rank_deficient_matrix():
    "Test linear least square algorithm with artificial data."
    from clair.prices import PriceEstimator
    # Helper that dumps the diagnostic outputs of the last call.
    def print_all():
#        print "matrix_new:\n", matrix_new
        print "good_rows:", good_rows
        print "good_cols:", good_cols
        print "problem_products:", problem_products
    estimator = PriceEstimator()
    print "Matrix has full rank ---------------------------------"
    #Matrix that represents the listings, each row is a listing
    matrix = array([[ 1., 0., 0., 0., 0.,],
                    [ 1., 0., 0., 0., 0.,],
                    [ 0., 1., 0., 0., 0.,],
                    [ 0., 1., 0., 0., 0.,],
                    [ 1., 1., 0., 0., 0.,],
                    [ 1., 0., 1., 0., 0.,],
                    [ 0., 0., 1., 1., 0.,],
                    [ 0., 0., 1., 0., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 1., 1., 1., 1., 1.,],
                    ])
    good_rows, good_cols, problem_products = \
        estimator.find_problems_rank_deficient_matrix(matrix)
    print_all()
    # Full rank: every column is solvable, no problem products reported.
    assert all(good_cols == [True, True, True, True, True])
    assert problem_products == []
    print "\nMatrix has insufficient rank ---------------------------------"
    # Columns 3 and 4 always occur together -> not individually solvable.
    matrix = array([[ 1., 0., 0., 0., 0.,],
                    [ 1., 0., 0., 0., 0.,],
                    [ 0., 1., 0., 0., 0.,],
                    [ 0., 1., 0., 0., 0.,],
                    [ 1., 1., 0., 0., 0.,],
                    [ 1., 0., 1., 0., 0.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 1., 1., 1., 0., 0.,],
                    ])
    good_rows, good_cols, problem_products = \
        estimator.find_problems_rank_deficient_matrix(matrix)
    print_all()
    assert all(good_cols == [True, True, True, False, False])
    assert problem_products == ["3", "4"]
    print "\nMatrix has insufficient rank, pathological case ----------------"
    #Pathological case for the current algorithm
    matrix = array([[ 1., 0., 0., 1., 1.,],
                    [ 0., 1., 0., 1., 1.,],
                    [ 0., 0., 1., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    ])
    good_rows, good_cols, problem_products = \
        estimator.find_problems_rank_deficient_matrix(matrix)
    print_all()
    assert all(good_cols == [True, True, True, False, False])
    assert problem_products == ["3", "4"]
    print "\nPathological case shape = (1, 2) ----------------"
    #Pathological case for the current algorithm
    matrix = array([[ 0.7, 0.]])
    good_rows, good_cols, problem_products = \
        estimator.find_problems_rank_deficient_matrix(matrix)
    print_all()
    # Column 1 has no observations at all -> flagged as a problem product.
    assert all(good_cols == [True, False])
    assert problem_products == ["1"]
def test_PriceEstimator_create_prices_lstsq_soln_1():
    "Test creation of price records with real data."
    from clair.coredata import DataStore
    from clair.prices import PriceEstimator
    print "start"
    data = DataStore()
    data.read_data(relative("../../example-data"))
    #Use all data as test data
#    listings = data.listings
    # Skip placeholder products whose ID marks them as unknown.
    product_ids = [p.id for p in data.products
                   if not p.id.startswith("xxx-unknown")]
#    #Take a small amount of test data.
    listings = data.listings.ix[0:200]
#    product_ids = [u'nikon-d70', u'nikon-d90', u'nikon-sb-24', u'nikon-sb-26',
#                   u'nikon-18-70-f/3.5-4.5--1', u'nikon-18-105-f/3.5-5.6--1',
#                   u'nikon-28-85-f/3.5-4.5--1']
    print listings
#    print listings.to_string(columns=["products", "price"])
    estimator = PriceEstimator()
    #Create matrix and vectors for linear least square
    matrix, listing_prices, listing_ids, product_ids = \
        estimator.compute_product_occurrence_matrix(listings, product_ids)
#    print
#    print "matrix:\n", matrix
#    print "matrix rank:", np.linalg.matrix_rank(matrix)
#    print "number products:", len(product_ids)
#    print "listing_prices:\n", listing_prices
#    print "listing_ids:\n", listing_ids
#    print "product_ids:\n", product_ids
    #Compute average product prices
    product_prices, good_rows, good_cols, problem_products = \
        estimator.solve_prices_lstsq(matrix, listing_prices,
                                     listing_ids, product_ids)
    #Create price records
    prices = estimator.create_prices_lstsq_soln(matrix,
                                                listing_prices, listing_ids,
                                                product_prices, product_ids,
                                                good_rows, good_cols, listings)
#    print prices.to_string()
    #TODO: assertions
    print "finshed"
def test_PriceEstimator_create_prices_lstsq_soln_2():
    """
    Test creation of price records from solution of linear
    least square problem, with artificial data.
    """
    from clair.prices import PriceEstimator
    # Helper that dumps the inputs/outputs of the last solver call.
    def print_vals():
        print "matrix:\n", matrix
        print "matrix rank:", np.linalg.matrix_rank(matrix)
        print "number products:", len(product_ids)
        print "listing_prices:\n", listing_prices
        print "listing_ids:\n", listing_ids
        print "product_ids:\n", product_ids
        print "product_prices:\n", product_prices
        print "real_prices:\n", real_prices
    print "start"
    estimator = PriceEstimator()
    #Listing IDs, unimportant in this test.
    listing_ids = array(["l1", "l2", "l3", "l4", "l5",
                         "l6", "l7", "l8", "l9", "l10"])
    #Product IDs, and "real" prices for checking errors
    product_ids = array(["a", "b", "c", "d", "e"])
    real_prices = array([500, 200, 100, 50., 5.])
    print "Matrix has full rank, no noise ---------------------------------"
    #Matrix that represents the listings, each row is a listing
    matrix = array([[ 1., 0., 0., 0., 0.,],
                    [ 1., 0., 0., 0., 0.,],
                    [ 0., 1., 0., 0., 0.,],
                    [ 0., 1., 0., 0., 0.,],
                    [ 1., 1., 0., 0., 0.,],
                    [ 1., 0., 1., 0., 0.,],
                    [ 0., 0., 1., 1., 0.,],
                    [ 0., 0., 1., 0., 1.,],
                    [ 0., 0., 0., 1., 1.,],
                    [ 1., 1., 1., 1., 1.,],
                    ])
    #compute listing prices from the real prices
    listing_prices = dot(matrix, real_prices)
    #Compute the product prices
    product_prices, good_rows, good_cols, problem_products = \
        estimator.solve_prices_lstsq(matrix, listing_prices,
                                     listing_ids, product_ids)
    print_vals()
    # NOTE(review): arguments are passed as (good_cols, good_rows) here,
    # while ..._soln_1 passes (good_rows, good_cols, listings) -- confirm
    # against the signature of create_prices_lstsq_soln.
    prices = estimator.create_prices_lstsq_soln(matrix,
                                                listing_prices, listing_ids,
                                                product_prices, product_ids,
                                                good_cols, good_rows)
    print "prices:\n", prices.to_string()
    # Undo the condition factor to recover the as-new price per record.
    true_prices = prices["price"] / prices["condition"]
    prices_a = true_prices[prices["product"] == "a"]
    prices_b = true_prices[prices["product"] == "b"]
    prices_c = true_prices[prices["product"] == "c"]
    prices_d = true_prices[prices["product"] == "d"]
    prices_e = true_prices[prices["product"] == "e"]
    np.testing.assert_allclose(prices_a, 500)
    np.testing.assert_allclose(prices_b, 200)
    np.testing.assert_allclose(prices_c, 100)
    np.testing.assert_allclose(prices_d, 50)
    np.testing.assert_allclose(prices_e, 5)
    print "Matrix is 1*1 (but has full rank, no noise) ------------------------"
    #Listing IDs, unimportant in this test.
    listing_ids = array(["l1"])
    #Product IDs, and "real" prices for checking errors
    product_ids = array(["a"])
    real_prices = array([500])
    #Matrix that represents the listings, each row is a listing
    matrix = array([[0.7]])
    #compute listing prices from the real prices
    listing_prices = dot(matrix, real_prices)
    #Compute the product prices
    product_prices, good_rows, good_cols, problem_products = \
        estimator.solve_prices_lstsq(matrix, listing_prices,
                                     listing_ids, product_ids)
    print_vals()
    prices = estimator.create_prices_lstsq_soln(matrix,
                                                listing_prices, listing_ids,
                                                product_prices, product_ids,
                                                good_cols, good_rows)
    print "prices:\n", prices.to_string()
    true_prices = prices["price"] / prices["condition"]
    prices_a = true_prices[prices["product"] == "a"]
    np.testing.assert_allclose(prices_a, 500)
def test_PriceEstimator_compute_prices_1():
    "Test main method for creation of price records with real data."
    from clair.coredata import DataStore
    from clair.prices import PriceEstimator
    print "start"
    data = DataStore()
    data.read_data(relative("../../example-data"))
    #Use all data as test data
    listings = data.listings
#    product_ids = [p.id for p in data.products
#                   if not p.id.startswith("xxx-unknown")]
#    #Take a small amount of test data.
#    listings = data.listings.ix[0:50]
#    product_ids = [u'nikon-d70', u'nikon-d90', u'nikon-sb-24', u'nikon-sb-26',
#                   u'nikon-18-70-f/3.5-4.5--1', u'nikon-18-105-f/3.5-5.6--1',
#                   u'nikon-28-85-f/3.5-4.5--1']
#    print listings
    print listings.to_string(columns=["products", "price"])
    estimator = PriceEstimator()
    # End-to-end run: weekly averaged price records over the whole dataset.
    prices = estimator.compute_prices(listings, data.products,
                                      time_start=None, time_end=None,
                                      avg_period="week")
#    print prices.to_string()
    prices = prices.sort("time")
    # Plot price series of a few products for visual inspection only;
    # pl.show() is commented out, so nothing blocks in automated runs.
    prices_d90 = prices.ix[prices["product"] == "nikon-d90"]
    pl.plot(prices_d90["time"].tolist(), prices_d90["price"].tolist())
    prices_sb26 = prices.ix[prices["product"] == "nikon-sb-26"]
    prices_sb26.set_index("time", inplace=True, verify_integrity=False)
    prices_sb26["price"].plot()
    prices_sb24 = prices.ix[prices["product"] == "nikon-sb-24"]
    prices_sb24.set_index("time", inplace=True, verify_integrity=False)
    prices_sb24["price"].plot()
#    pl.plot(prices_sb24["time"], prices_d90["price"])
#    pl.show()
    #TODO: assertions
    print "finshed"
if __name__ == "__main__":
    # Manual test driver: uncomment the test(s) to run directly.
#    test_PriceEstimator_find_observed_prices()
#    test_PriceEstimator_compute_product_occurrence_matrix()
#    test_PriceEstimator_solve_prices_lstsq_1()
    test_PriceEstimator_solve_prices_lstsq_2()
#    test_PriceEstimator_find_problems_rank_deficient_matrix()
#    test_PriceEstimator_create_prices_lstsq_soln_1()
#    test_PriceEstimator_create_prices_lstsq_soln_2()
#    test_PriceEstimator_compute_prices_1()
    pass #IGNORE:W0107
| gpl-3.0 |
ales-erjavec/scipy | scipy/interpolate/interpolate.py | 25 | 80287 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'RegularGridInterpolator',
'interpn']
import itertools
from numpy import (shape, sometrue, array, transpose, searchsorted,
ones, logical_or, atleast_1d, atleast_2d, ravel,
dot, poly1d, asarray, intp)
import numpy as np
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
import math
import warnings
import functools
import operator
from scipy._lib.six import xrange, integer_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
def reduce_sometrue(a):
    """Collapse `a` with logical-or along axis 0 until it is at most 1-D.

    Parameters
    ----------
    a : array_like
        Input array of any dimensionality.

    Returns
    -------
    ndarray or scalar
        `a` unchanged if it is already 0-D or 1-D, otherwise the result of
        repeatedly applying ``np.any`` over the leading axis.
    """
    # np.sometrue is a deprecated alias of np.any (removed in NumPy 2.0);
    # also avoid shadowing the builtin `all` as the original did.
    result = a
    while len(shape(result)) > 1:
        result = np.any(result, axis=0)
    return result
def prod(x):
    """Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
    # The empty product is 1 by convention; reduce() would raise on an
    # empty sequence without an initial value.
    if not x:
        return 1
    return functools.reduce(operator.mul, x)
def lagrange(x, w):
    """
    Return a Lagrange interpolating polynomial.

    Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
    polynomial through the points ``(x, w)``.

    Warning: This implementation is numerically unstable. Do not expect to
    be able to use more than about 20 points even if they are chosen optimally.

    Parameters
    ----------
    x : array_like
        x-coordinates of the datapoints.
    w : array_like
        y-coordinates of the datapoints, i.e. f(`x`).

    Returns
    -------
    lagrange : numpy.poly1d instance
        The Lagrange interpolating polynomial.
    """
    n_points = len(x)
    result = poly1d(0.0)
    # Sum of basis polynomials: each term is w[j] scaled so it is 1 at
    # x[j] and 0 at every other node.
    for j in range(n_points):
        term = poly1d(w[j])
        for k in range(n_points):
            if k != j:
                denom = x[j] - x[k]
                term *= poly1d([1.0, -x[k]]) / denom
        result += term
    return result
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
    """
    interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
             fill_value=nan)

    Interpolate over a 2-D grid.

    `x`, `y` and `z` are arrays of values used to approximate some function
    f: ``z = f(x, y)``. This class returns a function whose call method uses
    spline interpolation to find the value of new points.

    If `x` and `y` represent a regular grid, consider using
    RectBivariateSpline.

    Methods
    -------
    __call__

    Parameters
    ----------
    x, y : array_like
        Arrays defining the data point coordinates.
        If the points lie on a regular grid, `x` can specify the column
        coordinates and `y` the row coordinates, for example::
            >>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
        Otherwise, `x` and `y` must specify the full coordinates for each
        point, for example::
            >>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
        If `x` and `y` are multi-dimensional, they are flattened before use.
    z : array_like
        The values of the function to interpolate at the data points. If
        `z` is a multi-dimensional array, it is flattened before use. The
        length of a flattened `z` array is either
        len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
        or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
        for each point.
    kind : {'linear', 'cubic', 'quintic'}, optional
        The kind of spline interpolation to use. Default is 'linear'.
    copy : bool, optional
        If True, the class makes internal copies of x, y and z.
        If False, references may be used. The default is to copy.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data (x,y), a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If omitted (None), values outside
        the domain are extrapolated.

    See Also
    --------
    RectBivariateSpline :
        Much faster 2D interpolation if your input data is on a grid
    bisplrep, bisplev :
        Spline interpolation based on FITPACK
    BivariateSpline : a more recent wrapper of the FITPACK routines
    interp1d : one dimension version of this function

    Notes
    -----
    The minimum number of data points required along the interpolation
    axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
    quintic interpolation.

    The interpolator is constructed by `bisplrep`, with a smoothing factor
    of 0. If more control over smoothing is needed, `bisplrep` should be
    used directly.

    Examples
    --------
    >>> from scipy import interpolate
    >>> x = np.arange(-5.01, 5.01, 0.25)
    >>> y = np.arange(-5.01, 5.01, 0.25)
    >>> xx, yy = np.meshgrid(x, y)
    >>> z = np.sin(xx**2+yy**2)
    >>> f = interpolate.interp2d(x, y, z, kind='cubic')
    >>> import matplotlib.pyplot as plt
    >>> xnew = np.arange(-5.01, 5.01, 1e-2)
    >>> ynew = np.arange(-5.01, 5.01, 1e-2)
    >>> znew = f(xnew, ynew)
    >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
    >>> plt.show()
    """
    def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
                 fill_value=None):
        x = ravel(x)
        y = ravel(y)
        z = asarray(z)
        # Heuristic: if z has one value per (x, y) grid cell, the data is
        # treated as a regular grid; otherwise as scattered points.
        rectangular_grid = (z.size == len(x) * len(y))
        if rectangular_grid:
            if z.ndim == 2:
                if z.shape != (len(y), len(x)):
                    raise ValueError("When on a regular grid with x.size = m "
                                     "and y.size = n, if z.ndim == 2, then z "
                                     "must have shape (n, m)")
            # FITPACK requires monotonically increasing coordinates; sort
            # both axes and permute z to match.
            if not np.all(x[1:] >= x[:-1]):
                j = np.argsort(x)
                x = x[j]
                z = z[:, j]
            if not np.all(y[1:] >= y[:-1]):
                j = np.argsort(y)
                y = y[j]
                z = z[j, :]
            z = ravel(z.T)
        else:
            z = ravel(z)
            if len(x) != len(y):
                raise ValueError(
                    "x and y must have equal lengths for non rectangular grid")
            if len(z) != len(x):
                raise ValueError(
                    "Invalid length for input z for non rectangular grid")
        try:
            # Spline degree along both axes, derived from the kind string.
            kx = ky = {'linear': 1,
                       'cubic': 3,
                       'quintic': 5}[kind]
        except KeyError:
            raise ValueError("Unsupported interpolation type.")
        if not rectangular_grid:
            # TODO: surfit is really not meant for interpolation!
            self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
        else:
            # Regular grid: use the dedicated (and much faster) FITPACK
            # routine; s=0.0 forces pure interpolation (no smoothing).
            nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
                x, y, z, None, None, None, None,
                kx=kx, ky=ky, s=0.0)
            self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
                        kx, ky)
        self.bounds_error = bounds_error
        self.fill_value = fill_value
        self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
        # Cached domain bounds for out-of-range handling in __call__.
        self.x_min, self.x_max = np.amin(x), np.amax(x)
        self.y_min, self.y_max = np.amin(y), np.amax(y)
    def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
        """Interpolate the function.

        Parameters
        ----------
        x : 1D array
            x-coordinates of the mesh on which to interpolate.
        y : 1D array
            y-coordinates of the mesh on which to interpolate.
        dx : int >= 0, < kx
            Order of partial derivatives in x.
        dy : int >= 0, < ky
            Order of partial derivatives in y.
        assume_sorted : bool, optional
            If False, values of `x` and `y` can be in any order and they are
            sorted first.
            If True, `x` and `y` have to be arrays of monotonically
            increasing values.

        Returns
        -------
        z : 2D array with shape (len(y), len(x))
            The interpolated values.
        """
        x = atleast_1d(x)
        y = atleast_1d(y)
        if x.ndim != 1 or y.ndim != 1:
            raise ValueError("x and y should both be 1-D arrays")
        if not assume_sorted:
            x = np.sort(x)
            y = np.sort(y)
        if self.bounds_error or self.fill_value is not None:
            out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
            out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
            any_out_of_bounds_x = np.any(out_of_bounds_x)
            any_out_of_bounds_y = np.any(out_of_bounds_y)
        if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
            raise ValueError("Values out of range; x must be in %r, y in %r"
                             % ((self.x_min, self.x_max),
                                (self.y_min, self.y_max)))
        z = fitpack.bisplev(x, y, self.tck, dx, dy)
        z = atleast_2d(z)
        # bisplev returns values indexed (x, y); transpose to (y, x).
        z = transpose(z)
        if self.fill_value is not None:
            if any_out_of_bounds_x:
                z[:, out_of_bounds_x] = self.fill_value
            if any_out_of_bounds_y:
                z[out_of_bounds_y, :] = self.fill_value
        if len(z) == 1:
            z = z[0]
        return array(z)
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic, 'cubic'
where 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of first, second or third order) or as an integer
specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised.
fill_value : float, optional
If provided, then this value will be used to fill in for requested
points outside of the data range. If not provided, then the default
is NaN.
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
    def __init__(self, x, y, kind='linear', axis=-1,
                 copy=True, bounds_error=True, fill_value=np.nan,
                 assume_sorted=False):
        """ Initialize a 1D linear interpolation class."""
        _Interpolator1D.__init__(self, x, y, axis=axis)
        self.copy = copy
        self.bounds_error = bounds_error
        self.fill_value = fill_value
        # Map string kinds to a spline order; integer kinds are used
        # directly as the order.  'linear' and 'nearest' bypass splines.
        if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
            order = {'nearest': 0, 'zero': 0,'slinear': 1,
                     'quadratic': 2, 'cubic': 3}[kind]
            kind = 'spline'
        elif isinstance(kind, int):
            order = kind
            kind = 'spline'
        elif kind not in ('linear', 'nearest'):
            raise NotImplementedError("%s is unsupported: Use fitpack "
                                      "routines for other types." % kind)
        x = array(x, copy=self.copy)
        y = array(y, copy=self.copy)
        if not assume_sorted:
            # Sort x and reorder y along the interpolation axis to match.
            ind = np.argsort(x)
            x = x[ind]
            y = np.take(y, ind, axis=axis)
        if x.ndim != 1:
            raise ValueError("the x array must have exactly one dimension.")
        if y.ndim == 0:
            raise ValueError("the y array must have at least one dimension.")
        # Force-cast y to a floating-point type, if it's not yet one
        if not issubclass(y.dtype.type, np.inexact):
            y = y.astype(np.float_)
        # Backward compatibility
        self.axis = axis % y.ndim
        # Interpolation goes internally along the first axis
        self.y = y
        y = self._reshape_yi(y)
        # Adjust to interpolation kind; store reference to *unbound*
        # interpolation methods, in order to avoid circular references to self
        # stored in the bound instance methods, and therefore delayed garbage
        # collection. See: http://docs.python.org/2/reference/datamodel.html
        if kind in ('linear', 'nearest'):
            # Make a "view" of the y array that is rotated to the interpolation
            # axis.
            minval = 2
            if kind == 'nearest':
                # Midpoints between samples decide which neighbour is nearest.
                self.x_bds = (x[1:] + x[:-1]) / 2.0
                self._call = self.__class__._call_nearest
            else:
                self._call = self.__class__._call_linear
        else:
            # A spline of order k needs at least k+1 points.
            minval = order + 1
            self._spline = splmake(x, y, order=order)
            self._call = self.__class__._call_spline
        if len(x) < minval:
            raise ValueError("x and y arrays must have at "
                             "least %d entries" % minval)
        self._kind = kind
        self.x = x
        self._y = y
    def _call_linear(self, x_new):
        """Piecewise-linear interpolation of the stored data at `x_new`."""
        # 2. Find where in the original data, the values to interpolate
        # would be inserted.
        # Note: If x_new[n] == x[m], then m is returned by searchsorted.
        x_new_indices = searchsorted(self.x, x_new)
        # 3. Clip x_new_indices so that they are within the range of
        # self.x indices and at least 1. Removes mis-interpolation
        # of x_new[n] = x[0]
        x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
        # 4. Calculate the slope of regions that each x_new value falls in.
        lo = x_new_indices - 1
        hi = x_new_indices
        x_lo = self.x[lo]
        x_hi = self.x[hi]
        y_lo = self._y[lo]
        y_hi = self._y[hi]
        # Note that the following two expressions rely on the specifics of the
        # broadcasting semantics.
        slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
        # 5. Calculate the actual value for each entry in x_new.
        y_new = slope*(x_new - x_lo)[:, None] + y_lo
        return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
    def _call_spline(self, x_new):
        # Evaluate the spline representation built by splmake() in __init__
        # at the new abscissae.
        return spleval(self._spline, x_new)
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
out_of_bounds = self._check_bounds(x_new)
y_new = self._call(self, x_new)
if len(y_new) > 0:
y_new[out_of_bounds] = self.fill_value
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
out_of_bounds = logical_or(below_bounds, above_bounds)
return out_of_bounds
class _PPolyBase(object):
    """
    Base class for piecewise polynomials.
    """
    # __slots__: these are the only per-instance attributes; subclasses add
    # behaviour (basis-specific evaluation), not state.
    __slots__ = ('c', 'x', 'extrapolate', 'axis')
    def __init__(self, c, x, extrapolate=None, axis=0):
        self.c = np.asarray(c)
        self.x = np.ascontiguousarray(x, dtype=np.float64)
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = bool(extrapolate)
        # The interpolation axis must be one of the trailing axes of c
        # (the first two axes of the normalized c are (order, intervals)).
        if not (0 <= axis < self.c.ndim - 1):
            raise ValueError("%s must be between 0 and %s" % (axis, c.ndim-1))
        self.axis = axis
        if axis != 0:
            # roll the interpolation axis to be the first one in self.c
            # More specifically, the target shape for self.c is (k, m, ...),
            # and axis !=0 means that we have c.shape (..., k, m, ...)
            #                                               ^
            #                                              axis
            # So we roll two of them.
            self.c = np.rollaxis(self.c, axis+1)
            self.c = np.rollaxis(self.c, axis+1)
        if self.x.ndim != 1:
            raise ValueError("x must be 1-dimensional")
        if self.x.size < 2:
            raise ValueError("at least 2 breakpoints are needed")
        if self.c.ndim < 2:
            raise ValueError("c must have at least 2 dimensions")
        if self.c.shape[0] == 0:
            raise ValueError("polynomial must be at least of order 0")
        if self.c.shape[1] != self.x.size-1:
            raise ValueError("number of coefficients != len(x)-1")
        if np.any(self.x[1:] - self.x[:-1] < 0):
            raise ValueError("x-coordinates are not in increasing order")
        # Normalize the coefficient dtype to float or complex only.
        dtype = self._get_dtype(self.c.dtype)
        self.c = np.ascontiguousarray(self.c, dtype=dtype)
    def _get_dtype(self, dtype):
        # Promote to complex if either the input dtype or the stored
        # coefficients are complex; otherwise use plain float.
        if np.issubdtype(dtype, np.complexfloating) \
               or np.issubdtype(self.c.dtype, np.complexfloating):
            return np.complex_
        else:
            return np.float_
    @classmethod
    def construct_fast(cls, c, x, extrapolate=None, axis=0):
        """
        Construct the piecewise polynomial without making checks.
        Takes the same parameters as the constructor. Input arguments
        `c` and `x` must be arrays of the correct shape and type.  The
        `c` array can only be of dtypes float and complex, and `x`
        array must have dtype float.
        """
        # Bypass __init__ entirely (and its validation/copies) for speed.
        self = object.__new__(cls)
        self.c = c
        self.x = x
        self.axis = axis
        if extrapolate is None:
            extrapolate = True
        self.extrapolate = extrapolate
        return self
    def _ensure_c_contiguous(self):
        """
        c and x may be modified by the user.  The Cython code expects
        that they are C contiguous.
        """
        if not self.x.flags.c_contiguous:
            self.x = self.x.copy()
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()
    def extend(self, c, x, right=True):
        """
        Add additional breakpoints and coefficients to the polynomial.
        Parameters
        ----------
        c : ndarray, size (k, m, ...)
            Additional coefficients for polynomials in intervals
            ``self.x[-1] <= x < x_right[0]``, ``x_right[0] <= x < x_right[1]``,
            ..., ``x_right[m-2] <= x < x_right[m-1]``
        x : ndarray, size (m,)
            Additional breakpoints. Must be sorted and either to
            the right or to the left of the current breakpoints.
        right : bool, optional
            Whether the new intervals are to the right or to the left
            of the current intervals.
        """
        c = np.asarray(c)
        x = np.asarray(x)
        if c.ndim < 2:
            raise ValueError("invalid dimensions for c")
        if x.ndim != 1:
            raise ValueError("invalid dimensions for x")
        if x.shape[0] != c.shape[1]:
            raise ValueError("x and c have incompatible sizes")
        if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
            raise ValueError("c and self.c have incompatible shapes")
        if right:
            if x[0] < self.x[-1]:
                raise ValueError("new x are not to the right of current ones")
        else:
            if x[-1] > self.x[0]:
                raise ValueError("new x are not to the left of current ones")
        if c.size == 0:
            return
        dtype = self._get_dtype(c.dtype)
        # The merged coefficient array uses the higher of the two orders;
        # lower-order pieces are zero-padded at the top (leading) rows.
        k2 = max(c.shape[0], self.c.shape[0])
        c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
                      dtype=dtype)
        if right:
            c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
            c2[k2-c.shape[0]:, self.c.shape[1]:] = c
            self.x = np.r_[self.x, x]
        else:
            c2[k2-self.c.shape[0]:, :c.shape[1]] = c
            c2[k2-c.shape[0]:, c.shape[1]:] = self.c
            self.x = np.r_[x, self.x]
        self.c = c2
    def __call__(self, x, nu=0, extrapolate=None):
        """
        Evaluate the piecewise polynomial or its derivative
        Parameters
        ----------
        x : array_like
            Points to evaluate the interpolant at.
        nu : int, optional
            Order of derivative to evaluate. Must be non-negative.
        extrapolate : bool, optional
            Whether to extrapolate to ouf-of-bounds points based on first
            and last intervals, or to return NaNs.
        Returns
        -------
        y : array_like
            Interpolated values. Shape is determined by replacing
            the interpolation axis in the original array with the shape of x.
        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        x = np.asarray(x)
        # Flatten x for the C evaluation kernel; the original shape is
        # restored below.
        x_shape, x_ndim = x.shape, x.ndim
        x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
        out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        # Dispatch to the subclass-specific basis evaluation.
        self._evaluate(x, nu, extrapolate, out)
        out = out.reshape(x_shape + self.c.shape[2:])
        if self.axis != 0:
            # transpose to move the calculated values to the interpolation axis
            l = list(range(out.ndim))
            l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
            out = out.transpose(l)
        return out
class PPoly(_PPolyBase):
    """
    Piecewise polynomial in terms of coefficients and breakpoints
    The polynomial in the ith interval is ``x[i] <= xp < x[i+1]``::
        S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
    where ``k`` is the degree of the polynomial. This representation
    is the local power basis.
    Parameters
    ----------
    c : ndarray, shape (k, m, ...)
        Polynomial coefficients, order `k` and `m` intervals
    x : ndarray, shape (m+1,)
        Polynomial breakpoints. These must be sorted in
        increasing order.
    extrapolate : bool, optional
        Whether to extrapolate to ouf-of-bounds points based on first
        and last intervals, or to return NaNs. Default: True.
    axis : int, optional
        Interpolation axis. Default is zero.
    Attributes
    ----------
    x : ndarray
        Breakpoints.
    c : ndarray
        Coefficients of the polynomials. They are reshaped
        to a 3-dimensional array with the last dimension representing
        the trailing dimensions of the original coefficient array.
    axis : int
        Interpolation axis.
    Methods
    -------
    __call__
    derivative
    antiderivative
    integrate
    roots
    extend
    from_spline
    from_bernstein_basis
    construct_fast
    See also
    --------
    BPoly : piecewise polynomials in the Bernstein basis
    Notes
    -----
    High-order polynomials in the power basis can be numerically
    unstable.  Precision problems can start to appear for orders
    larger than 20-30.
    """
    def _evaluate(self, x, nu, extrapolate, out):
        # Delegate to the compiled power-basis evaluation kernel; trailing
        # dimensions of c are flattened for the 3-D kernel signature.
        _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                        self.x, x, nu, bool(extrapolate), out)
    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.
        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. (Default: 1)
            If negative, the antiderivative is returned.
        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k - n representing the derivative
            of this polynomial.
        Notes
        -----
        Derivatives are evaluated piecewise for each polynomial
        segment, even if the polynomial is not differentiable at the
        breakpoints. The polynomial intervals are considered half-open,
        ``[a, b)``, except for the last interval which is closed
        ``[a, b]``.
        """
        if nu < 0:
            return self.antiderivative(-nu)
        # reduce order
        if nu == 0:
            c2 = self.c.copy()
        else:
            # Differentiation in the power basis drops the nu lowest powers.
            c2 = self.c[:-nu,:].copy()
        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
        # multiply by the correct rising factorials
        factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
        c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.
        Antiderivativative is also the indefinite integral of the function,
        and derivative is its inverse operation.
        Parameters
        ----------
        nu : int, optional
            Order of antiderivative to evaluate. (Default: 1)
            If negative, the derivative is returned.
        Returns
        -------
        pp : PPoly
            Piecewise polynomial of order k2 = k + n representing
            the antiderivative of this polynomial.
        Notes
        -----
        The antiderivative returned by this function is continuous and
        continuously differentiable to order n-1, up to floating point
        rounding error.
        """
        if nu <= 0:
            return self.derivative(-nu)
        # Integration raises the order by nu: prepend nu coefficient rows.
        c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
                     dtype=self.c.dtype)
        c[:-nu] = self.c
        # divide by the correct rising factorials
        factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
        c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
        # fix continuity of added degrees of freedom
        self._ensure_c_contiguous()
        _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
                              self.x, nu - 1)
        # construct a compatible polynomial
        return self.construct_fast(c, self.x, self.extrapolate, self.axis)
    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.
        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : bool, optional
            Whether to extrapolate to ouf-of-bounds points based on first
            and last intervals, or to return NaNs.
        Returns
        -------
        ig : array_like
            Definite integral of the piecewise polynomial over [a, b]
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        # Swap integration bounds if needed
        sign = 1
        if b < a:
            a, b = b, a
            sign = -1
        # Compute the integral
        range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
        self._ensure_c_contiguous()
        _ppoly.integrate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                         self.x, a, b, bool(extrapolate),
                         out=range_int)
        # Return
        range_int *= sign
        return range_int.reshape(self.c.shape[2:])
    def roots(self, discontinuity=True, extrapolate=None):
        """
        Find real roots of the piecewise polynomial.
        Parameters
        ----------
        discontinuity : bool, optional
            Whether to report sign changes across discontinuities at
            breakpoints as roots.
        extrapolate : bool, optional
            Whether to return roots from the polynomial extrapolated
            based on first and last intervals.
        Returns
        -------
        roots : ndarray
            Roots of the polynomial(s).
            If the PPoly object describes multiple polynomials, the
            return value is an object array whose each element is an
            ndarray containing the roots.
        Notes
        -----
        This routine works only on real-valued polynomials.
        If the piecewise polynomial contains sections that are
        identically zero, the root list will contain the start point
        of the corresponding interval, followed by a ``nan`` value.
        If the polynomial is discontinuous across a breakpoint, and
        there is a sign change across the breakpoint, this is reported
        if the `discont` parameter is True.
        Examples
        --------
        Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
        ``[-2, 1], [1, 2]``:
        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
        >>> pp.roots()
        array([-1.,  1.])
        """
        if extrapolate is None:
            extrapolate = self.extrapolate
        self._ensure_c_contiguous()
        if np.issubdtype(self.c.dtype, np.complexfloating):
            raise ValueError("Root finding is only for "
                             "real-valued polynomials")
        r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
                              self.x, bool(discontinuity),
                              bool(extrapolate))
        if self.c.ndim == 2:
            # Single polynomial: return the roots array directly.
            return r[0]
        else:
            # Multiple polynomials: object array of per-polynomial roots.
            r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
            # this for-loop is equivalent to ``r2[...] = r``, but that's broken
            # in numpy 1.6.0
            for ii, root in enumerate(r):
                r2[ii] = root
            return r2.reshape(self.c.shape[2:])
    @classmethod
    def from_spline(cls, tck, extrapolate=None):
        """
        Construct a piecewise polynomial from a spline
        Parameters
        ----------
        tck
            A spline, as returned by `splrep`
        extrapolate : bool, optional
            Whether to extrapolate to ouf-of-bounds points based on first
            and last intervals, or to return NaNs. Default: True.
        """
        t, c, k = tck
        # Row k-m holds the m-th derivative of the spline at the left knot,
        # divided by m!, i.e. the local Taylor (power-basis) coefficient.
        cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
        for m in xrange(k, -1, -1):
            y = fitpack.splev(t[:-1], tck, der=m)
            cvals[k - m, :] = y/spec.gamma(m+1)
        return cls.construct_fast(cvals, t, extrapolate)
    @classmethod
    def from_bernstein_basis(cls, bp, extrapolate=None):
        """
        Construct a piecewise polynomial in the power basis
        from a polynomial in Bernstein basis.
        Parameters
        ----------
        bp : BPoly
            A Bernstein basis polynomial, as created by BPoly
        extrapolate : bool, optional
            Whether to extrapolate to ouf-of-bounds points based on first
            and last intervals, or to return NaNs. Default: True.
        """
        dx = np.diff(bp.x)
        k = bp.c.shape[0] - 1  # polynomial order
        rest = (None,)*(bp.c.ndim-2)
        c = np.zeros_like(bp.c)
        # Expand each Bernstein basis function into powers of (x - x[i]);
        # the dx**s factor rescales from the unit interval.
        for a in range(k+1):
            factor = (-1)**(a) * comb(k, a) * bp.c[a]
            for s in range(a, k+1):
                val = comb(k-a, s-a) * (-1)**s
                c[k-s] += factor * val / dx[(slice(None),)+rest]**s
        if extrapolate is None:
            extrapolate = bp.extrapolate
        return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
    """
    Piecewise polynomial in terms of coefficients and breakpoints
    The polynomial in the ``i``-th interval ``x[i] <= xp < x[i+1]``
    is written in the Bernstein polynomial basis::
        S = sum(c[a, i] * b(a, k; x) for a in range(k+1))
    where ``k`` is the degree of the polynomial, and::
        b(a, k; x) = comb(k, a) * t**k * (1 - t)**(k - a)
    with ``t = (x - x[i]) / (x[i+1] - x[i])``.
    Parameters
    ----------
    c : ndarray, shape (k, m, ...)
        Polynomial coefficients, order `k` and `m` intervals
    x : ndarray, shape (m+1,)
        Polynomial breakpoints. These must be sorted in
        increasing order.
    extrapolate : bool, optional
        Whether to extrapolate to ouf-of-bounds points based on first
        and last intervals, or to return NaNs. Default: True.
    axis : int, optional
        Interpolation axis. Default is zero.
    Attributes
    ----------
    x : ndarray
        Breakpoints.
    c : ndarray
        Coefficients of the polynomials. They are reshaped
        to a 3-dimensional array with the last dimension representing
        the trailing dimensions of the original coefficient array.
    axis : int
        Interpolation axis.
    Methods
    -------
    __call__
    extend
    derivative
    antiderivative
    integrate
    construct_fast
    from_power_basis
    from_derivatives
    See also
    --------
    PPoly : piecewise polynomials in the power basis
    Notes
    -----
    Properties of Bernstein polynomials are well documented in the literature.
    Here's a non-exhaustive list:
    .. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
    .. [2] Kenneth I. Joy, Bernstein polynomials,
      http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
    .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
           vol 2011, article ID 829546, doi:10.1155/2011/829543
    Examples
    --------
    >>> from scipy.interpolate import BPoly
    >>> x = [0, 1]
    >>> c = [[1], [2], [3]]
    >>> bp = BPoly(c, x)
    This creates a 2nd order polynomial
    .. math::
        B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
             = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
    """
    def _evaluate(self, x, nu, extrapolate, out):
        # Delegate to the compiled Bernstein-basis evaluation kernel;
        # trailing dimensions of c are flattened for the 3-D kernel.
        _ppoly.evaluate_bernstein(
            self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
            self.x, x, nu, bool(extrapolate), out)
    def derivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the derivative.
        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. (Default: 1)
            If negative, the antiderivative is returned.
        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k2 = k - nu representing the derivative
            of this polynomial.
        """
        if nu < 0:
            return self.antiderivative(-nu)
        if nu > 1:
            # Higher derivatives are computed by repeated first derivatives.
            bp = self
            for k in range(nu):
                bp = bp.derivative()
            return bp
        # reduce order
        if nu == 0:
            c2 = self.c.copy()
        else:
            # For a polynomial
            #    B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
            # we use the fact that
            #   b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
            # which leads to
            #   B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
            #
            # finally, for an interval [y, y + dy] with dy != 1,
            # we need to correct for an extra power of dy
            rest = (None,)*(self.c.ndim-2)
            k = self.c.shape[0] - 1
            dx = np.diff(self.x)[(None, slice(None))+rest]
            c2 = k * np.diff(self.c, axis=0) / dx
        if c2.shape[0] == 0:
            # derivative of order 0 is zero
            c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
        # construct a compatible polynomial
        return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
    def antiderivative(self, nu=1):
        """
        Construct a new piecewise polynomial representing the antiderivative.
        Parameters
        ----------
        nu : int, optional
            Order of derivative to evaluate. (Default: 1)
            If negative, the derivative is returned.
        Returns
        -------
        bp : BPoly
            Piecewise polynomial of order k2 = k + nu representing the
            antiderivative of this polynomial.
        """
        if nu <= 0:
            return self.derivative(-nu)
        if nu > 1:
            # Higher antiderivatives by repeated first antiderivatives.
            bp = self
            for k in range(nu):
                bp = bp.antiderivative()
            return bp
        # Construct the indefinite integrals on individual intervals
        c, x = self.c, self.x
        k = c.shape[0]
        c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
        c2[1:, ...] = np.cumsum(c, axis=0) / k
        delta = x[1:] - x[:-1]
        c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
        # Now fix continuity: on the very first interval, take the integration
        # constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
        # the integration constant is then equal to the jump of the `bp` at x_j.
        # The latter is given by the coefficient of B_{n+1, n+1}
        # *on the previous interval* (other B. polynomials are zero at the breakpoint)
        # Finally, use the fact that BPs form a partition of unity.
        c2[:,1:] += np.cumsum(c2[k,:], axis=0)[:-1]
        return self.construct_fast(c2, x, self.extrapolate, axis=self.axis)
    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral over a piecewise polynomial.
        Parameters
        ----------
        a : float
            Lower integration bound
        b : float
            Upper integration bound
        extrapolate : bool, optional
            Whether to extrapolate to out-of-bounds points based on first
            and last intervals, or to return NaNs.
            Defaults to ``self.extrapolate``.
        Returns
        -------
        array_like
            Definite integral of the piecewise polynomial over [a, b]
        """
        # XXX: can probably use instead the fact that
        # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
        ib = self.antiderivative()
        if extrapolate is not None:
            ib.extrapolate = extrapolate
        return ib(b) - ib(a)
    def extend(self, c, x, right=True):
        # Raise both coefficient arrays to a common degree before
        # delegating to the base-class extend.
        k = max(self.c.shape[0], c.shape[0])
        self.c = self._raise_degree(self.c, k - self.c.shape[0])
        c = self._raise_degree(c, k - c.shape[0])
        return _PPolyBase.extend(self, c, x, right)
    extend.__doc__ = _PPolyBase.extend.__doc__
    @classmethod
    def from_power_basis(cls, pp, extrapolate=None):
        """
        Construct a piecewise polynomial in Bernstein basis
        from a power basis polynomial.
        Parameters
        ----------
        pp : PPoly
            A piecewise polynomial in the power basis
        extrapolate : bool, optional
            Whether to extrapolate to ouf-of-bounds points based on first
            and last intervals, or to return NaNs. Default: True.
        """
        dx = np.diff(pp.x)
        k = pp.c.shape[0] - 1  # polynomial order
        rest = (None,)*(pp.c.ndim-2)
        c = np.zeros_like(pp.c)
        # Expand each power (x - x[i])**(k-a) into the Bernstein basis on
        # the interval; the dx**(k-a) factor rescales to the unit interval.
        for a in range(k+1):
            factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
            for j in range(k-a, k+1):
                c[j] += factor * comb(j, k-a)
        if extrapolate is None:
            extrapolate = pp.extrapolate
        return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
    @classmethod
    def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
        """Construct a piecewise polynomial in the Bernstein basis,
        compatible with the specified values and derivatives at breakpoints.
        Parameters
        ----------
        xi : array_like
            sorted 1D array of x-coordinates
        yi : array_like or list of array_likes
            ``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
        orders : None or int or array_like of ints. Default: None.
            Specifies the degree of local polynomials. If not None, some
            derivatives are ignored.
        extrapolate : bool, optional
            Whether to extrapolate to ouf-of-bounds points based on first
            and last intervals, or to return NaNs. Default: True.
        Notes
        -----
        If ``k`` derivatives are specified at a breakpoint ``x``, the
        constructed polynomial is exactly ``k`` times continuously
        differentiable at ``x``, unless the ``order`` is provided explicitly.
        In the latter case, the smoothness of the polynomial at
        the breakpoint is controlled by the ``order``.
        Deduces the number of derivatives to match at each end
        from ``order`` and the number of derivatives available. If
        possible it uses the same number of derivatives from
        each end; if the number is odd it tries to take the
        extra one from y2. In any case if not enough derivatives
        are available at one end or another it draws enough to
        make up the total from the other end.
        If the order is too high and not enough derivatives are available,
        an exception is raised.
        Examples
        --------
        >>> from scipy.interpolate import BPoly
        >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
        Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
        such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
        >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
        Creates a piecewise polynomial `f(x)`, such that
        `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
        Based on the number of derivatives provided, the order of the
        local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
        Notice that no restriction is imposed on the derivatives at
        `x = 1` and `x = 2`.
        Indeed, the explicit form of the polynomial is::
            f(x) = | x * (1 - x),  0 <= x < 1
                   | 2 * (x - 1),  1 <= x <= 2
        So that f'(1-0) = -1 and f'(1+0) = 2
        """
        xi = np.asarray(xi)
        if len(xi) != len(yi):
            raise ValueError("xi and yi need to have the same length")
        # BUGFIX: compare consecutive breakpoints (xi[:-1]), not every
        # breakpoint against the first one (xi[:1]); the latter accepted
        # non-monotonic sequences such as [0, 2, 1].
        if np.any(xi[1:] - xi[:-1] <= 0):
            raise ValueError("x coordinates are not in increasing order")
        # number of intervals
        m = len(xi) - 1
        # global poly order is k-1, local orders are <=k and can vary
        try:
            k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
        except TypeError:
            raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
        if orders is None:
            orders = [None] * m
        else:
            if isinstance(orders, integer_types):
                orders = [orders] * m
            k = max(k, max(orders))
            if any(o <= 0 for o in orders):
                raise ValueError("Orders must be positive.")
        c = []
        for i in range(m):
            y1, y2 = yi[i], yi[i+1]
            if orders[i] is None:
                n1, n2 = len(y1), len(y2)
            else:
                n = orders[i]+1
                # Split the n matched conditions as evenly as possible
                # between the two endpoints, limited by availability.
                n1 = min(n//2, len(y1))
                n2 = min(n - n1, len(y2))
                # BUGFIX: re-balance n1 against the derivatives available
                # at the *left* endpoint (len(y1)), not len(y2); the old
                # code could silently request more left-derivatives than
                # exist and then fail the n1 <= len(y1) check below.
                n1 = min(n - n2, len(y1))
                if n1+n2 != n:
                    raise ValueError("Point %g has %d derivatives, point %g"
                            " has %d derivatives, but order %d requested" %
                            (xi[i], len(y1), xi[i+1], len(y2), orders[i]))
                if not (n1 <= len(y1) and n2 <= len(y2)):
                    raise ValueError("`order` input incompatible with"
                            " length y1 or y2.")
            b = BPoly._construct_from_derivatives(xi[i], xi[i+1], y1[:n1], y2[:n2])
            if len(b) < k:
                b = BPoly._raise_degree(b, k - len(b))
            c.append(b)
        c = np.asarray(c)
        return cls(c.swapaxes(0, 1), xi, extrapolate)
    @staticmethod
    def _construct_from_derivatives(xa, xb, ya, yb):
        """Compute the coefficients of a polynomial in the Bernstein basis
        given the values and derivatives at the edges.
        Return the coefficients of a polynomial in the Bernstein basis
        defined on `[xa, xb]` and having the values and derivatives at the
        endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
        The polynomial constructed is of the minimal possible degree, i.e.,
        if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
        of the polynomial is ``na + nb - 1``.
        Parameters
        ----------
        xa : float
            Left-hand end point of the interval
        xb : float
            Right-hand end point of the interval
        ya : array_like
            Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
            ``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
        yb : array_like
            Derivatives at ``xb``.
        Returns
        -------
        array
            coefficient array of a polynomial having specified derivatives
        Notes
        -----
        This uses several facts from life of Bernstein basis functions.
        First of all,
            .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
        If B(x) is a linear combination of the form
            .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}.
        Iterating the latter one, one finds for the q-th derivative
            .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
        with
          .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and
        `c_q` are found one by one by iterating `q = 0, ..., na`.
        At `x = xb` it's the same with `a = n - q`.
        """
        ya, yb = np.asarray(ya), np.asarray(yb)
        if ya.shape[1:] != yb.shape[1:]:
            raise ValueError('ya and yb have incompatible dimensions.')
        dta, dtb = ya.dtype, yb.dtype
        if (np.issubdtype(dta, np.complexfloating)
               or np.issubdtype(dtb, np.complexfloating)):
            dt = np.complex_
        else:
            dt = np.float_
        na, nb = len(ya), len(yb)
        n = na + nb
        c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
        # compute coefficients of a polynomial degree na+nb-1
        # walk left-to-right
        for q in range(0, na):
            c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
            for j in range(0, q):
                c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
        # now walk right-to-left
        for q in range(0, nb):
            c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
            for j in range(0, q):
                c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
        return c
    @staticmethod
    def _raise_degree(c, d):
        """Raise a degree of a polynomial in the Bernstein basis.
        Given the coefficients of a polynomial degree `k`, return (the
        coefficients of) the equivalent polynomial of degree `k+d`.
        Parameters
        ----------
        c : array_like
            coefficient array, 1D
        d : integer
        Returns
        -------
        array
            coefficient array, 1D array of length `c.shape[0] + d`
        Notes
        -----
        This uses the fact that a Bernstein polynomial `b_{a, k}` can be
        identically represented as a linear combination of polynomials of
        a higher degree `k+d`:
            .. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
                                 comb(d, j) / comb(k+d, a+j)
        """
        if d == 0:
            return c
        k = c.shape[0] - 1
        out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
        for a in range(c.shape[0]):
            f = c[a] * comb(k, a)
            for j in range(d+1):
                out[a+j] += f * comb(d, j) / comb(k+d, a+j)
        return out
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x,y,z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype')
and not np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices, norm_distances, out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices, norm_distances, out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
    def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
        """Multilinear interpolation: blend the 2**ndim surrounding nodes."""
        # slice for broadcasting over trailing dimensions in self.values
        vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
        # find relevant values
        # each i and i+1 represents an edge of the enclosing grid cell
        edges = itertools.product(*[[i, i + 1] for i in indices])
        values = 0.
        for edge_indices in edges:
            # The weight of a cell corner is the product, over dimensions,
            # of (1 - t) for the lower edge and t for the upper edge,
            # where t is the normalised distance to the lower edge.
            weight = 1.
            for ei, i, yi in zip(edge_indices, indices, norm_distances):
                weight *= np.where(ei == i, 1 - yi, yi)
            values += np.asarray(self.values[edge_indices]) * weight[vslice]
        return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
            fill_value=np.nan):
    """
    Multidimensional interpolation on regular grids.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.
    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.
    xi : ndarray of shape (..., ndim)
        The coordinates to sample the gridded data at
    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest", and "splinef2d". "splinef2d" is only supported for
        2-dimensional data.
    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.
    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated. Extrapolation is not supported by method
        "splinef2d".

    Returns
    -------
    values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
        Interpolated values at input coordinates.

    Notes
    -----
    .. versionadded:: 0.14

    See also
    --------
    NearestNDInterpolator : Nearest neighbour interpolation on unstructured
                            data in N dimensions
    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions
    RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
                              regular grid in arbitrary dimensions
    RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
    """
    # sanity check 'method' kwarg
    if method not in ["linear", "nearest", "splinef2d"]:
        raise ValueError("interpn only understands the methods 'linear', "
                         "'nearest', and 'splinef2d'. You provided %s." %
                         method)
    if not hasattr(values, 'ndim'):
        values = np.asarray(values)
    ndim = values.ndim
    if ndim > 2 and method == "splinef2d":
        raise ValueError("The method spline2fd can only be used for "
                         "2-dimensional input data")
    if not bounds_error and fill_value is None and method == "splinef2d":
        raise ValueError("The method spline2fd does not support extrapolation.")
    # sanity check consistency of input dimensions
    if len(points) > ndim:
        raise ValueError("There are %d point arrays, but values has %d "
                         "dimensions" % (len(points), ndim))
    if len(points) != ndim and method == 'splinef2d':
        raise ValueError("The method spline2fd can only be used for "
                         "scalar data with one point per coordinate")
    # sanity check input grid
    for i, p in enumerate(points):
        if not np.all(np.diff(p) > 0.):
            raise ValueError("The points in dimension %d must be strictly "
                             "ascending" % i)
        if not np.asarray(p).ndim == 1:
            raise ValueError("The points in dimension %d must be "
                             "1-dimensional" % i)
        if not values.shape[i] == len(p):
            raise ValueError("There are %d points and %d values in "
                             "dimension %d" % (len(p), values.shape[i], i))
    grid = tuple([np.asarray(p) for p in points])
    # sanity check requested xi
    xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
    if xi.shape[-1] != len(grid):
        # Bug fix: report the size of the trailing (coordinate) axis; the
        # previous message used xi.shape[1], which is wrong (and may raise
        # IndexError) for query arrays that are not 2-D.
        raise ValueError("The requested sample points xi have dimension "
                         "%d, but this RegularGridInterpolator has "
                         "dimension %d" % (xi.shape[-1], len(grid)))
    for i, p in enumerate(xi.T):
        if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
                                               np.all(p <= grid[i][-1])):
            raise ValueError("One of the requested xi is out of bounds "
                             "in dimension %d" % i)
    # perform interpolation
    if method == "linear":
        interp = RegularGridInterpolator(points, values, method="linear",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "nearest":
        interp = RegularGridInterpolator(points, values, method="nearest",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "splinef2d":
        xi_shape = xi.shape
        xi = xi.reshape(-1, xi.shape[-1])
        # RectBivariateSpline doesn't support fill_value; evaluate only the
        # in-bounds points and fill the remainder manually.
        idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
                            grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
                           axis=0)
        result = np.empty_like(xi[:, 0])
        # NOTE(review): values[:] is a view for ndarrays, not a copy (the
        # old comment was misleading); RectBivariateSpline copies internally.
        interp = RectBivariateSpline(points[0], points[1], values[:])
        result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
        result[np.logical_not(idx_valid)] = fill_value
        return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class ppform(PPoly):
    """
    Deprecated piecewise polynomial class.
    New code should use the `PPoly` class instead.
    """
    def __init__(self, coeffs, breaks, fill=0.0, sort=False):
        # Emit the deprecation warning on every construction; this class
        # only exists for backward compatibility.
        warnings.warn("ppform is deprecated -- use PPoly instead",
                      category=DeprecationWarning)
        if sort:
            breaks = np.sort(breaks)
        else:
            breaks = np.asarray(breaks)
        PPoly.__init__(self, coeffs, breaks)
        # Legacy attribute aliases kept for old callers: coeffs/breaks
        # mirror PPoly's c/x, K is the polynomial order + 1, and [a, b]
        # is the approximation interval.
        self.coeffs = self.c
        self.breaks = self.x
        self.K = self.coeffs.shape[0]
        self.fill = fill
        self.a = self.breaks[0]
        self.b = self.breaks[-1]
    def __call__(self, x):
        # nu=0 (no derivative), extrapolate=False; out-of-range values are
        # replaced by self.fill in _evaluate below.
        return PPoly.__call__(self, x, 0, False)
    def _evaluate(self, x, nu, extrapolate, out):
        PPoly._evaluate(self, x, nu, extrapolate, out)
        # Outside the approximation interval the result is self.fill.
        out[~((x >= self.a) & (x <= self.b))] = self.fill
        return out
    @classmethod
    def fromspline(cls, xk, cvals, order, fill=0.0):
        # Note: this spline representation is incompatible with FITPACK
        # NOTE(review): `xrange` is Python 2; presumably a py2/3 compat
        # shim is imported at module scope — confirm before running on py3.
        N = len(xk)-1
        sivals = np.empty((order+1, N), dtype=float)
        for m in xrange(order, -1, -1):
            # Taylor coefficient of degree m: m-th derivative / m!.
            fact = spec.gamma(m+1)
            res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
            res /= fact
            sivals[order-m, :] = res
        return cls(sivals, xk, fill=fill)
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
    """Return spline coefficients minimizing derivative discontinuity.

    Solves min ||J*c||_2 subject to B*c = yk via an SVD of B; `conds` is
    ignored and `B` may be supplied to avoid recomputation.
    """
    # construct Bmatrix, and Jmatrix
    # e = J*c
    # minimize norm(e,2) given B*c=yk
    # if desired B can be given
    # conds is ignored
    N = len(xk)-1
    K = order
    if B is None:
        B = _fitpack._bsplmat(order, xk)
    J = _fitpack._bspldismat(order, xk)
    u, s, vh = scipy.linalg.svd(B)
    ind = K-1
    # V2 spans the (K-1)-dimensional null space of B; V1 the row space.
    V2 = vh[-ind:,:].T
    V1 = vh[:-ind,:].T
    A = dot(J.T,J)
    tmp = dot(V2.T,A)
    Q = dot(tmp,V2)
    # Project the smoothness objective onto the null space and solve.
    p = scipy.linalg.solve(Q, tmp)
    tmp = dot(V2,p)
    tmp = np.eye(N+K) - tmp
    tmp = dot(tmp,V1)
    tmp = dot(tmp,np.diag(1.0/s))
    tmp = dot(tmp,u.T)
    # _dot0 handles yk with trailing dimensions (multiple curves).
    return _dot0(tmp, yk)
def _setdiag(a, k, v):
if not a.ndim == 2:
raise ValueError("Input array should be 2-D.")
M,N = a.shape
if k > 0:
start = k
num = N - k
else:
num = M + k
start = abs(k)*N
end = start + num*(N+1)-1
a.flat[start:end:(N+1)] = v
# Return the spline that minimizes the dis-continuity of the
# "order-th" derivative; for order >= 2.
def _find_smoothest2(xk, yk):
    """Cubic-spline variant of _find_smoothest using an explicit
    pseudo-inverse of B; returns the knot values mk of the solution."""
    N = len(xk) - 1
    Np1 = N + 1
    # find pseudo-inverse of B directly.
    Bd = np.empty((Np1, N))
    for k in range(-N,N):
        # Fill each diagonal of Bd with the alternating-sign pattern of
        # the closed-form pseudo-inverse.
        if (k < 0):
            l = np.arange(-k, Np1)
            v = (l+k+1)
            if ((k+1) % 2):
                v = -v
        else:
            l = np.arange(k,N)
            v = N - l
            if ((k % 2)):
                v = -v
        _setdiag(Bd, k, v)
    Bd /= (Np1)
    # V2 is the (normalised) alternating-sign null-space vector of B.
    V2 = np.ones((Np1,))
    V2[1::2] = -1
    V2 /= math.sqrt(Np1)
    dk = np.diff(xk)
    b = 2*np.diff(yk, axis=0)/dk
    # J maps knot values to derivative discontinuities (banded tri-diagonal).
    J = np.zeros((N-1,N+1))
    idk = 1.0/dk
    _setdiag(J,0,idk[:-1])
    _setdiag(J,1,-idk[1:]-idk[:-1])
    _setdiag(J,2,idk[1:])
    A = dot(J.T,J)
    val = dot(V2,dot(A,V2))
    # Remove the null-space component that minimises the J-objective.
    res1 = dot(np.outer(V2,V2)/val,A)
    mk = dot(np.eye(Np1)-res1, _dot0(Bd,b))
    return mk
def _get_spline2_Bb(xk, yk, kind, conds):
    """Build the banded system (B, b) for a quadratic spline.

    Only the 'not-a-knot' end condition is implemented; returns
    (B, b, append_func, nlu) in the convention used by splmake.
    """
    Np1 = len(xk)
    dk = xk[1:]-xk[:-1]
    if kind == 'not-a-knot':
        # use banded-solver
        nlu = (1,1)
        # NOTE(review): bare `ones` — presumably provided by a
        # `from numpy import *`-style import at module top; confirm.
        B = ones((3,Np1))
        alpha = 2*(yk[1:]-yk[:-1])/dk
        zrs = np.zeros((1,)+yk.shape[1:])
        # The extra (not-a-knot) equation is inserted in the middle row.
        row = (Np1-1)//2
        b = np.concatenate((alpha[:row],zrs,alpha[row:]),axis=0)
        B[0,row+2:] = 0
        B[2,:(row-1)] = 0
        B[0,row+1] = dk[row-1]
        B[1,row] = -dk[row]-dk[row-1]
        B[2,row-1] = dk[row]
        return B, b, None, nlu
    else:
        raise NotImplementedError("quadratic %s is not available" % kind)
def _get_spline3_Bb(xk, yk, kind, conds):
    """Build the linear system (B, b) for a cubic spline with the
    requested end conditions; returns (B, b, append_func, nlu)."""
    # internal function to compute different tri-diagonal system
    # depending on the kind of spline requested.
    # conds is only used for 'second' and 'first'
    Np1 = len(xk)
    if kind in ['natural', 'second']:
        if kind == 'natural':
            m0, mN = 0.0, 0.0
        else:
            m0, mN = conds
        # the matrix to invert is (N-1,N-1)
        # use banded solver
        beta = 2*(xk[2:]-xk[:-2])
        alpha = xk[1:]-xk[:-1]
        nlu = (1,1)
        B = np.empty((3,Np1-2))
        B[0,1:] = alpha[2:]
        B[1,:] = beta
        B[2,:-1] = alpha[1:-1]
        dyk = yk[1:]-yk[:-1]
        b = (dyk[1:]/alpha[1:] - dyk[:-1]/alpha[:-1])
        b *= 6
        # Known end second-derivatives move to the right-hand side.
        b[0] -= m0
        b[-1] -= mN
        def append_func(mk):
            # put m0 and mN into the correct shape for
            # concatenation
            ma = array(m0,copy=0,ndmin=yk.ndim)
            mb = array(mN,copy=0,ndmin=yk.ndim)
            if ma.shape[1:] != yk.shape[1:]:
                ma = ma*(ones(yk.shape[1:])[np.newaxis,...])
            if mb.shape[1:] != yk.shape[1:]:
                mb = mb*(ones(yk.shape[1:])[np.newaxis,...])
            mk = np.concatenate((ma,mk),axis=0)
            mk = np.concatenate((mk,mb),axis=0)
            return mk
        return B, b, append_func, nlu
    elif kind in ['clamped', 'endslope', 'first', 'not-a-knot', 'runout',
                  'parabolic']:
        if kind == 'endslope':
            # match slope of lagrange interpolating polynomial of
            # order 3 at end-points.
            # NOTE(review): the (x3-x2) and (xN3-xN2) factors below look
            # sign-flipped relative to the standard Lagrange-derivative
            # denominators (x2-x3)/(xN2-xN3); verify against a reference.
            x0,x1,x2,x3 = xk[:4]
            sl_0 = (1./(x0-x1)+1./(x0-x2)+1./(x0-x3))*yk[0]
            sl_0 += (x0-x2)*(x0-x3)/((x1-x0)*(x1-x2)*(x1-x3))*yk[1]
            sl_0 += (x0-x1)*(x0-x3)/((x2-x0)*(x2-x1)*(x3-x2))*yk[2]
            sl_0 += (x0-x1)*(x0-x2)/((x3-x0)*(x3-x1)*(x3-x2))*yk[3]
            xN3,xN2,xN1,xN0 = xk[-4:]
            sl_N = (1./(xN0-xN1)+1./(xN0-xN2)+1./(xN0-xN3))*yk[-1]
            sl_N += (xN0-xN2)*(xN0-xN3)/((xN1-xN0)*(xN1-xN2)*(xN1-xN3))*yk[-2]
            sl_N += (xN0-xN1)*(xN0-xN3)/((xN2-xN0)*(xN2-xN1)*(xN3-xN2))*yk[-3]
            sl_N += (xN0-xN1)*(xN0-xN2)/((xN3-xN0)*(xN3-xN1)*(xN3-xN2))*yk[-4]
        elif kind == 'clamped':
            sl_0, sl_N = 0.0, 0.0
        elif kind == 'first':
            sl_0, sl_N = conds
        # Now set up the (N+1)x(N+1) system of equations
        beta = np.r_[0,2*(xk[2:]-xk[:-2]),0]
        alpha = xk[1:]-xk[:-1]
        gamma = np.r_[0,alpha[1:]]
        B = np.diag(alpha,k=-1) + np.diag(beta) + np.diag(gamma,k=1)
        d1 = alpha[0]
        dN = alpha[-1]
        # Overwrite the first/last rows with the chosen end conditions.
        if kind == 'not-a-knot':
            d2 = alpha[1]
            dN1 = alpha[-2]
            B[0,:3] = [d2,-d1-d2,d1]
            B[-1,-3:] = [dN,-dN1-dN,dN1]
        elif kind == 'runout':
            B[0,:3] = [1,-2,1]
            B[-1,-3:] = [1,-2,1]
        elif kind == 'parabolic':
            B[0,:2] = [1,-1]
            B[-1,-2:] = [-1,1]
        elif kind == 'periodic':
            raise NotImplementedError
        elif kind == 'symmetric':
            raise NotImplementedError
        else:
            B[0,:2] = [2*d1,d1]
            B[-1,-2:] = [dN,2*dN]
        # Set up RHS (b)
        b = np.empty((Np1,)+yk.shape[1:])
        dyk = (yk[1:]-yk[:-1])*1.0
        if kind in ['not-a-knot', 'runout', 'parabolic']:
            b[0] = b[-1] = 0.0
        elif kind == 'periodic':
            raise NotImplementedError
        elif kind == 'symmetric':
            raise NotImplementedError
        else:
            b[0] = (dyk[0]/d1 - sl_0)
            b[-1] = -(dyk[-1]/dN - sl_N)
        b[1:-1,...] = (dyk[1:]/alpha[1:]-dyk[:-1]/alpha[:-1])
        b *= 6.0
        return B, b, None, None
    else:
        raise ValueError("%s not supported" % kind)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# If conds is None, then use the not_a_knot condition
# at K-1 farthest separated points in the interval
def _find_not_a_knot(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued second
# derivative at K-1 farthest separated points
def _find_natural(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then ensure zero-valued first
# derivative at K-1 farthest separated points
def _find_clamped(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def _find_fixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# If conds is None, then use coefficient periodicity
# If conds is 'function' then use function periodicity
def _find_periodic(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# Doesn't use conds
def _find_symmetric(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
# conds is a dictionary with multiple values
def _find_mixed(xk, yk, order, conds, B):
raise NotImplementedError
return _find_user(xk, yk, order, conds, B)
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
    """
    Return a representation of a spline given data-points at internal knots

    Parameters
    ----------
    xk : array_like
        The input array of x values of rank 1
    yk : array_like
        The input array of y values of rank N. `yk` can be an N-d array to
        represent more than one curve, through the same `xk` points. The first
        dimension is assumed to be the interpolating dimension and is the same
        length of `xk`.
    order : int, optional
        Order of the spline
    kind : str, optional
        Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
        'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
    conds : optional
        Extra end conditions; interpretation depends on `kind`.

    Returns
    -------
    splmake : tuple
        Return a (`xk`, `cvals`, `k`) representation of a spline given
        data-points where the (internal) knots are at the data-points.

    Raises
    ------
    ValueError
        If `order` is negative.
    NotImplementedError
        If no solver named ``_find_<kind>`` exists.
    """
    yk = np.asanyarray(yk)
    order = int(order)
    if order < 0:
        raise ValueError("order must not be negative")
    if order == 0:
        # Piecewise-constant: one coefficient per interval.
        return xk, yk[:-1], order
    elif order == 1:
        # Piecewise-linear: the data points are the coefficients.
        return xk, yk, order
    # Bug fix: look the end-condition handler up by name instead of
    # eval() wrapped in a bare `except:`, which executed arbitrary text
    # and masked unrelated errors (e.g. typos inside the handler).
    func = globals().get('_find_%s' % kind)
    if func is None:
        raise NotImplementedError
    # the constraint matrix
    B = _fitpack._bsplmat(order, xk)
    coefs = func(xk, yk, order, conds, B)
    return xk, coefs, order
def spleval(xck, xnew, deriv=0):
    """
    Evaluate a fixed spline represented by the given tuple at the new x-values
    The `xj` values are the interior knot points. The approximation
    region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
    should have length N+k where `k` is the order of the spline.

    Parameters
    ----------
    (xj, cvals, k) : tuple
        Parameters that define the fixed spline
    xj : array_like
        Interior knot points
    cvals : array_like
        B-spline coefficients (as returned by `splmake`)
    k : int
        Order of the spline
    xnew : array_like
        Locations to calculate spline
    deriv : int
        Order of derivative to evaluate (0 = the spline itself)

    Returns
    -------
    spleval : ndarray
        If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
        `xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
        providing the interpolation of multiple curves.

    Notes
    -----
    Internally, an additional `k`-1 knot points are added on either side of
    the spline.
    """
    (xj,cvals,k) = xck
    oldshape = np.shape(xnew)
    xx = np.ravel(xnew)
    sh = cvals.shape[1:]
    res = np.empty(xx.shape + sh, dtype=cvals.dtype)
    # Evaluate each trailing-dimension curve separately; sl selects one
    # coefficient column while keeping the interpolation axis whole.
    for index in np.ndindex(*sh):
        sl = (slice(None),)+index
        if issubclass(cvals.dtype.type, np.complexfloating):
            # _bspleval works on real data, so complex coefficients are
            # handled by evaluating real and imaginary parts independently.
            res[sl].real = _fitpack._bspleval(xx,xj,cvals.real[sl],k,deriv)
            res[sl].imag = _fitpack._bspleval(xx,xj,cvals.imag[sl],k,deriv)
        else:
            res[sl] = _fitpack._bspleval(xx,xj,cvals[sl],k,deriv)
    res.shape = oldshape + sh
    return res
def spltopp(xk, cvals, k):
    """Return a piece-wise polynomial object from a fixed-spline tuple.

    Parameters
    ----------
    xk : array_like
        Interior knot points.
    cvals : array_like
        B-spline coefficients (as produced by `splmake`).
    k : int
        Order of the spline.

    Returns
    -------
    ppform
        Piecewise polynomial equivalent of the spline.
    """
    return ppform.fromspline(xk, cvals, k)
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
    """
    Interpolate a curve at new points using a spline fit

    Parameters
    ----------
    xk, yk : array_like
        The x and y values that define the curve.
    xnew : array_like
        The x values where spline should estimate the y values.
    order : int
        Order of the spline; default is 3.
    kind : string
        One of {'smoothest'}
    conds : optional
        Extra end conditions; forwarded unchanged to `splmake`, whose
        `kind` determines how they are interpreted.

    Returns
    -------
    spline : ndarray
        An array of y values; the spline evaluated at the positions `xnew`.
    """
    return spleval(splmake(xk,yk,order=order,kind=kind,conds=conds),xnew)
| bsd-3-clause |
geopandas/geopandas | geopandas/tools/clip.py | 2 | 8098 | """
geopandas.clip
==============
A module to clip vector data using GeoPandas.
"""
import warnings
import numpy as np
import pandas as pd
from shapely.geometry import Polygon, MultiPolygon
from geopandas import GeoDataFrame, GeoSeries
from geopandas.array import _check_crs, _crs_mismatch_warn
def _clip_points(gdf, poly):
    """Clip point geometry to the polygon extent.

    Select the rows of a point GeoDataFrame/GeoSeries whose geometry
    intersects `poly`; attributes are carried along unchanged.

    Parameters
    ----------
    gdf : GeoDataFrame, GeoSeries
        Composed of point geometry that will be clipped to the poly.
    poly : (Multi)Polygon
        Reference geometry used to spatially clip the data.

    Returns
    -------
    GeoDataFrame
        The subset of gdf that intersects with poly.
    """
    # The spatial index returns positional indices of intersecting rows.
    hits = gdf.sindex.query(poly, predicate="intersects")
    return gdf.iloc[hits]
def _clip_line_poly(gdf, poly):
    """Clip line and polygon geometry to the polygon extent.

    Keep only the features intersecting `poly` and replace each kept
    geometry with its intersection with `poly`; attributes are preserved.

    Parameters
    ----------
    gdf : GeoDataFrame, GeoSeries
        Line or polygon geometry that is clipped to poly.
    poly : (Multi)Polygon
        Reference polygon for clipping.

    Returns
    -------
    GeoDataFrame
        The clipped subset of gdf that intersects with poly.
    """
    hits = gdf.sindex.query(poly, predicate="intersects")
    subset = gdf.iloc[hits]
    if isinstance(subset, GeoDataFrame):
        # Copy first so attribute columns survive the geometry replacement.
        result = subset.copy()
        result[gdf.geometry.name] = subset.intersection(poly)
        return result
    # GeoSeries: intersection already yields a new clipped series.
    return subset.intersection(poly)
def clip(gdf, mask, keep_geom_type=False):
    """Clip points, lines, or polygon geometries to the mask extent.
    Both layers must be in the same Coordinate Reference System (CRS).
    The `gdf` will be clipped to the full extent of the clip object.
    If there are multiple polygons in mask, data from `gdf` will be
    clipped to the total boundary of all polygons in mask.
    Parameters
    ----------
    gdf : GeoDataFrame or GeoSeries
        Vector layer (point, line, polygon) to be clipped to mask.
    mask : GeoDataFrame, GeoSeries, (Multi)Polygon
        Polygon vector layer used to clip `gdf`.
        The mask's geometry is dissolved into one geometric feature
        and intersected with `gdf`.
    keep_geom_type : boolean, default False
        If True, return only geometries of original type in case of intersection
        resulting in multiple geometry types or GeometryCollections.
        If False, return all resulting geometries (potentially mixed-types).
    Returns
    -------
    GeoDataFrame or GeoSeries
        Vector data (points, lines, polygons) from `gdf` clipped to
        polygon boundary from mask.
    Examples
    --------
    Clip points (global cities) with a polygon (the South American continent):
    >>> world = geopandas.read_file(
    ...     geopandas.datasets.get_path('naturalearth_lowres'))
    >>> south_america = world[world['continent'] == "South America"]
    >>> capitals = geopandas.read_file(
    ...     geopandas.datasets.get_path('naturalearth_cities'))
    >>> capitals.shape
    (202, 2)
    >>> sa_capitals = geopandas.clip(capitals, south_america)
    >>> sa_capitals.shape
    (12, 2)
    """
    if not isinstance(gdf, (GeoDataFrame, GeoSeries)):
        raise TypeError(
            "'gdf' should be GeoDataFrame or GeoSeries, got {}".format(type(gdf))
        )
    if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon)):
        raise TypeError(
            "'mask' should be GeoDataFrame, GeoSeries or"
            "(Multi)Polygon, got {}".format(type(mask))
        )
    if isinstance(mask, (GeoDataFrame, GeoSeries)):
        if not _check_crs(gdf, mask):
            _crs_mismatch_warn(gdf, mask, stacklevel=3)
    if isinstance(mask, (GeoDataFrame, GeoSeries)):
        box_mask = mask.total_bounds
    else:
        box_mask = mask.bounds
    box_gdf = gdf.total_bounds
    # Fast reject: if the bounding boxes do not overlap, nothing can
    # intersect, so return an empty frame of the same schema.
    if not (
        ((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))
        and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))
    ):
        return gdf.iloc[:0]
    # Dissolve the mask into a single (multi)polygon for clipping.
    if isinstance(mask, (GeoDataFrame, GeoSeries)):
        poly = mask.geometry.unary_union
    else:
        poly = mask
    # Partition the input rows by geometry family; each family is clipped
    # with the appropriate helper and the pieces are re-assembled below.
    geom_types = gdf.geometry.type
    poly_idx = np.asarray((geom_types == "Polygon") | (geom_types == "MultiPolygon"))
    line_idx = np.asarray(
        (geom_types == "LineString")
        | (geom_types == "LinearRing")
        | (geom_types == "MultiLineString")
    )
    point_idx = np.asarray((geom_types == "Point") | (geom_types == "MultiPoint"))
    geomcoll_idx = np.asarray((geom_types == "GeometryCollection"))
    if point_idx.any():
        point_gdf = _clip_points(gdf[point_idx], poly)
    else:
        point_gdf = None
    if poly_idx.any():
        poly_gdf = _clip_line_poly(gdf[poly_idx], poly)
    else:
        poly_gdf = None
    if line_idx.any():
        line_gdf = _clip_line_poly(gdf[line_idx], poly)
    else:
        line_gdf = None
    if geomcoll_idx.any():
        geomcoll_gdf = _clip_line_poly(gdf[geomcoll_idx], poly)
    else:
        geomcoll_gdf = None
    # Remember each row's original position so the concatenated result can
    # be put back into input order at the end.
    order = pd.Series(range(len(gdf)), index=gdf.index)
    concat = pd.concat([point_gdf, line_gdf, poly_gdf, geomcoll_gdf])
    if keep_geom_type:
        geomcoll_concat = (concat.geom_type == "GeometryCollection").any()
        geomcoll_orig = geomcoll_idx.any()
        # Collections that only appeared *after* clipping can be exploded
        # and filtered back to the original geometry family.
        new_collection = geomcoll_concat and not geomcoll_orig
        if geomcoll_orig:
            warnings.warn(
                "keep_geom_type can not be called on a "
                "GeoDataFrame with GeometryCollection."
            )
        else:
            polys = ["Polygon", "MultiPolygon"]
            lines = ["LineString", "MultiLineString", "LinearRing"]
            points = ["Point", "MultiPoint"]
            # Check that the gdf for multiple geom types (points, lines and/or polys)
            orig_types_total = sum(
                [
                    gdf.geom_type.isin(polys).any(),
                    gdf.geom_type.isin(lines).any(),
                    gdf.geom_type.isin(points).any(),
                ]
            )
            # Check how many geometry types are in the clipped GeoDataFrame
            clip_types_total = sum(
                [
                    concat.geom_type.isin(polys).any(),
                    concat.geom_type.isin(lines).any(),
                    concat.geom_type.isin(points).any(),
                ]
            )
            # Check there aren't any new geom types in the clipped GeoDataFrame
            more_types = orig_types_total < clip_types_total
            if orig_types_total > 1:
                warnings.warn(
                    "keep_geom_type can not be called on a mixed type GeoDataFrame."
                )
            elif new_collection or more_types:
                orig_type = gdf.geom_type.iloc[0]
                if new_collection:
                    concat = concat.explode()
                if orig_type in polys:
                    concat = concat.loc[concat.geom_type.isin(polys)]
                elif orig_type in lines:
                    concat = concat.loc[concat.geom_type.isin(lines)]
    # Return empty GeoDataFrame or GeoSeries if no shapes remain
    if len(concat) == 0:
        return gdf.iloc[:0]
    # Preserve the original order of the input
    if isinstance(concat, GeoDataFrame):
        concat["_order"] = order
        return concat.sort_values(by="_order").drop(columns="_order")
    else:
        # GeoSeries result: sort via a temporary frame, then return just
        # the geometry column.
        concat = GeoDataFrame(geometry=concat)
        concat["_order"] = order
        return concat.sort_values(by="_order").geometry
| bsd-3-clause |
DrigerG/IIITB-ML | project/TexCounter/tex_counter.py | 1 | 9127 | #!/usr/bin/env python
"""TexCounter.py: Counts the number of garments in a shelf"""
import cv2
import sys
import logging
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.cluster import MeanShift, estimate_bandwidth
__author__ = "Pradeep Kumar A.V."
logging.basicConfig(filename='execution_log.log',
format='%(asctime)s %(message)s',
level=logging.DEBUG)
def log_and_print(msg):
    """Record msg in the execution log file and echo it to stdout."""
    logging.info(msg)
    print(msg)
class TexCounter(object):
    """Count garments in shelf images using edge detection, adaptive
    thresholding and connected-component labelling (see batch_process)."""
    def __init__(self, data_dir, out_dir, image_list, bbox_list):
        # data_dir/out_dir: input image folder and output folder for plots.
        # image_list/bbox_list: parallel lists of file names and their
        # [row0, row1, col0, col1] regions of interest.
        self.data_dir = data_dir
        self.out_dir = out_dir
        self.image_list = image_list
        self.bbox_list = bbox_list
        # Homogeneity index
        # NOTE(review): "egde_threshold" has a typo (edge); kept as-is so
        # any external references do not break.
        self.egde_threshold = 20
        self.homogeneity_index = 0.5
    @staticmethod
    def _load_img(path):
        """
        :param path: path of image to be loaded.
        :return: cv2 image object (RGB channel order)
        """
        img = cv2.imread(path)
        # Convert the image from cv2 default BGR format to RGB
        return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    @staticmethod
    def _region_of_interest(im, bbox):
        """
        Crop im to the region given as bbox = [row0, row1, col0, col1].
        """
        img = im[bbox[0]:bbox[1], bbox[2]:bbox[3]]
        return img
    @staticmethod
    def _denoise(img):
        """
        De-noises the image using Gaussian smoothing
        :param img: image to be de-noised
        :return: Gaussian smoothed image
        """
        # Kernel size (0, 0) lets OpenCV derive it from sigma=1.
        return cv2.GaussianBlur(img, (0, 0), 1)
    def _gradient(self, img):
        """
        :param img: input RGB image
        :return: Sobel Image gradient
        """
        # A boosted version of horizontal gradient
        kernel_y = np.array(
            [[2, 4, 2],
             [0, 0, 0],
             [-2, -4, -2]])
        kernel_x = np.array(
            [[-1, 0, 1],
             [-2, 0, 2],
             [-1, 0, 1]])
        im1 = np.uint8(np.absolute(
            cv2.filter2D(img, -1, kernel_x)))
        im2 = np.uint8(np.absolute(
            cv2.filter2D(img, -1, kernel_y)))
        # Keep kernel_y's response only where kernel_x's response is weak
        # (at or below egde_threshold), suppressing crossing edges.
        return np.invert(im1 > self.egde_threshold)*im2
    @staticmethod
    def _kmeans_quantization(img):
        """
        :param img: input RGB image
        :return: k-Means color quantized image (K=8 clusters)
        """
        data = img.reshape((-1, 3))
        data = np.float32(data)
        # Stop after 10 iterations or epsilon 1, whichever comes first.
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1)
        K = 8
        ret, label, center = cv2.kmeans(data, K, None, criteria, 10,
                                        cv2.KMEANS_RANDOM_CENTERS)
        center = np.uint8(center)
        # Replace each pixel with its cluster centre colour.
        res = center[label.flatten()]
        res2 = res.reshape(img.shape)
        return res2
    @staticmethod
    def _enhance_image(img):
        """
        Boost contrast by histogram-equalising the luma (Y) channel.
        :param img: input RGB image
        :return: contrast-enhanced image
        """
        img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
        # equalize the histogram of the Y channel
        img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
        # convert the YUV image back to RGB format
        img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
        return img_output
    def _preprocess_image(self, img):
        """
        Denoise -> enhance -> gradient -> colour-quantize pipeline.
        :param img: input RGB image
        :return: Preprocessed image
        """
        # Denoise image
        img = self._denoise(img)
        # Enhance image
        img = self._enhance_image(img)
        # Determine the image gradient
        img = self._gradient(img)
        # Quantize the color levels using kmeans
        img = self._kmeans_quantization(img)
        return img
    def process_image(self, img):
        """
        Threshold the preprocessed image and close small gaps, producing
        a binary image suitable for connected-component labelling.
        :param img: input RGB image
        :return: binary thresholded image
        """
        im = self._preprocess_image(img)
        im = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
        # adaptive thresholding to handle different illumination scales
        adaptive_th_img = cv2.adaptiveThreshold(im, 255,
                                                cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                                cv2.THRESH_BINARY, 5, 0)
        # image dilation to fill the gaps left by thresholding
        kernel = np.ones((2, 2), np.uint8)
        adaptive_th_img = cv2.morphologyEx(adaptive_th_img, cv2.MORPH_OPEN,
                                           kernel)
        # Wide horizontal closing joins fragments along a garment edge.
        kernel = np.ones((1, 10), np.uint8)
        adaptive_th_img = cv2.morphologyEx(adaptive_th_img, cv2.MORPH_CLOSE,
                                           kernel)
        return adaptive_th_img
    def label_components(self, img):
        """
        Label 8-connected components, discard small noise components and
        those whose area deviates from the average by more than the
        homogeneity index.
        :param img: binary image from process_image
        :return: (label image, list of surviving label ids)
        """
        filtered_labels = []
        labels = []
        output = cv2.connectedComponentsWithStats(img, 8, cv2.CV_32S)
        count = output[0]
        label_img = output[1]
        stats = output[2]
        # Filter noise
        # Anything under a tenth of the largest component is noise.
        limit = (max(stats[1:count, cv2.CC_STAT_AREA]))/10
        for i in range(1, count):
            if stats[i, cv2.CC_STAT_AREA] > limit:
                filtered_labels.append((i, stats[i, cv2.CC_STAT_AREA]))
            else:
                label_img[label_img == i] = 0
        # Filter non homogeneous (in terms of area) labels
        avg_area = np.mean([a for l, a in filtered_labels])
        for lab, area in filtered_labels:
            if area > avg_area * self.homogeneity_index:
                labels.append(lab)
            else:
                label_img[label_img == lab] = 0
        return label_img, labels
    def batch_process(self):
        """
        Run the full pipeline on every (image, bbox) pair, save a 2x2
        summary figure per image into out_dir and log the garment count.
        """
        for entry, bbox in zip(self.image_list, self.bbox_list):
            img = self._load_img('%s/%s' % (self.data_dir, entry))
            roi = self._region_of_interest(img, bbox)
            pro_img = self.process_image(roi)
            label_img, labels = self.label_components(pro_img)
            # Initialize subplot structure
            sub, ax = plt.subplots(2, 2)
            # Plot original image
            ax[0][0].imshow(img)
            ax[0][0].set_title('Image - I')
            # Plot the region of interest
            ax[0][1].imshow(roi)
            ax[0][1].set_title('Image - ROI')
            ax[1][0].imshow(pro_img, cmap='gray')
            ax[1][0].set_title('Image - Intermediate')
            ax[1][1].imshow(label_img, cmap='gray')
            ax[1][1].set_title('Image - Processed')
            # The component count is the garment count estimate.
            disp_string = "Image - %s, BBox - %s, Count - %s" \
                          % (entry, bbox, len(labels))
            sub.tight_layout()
            sub.text(0.25, 0.01, "\n\n\n%s" % disp_string)
            plt.savefig("%s/output-%s" % (self.out_dir, entry))
            log_and_print(disp_string)
# Shared state between get_bbox() and the OpenCV mouse callback below.
refPt = []  # the two (x, y) corners of the user-drawn rectangle
cropping = False  # True while the left mouse button is held down
img_window = None  # image currently displayed in the selection window
def click_and_crop(event, x, y, flags, param):
    """OpenCV mouse callback: record a rectangle dragged with the left
    mouse button into the module-level refPt and draw it on the image."""
    # grab references to the global variables
    global refPt, cropping, img_window
    # if the left mouse button was clicked, record the starting
    # (x, y) coordinates and indicate that cropping is being
    # performed
    if event == cv2.EVENT_LBUTTONDOWN:
        refPt = [(x, y)]
        cropping = True
    # check to see if the left mouse button was released
    elif event == cv2.EVENT_LBUTTONUP:
        # record the ending (x, y) coordinates and indicate that
        # the cropping operation is finished
        refPt.append((x, y))
        cropping = False
        # return refPt[0], refPt[1]
        # draw a rectangle around the region of interest
        cv2.rectangle(img_window, refPt[0], refPt[1], (0, 255, 0), 2)
        cv2.imshow("image", img_window)
def get_bbox(image_path):
    """Interactively let the user draw a bounding box on the image.

    Shows the image in an OpenCV window; 'r' resets the selection, 'c'
    confirms.  Returns [row0, row1, col0, col1] built from the two
    recorded corners.

    NOTE(review): if the user presses 'c' without drawing, len(refPt) != 2
    and the function implicitly returns None — confirm callers handle it.
    """
    global img_window
    img_window = cv2.imread(image_path)
    clone = img_window.copy()
    cv2.namedWindow("image", cv2.WINDOW_NORMAL)
    cv2.setMouseCallback("image", click_and_crop)
    # keep looping until the 'q' key is pressed
    while True:
        # display the image and wait for a keypress
        cv2.imshow("image", img_window)
        key = cv2.waitKey(1) & 0xFF
        # if the 'r' key is pressed, reset the cropping region
        if key == ord("r"):
            img_window = clone.copy()
        # if the 'c' key is pressed, break from the loop
        elif key == ord("c"):
            break
    # if there are two reference points, then crop the region of interest
    # from the image and display it
    if len(refPt) == 2:
        # close all open windows
        cv2.destroyAllWindows()
        # Convert the two (x, y) corners into [row0, row1, col0, col1].
        return [refPt[0][1], refPt[1][1], refPt[0][0], refPt[1][0]]
def main():
    """Read the bbox CSV (path from argv[1] or 'bbox.csv'), prompt the
    user for any missing bounding boxes, then run the batch counter."""
    if len(sys.argv) > 1:
        input_csv = sys.argv[1]
    else:
        input_csv = "bbox.csv"
    input_path = "Data"
    output_path = "Outputs"
    image_data = pd.read_csv(input_csv)
    # Turn NaN cells into None so missing bboxes are easy to detect.
    image_data = image_data.where((
        pd.notnull(image_data)), None)
    bbox_list = []
    image_list = []
    for entry in image_data.values:
        image_list.append(entry[0])
        bbox = list(entry[1:])
        if None in bbox:
            # Any missing coordinate triggers interactive selection.
            bbox = get_bbox('%s/%s' % (input_path, entry[0]))
        bbox_list.append(bbox)
    tc = TexCounter(input_path, output_path, image_list, bbox_list)
    tc.batch_process()
# Script entry point.
if __name__ == '__main__':
    main()
| apache-2.0 |
robotenique/intermediateProgramming | MAC0209/paretoLaw.py | 1 | 2832 | '''
Problem 1.1 of the book 'Introduction to Computer Simulation Methods',
chapter one: Distribution of Money
'''
__author__ = "Juliano Garcia de Oliveira"
import random as rnd
import matplotlib.pyplot as plt
import numpy as np
def createAgents(n, m0):
    """Return a dict mapping each agent id in ``range(n)`` to the
    initial amount of money ``m0``."""
    return {agent_id: m0 for agent_id in range(n)}
def executeTransactions(n, agents, t):
    """Perform ``t`` random pairwise money exchanges.

    Each exchange picks two distinct agents at random and splits their
    combined wealth by a uniformly random fraction, so the total amount
    of money in the system is conserved.  ``agents`` is modified in
    place and also returned.  (``n`` is kept for interface
    compatibility; the participants are taken from the dict's keys.)
    """
    ids = list(agents.keys())
    for _ in range(t):
        first = rnd.choice(ids)
        second = rnd.choice(ids)
        while second == first:
            second = rnd.choice(ids)
        pot = agents[first] + agents[second]
        fraction = rnd.random()
        agents[first] = fraction * pot
        agents[second] = (1 - fraction) * pot
    return agents
def executeTransactionFrac(n, agents, t, frac):
    """Perform ``t`` random pairwise exchanges where each agent saves a
    fraction ``frac`` of their money.

    Two distinct agents are chosen at random and only ``(1 - frac)`` of
    the randomly split difference changes hands, so total money is
    conserved.  ``agents`` is modified in place and also returned.
    (``n`` is kept for interface compatibility.)
    """
    ids = list(agents.keys())
    for _ in range(t):
        first = rnd.choice(ids)
        second = rnd.choice(ids)
        while second == first:
            second = rnd.choice(ids)
        fraction = rnd.random()
        delta = (1 - frac) * (fraction * agents[second] -
                              (1 - fraction) * agents[first])
        agents[first] += delta
        agents[second] -= delta
    return agents
def showBarplot(agents):
    """Display each agent's money as a bar chart (one bar per agent).

    Blocks until the matplotlib window is closed.  The original version
    created an unused dict (``ab``) and bound the bar container to an
    unused name; both dead locals are removed here.
    """
    positions = np.arange(len(agents))
    fig, ax = plt.subplots()
    ax.bar(positions, list(agents.values()))
    plt.show()
def showPiechart(agents, m0):
    """Show how agents are distributed across four wealth brackets
    relative to the initial amount ``m0``.

    Blocks until the matplotlib window is closed.
    """
    values = list(agents.values())
    # Bucket counts; the brackets partition (-inf, inf) without overlap.
    cat1 = sum(1 for v in values if v > 0.75 * m0)
    cat2 = sum(1 for v in values if 0.50 * m0 < v <= 0.75 * m0)
    cat3 = sum(1 for v in values if 0.25 * m0 < v <= 0.50 * m0)
    cat4 = sum(1 for v in values if v <= 0.25 * m0)
    labels = 'Above 75%', 'From 50% to 75%', 'From 25% to 50%', 'Below 25%'
    sizes = [cat1, cat2, cat3, cat4]
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
    ax1.axis('equal')  # equal aspect ratio so the pie renders as a circle
    plt.show()
def main():
    """Prompt for simulation parameters, then run and plot both the
    simple and the savings variants of the money-exchange model."""
    n = int(input("N = "))
    m0 = float(input("m0 = "))
    times = int(input("times = "))
    # Pareto Law - Simple Model
    simple = createAgents(n, m0)
    simple = executeTransactions(n, simple, times)
    showBarplot(simple)
    showPiechart(simple, m0)
    # Pareto Law - Savings Model
    frac = float(input("Fraction = "))
    savings = createAgents(n, m0)
    savings = executeTransactionFrac(n, savings, times, frac)
    showBarplot(savings)
    showPiechart(savings, m0)


if __name__ == '__main__':
    main()
| unlicense |
gamahead/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
    """Return (new_figure_manager, draw_if_interactive, show) for pylab.

    Resolves the module-level ``backend`` string, imports the matching
    ``matplotlib.backends.backend_*`` module (or an external
    ``module://`` path) and pulls the pylab entry points out of it,
    substituting harmless no-ops for hooks the backend does not define.
    """
    # Import the requested backend into a generic module object
    if backend.startswith('module://'):
        # External backend: everything after 'module://' is the full
        # dotted module path to import as-is.
        backend_name = backend[9:]
    else:
        backend_name = 'backend_'+backend
        backend_name = backend_name.lower() # until we banish mixed case
        backend_name = 'matplotlib.backends.%s'%backend_name.lower()
    # fromlist forces __import__ to return the leaf module rather than
    # the top-level 'matplotlib' package.
    backend_mod = __import__(backend_name,
                             globals(),locals(),[backend_name])

    # Things we pull in from all backends
    new_figure_manager = backend_mod.new_figure_manager

    # image backends like pdf, agg or svg do not need to do anything
    # for "show" or "draw_if_interactive", so if they are not defined
    # by the backend, just do nothing
    def do_nothing_show(*args, **kwargs):
        # Only warn when show() is typed at an interactive prompt, where
        # the user plainly expected a window to appear.
        frame = inspect.currentframe()
        fname = frame.f_back.f_code.co_filename
        if fname in ('<stdin>', '<ipython console>'):
            warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
                          (backend, matplotlib.matplotlib_fname()))
    def do_nothing(*args, **kwargs): pass
    backend_version = getattr(backend_mod,'backend_version', 'unknown')
    show = getattr(backend_mod, 'show', do_nothing_show)
    draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)

    # Additional imports which only happen for certain backends. This section
    # should probably disappear once all backends are uniform.
    if backend.lower() in ['wx','wxagg']:
        # Side effect: extends the module-level __all__ at call time.
        Toolbar = backend_mod.Toolbar
        __all__.append('Toolbar')

    matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
    return new_figure_manager, draw_if_interactive, show
| gpl-3.0 |
sonnyhu/scikit-learn | sklearn/naive_bayes.py | 4 | 30634 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
    """Abstract base class for naive Bayes estimators."""

    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Return ``log P(c) + log P(x|c)`` for every row x of X.

        The result is array-like of shape [n_samples, n_classes] and is
        deliberately unnormalized.  ``predict``, ``predict_proba`` and
        ``predict_log_proba`` pass their input through to this method
        as-is.
        """

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
            Predicted target values for X.
        """
        log_joint = self._joint_log_likelihood(X)
        # The class with the largest joint log likelihood wins.
        return self.classes_[log_joint.argmax(axis=1)]

    def predict_log_proba(self, X):
        """Return log-probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Log-probability of the samples for each class in the model,
            with columns ordered as in the ``classes_`` attribute.
        """
        log_joint = self._joint_log_likelihood(X)
        # Normalize by the marginal: log P(x) = logsumexp over classes.
        log_prob_x = logsumexp(log_joint, axis=1)
        return log_joint - np.atleast_2d(log_prob_x).T

    def predict_proba(self, X):
        """Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Probability of the samples for each class in the model,
            with columns ordered as in the ``classes_`` attribute.
        """
        log_proba = self.predict_log_proba(X)
        return np.exp(log_proba)
class GaussianNB(BaseNB):
    """
    Gaussian Naive Bayes (GaussianNB)

    Can perform online updates to model parameters via `partial_fit` method.
    For details on algorithm used to update feature means and variance online,
    see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:

        http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf

    Read more in the :ref:`User Guide <gaussian_naive_bayes>`.

    Parameters
    ----------
    priors : array-like, shape (n_classes,)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_prior_ : array, shape (n_classes,)
        probability of each class.

    class_count_ : array, shape (n_classes,)
        number of training samples observed in each class.

    theta_ : array, shape (n_classes, n_features)
        mean of each feature per class

    sigma_ : array, shape (n_classes, n_features)
        variance of each feature per class

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> Y = np.array([1, 1, 1, 2, 2, 2])
    >>> from sklearn.naive_bayes import GaussianNB
    >>> clf = GaussianNB()
    >>> clf.fit(X, Y)
    GaussianNB(priors=None)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    >>> clf_pf = GaussianNB()
    >>> clf_pf.partial_fit(X, Y, np.unique(Y))
    GaussianNB(priors=None)
    >>> print(clf_pf.predict([[-0.8, -1]]))
    [1]
    """

    def __init__(self, priors=None):
        self.priors = priors

    def fit(self, X, y, sample_weight=None):
        """Fit Gaussian Naive Bayes according to X, y

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values.

        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights applied to individual samples (1. for unweighted).

            .. versionadded:: 0.17
               Gaussian Naive Bayes supports fitting with *sample_weight*.

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y)
        # Delegate to the incremental path with _refit=True so fit() and
        # partial_fit() share a single implementation.
        return self._partial_fit(X, y, np.unique(y), _refit=True,
                                 sample_weight=sample_weight)

    @staticmethod
    def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
        """Compute online update of Gaussian mean and variance.

        Given starting sample count, mean, and variance, a new set of
        points X, and optionally sample weights, return the updated mean and
        variance. (NB - each dimension (column) in X is treated as independent
        -- you get variance, not covariance).

        Can take scalar mean and variance, or vector mean and variance to
        simultaneously update a number of independent Gaussians.

        See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
        http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf

        Parameters
        ----------
        n_past : int
            Number of samples represented in old mean and variance. If sample
            weights were given, this should contain the sum of sample
            weights represented in old mean and variance.

        mu : array-like, shape (number of Gaussians,)
            Means for Gaussians in original set.

        var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.

        X : array-like, shape (n_samples, n_features)
            New data points to fold into the running statistics.

        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        total_mu : array-like, shape (number of Gaussians,)
            Updated mean for each Gaussian over the combined set.

        total_var : array-like, shape (number of Gaussians,)
            Updated variance for each Gaussian over the combined set.
        """
        if X.shape[0] == 0:
            return mu, var

        # Compute (potentially weighted) mean and variance of new datapoints
        if sample_weight is not None:
            n_new = float(sample_weight.sum())
            new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
            new_var = np.average((X - new_mu) ** 2, axis=0,
                                 weights=sample_weight / n_new)
        else:
            n_new = X.shape[0]
            new_var = np.var(X, axis=0)
            new_mu = np.mean(X, axis=0)

        if n_past == 0:
            return new_mu, new_var

        n_total = float(n_past + n_new)

        # Combine mean of old and new data, taking into consideration
        # (weighted) number of observations
        total_mu = (n_new * new_mu + n_past * mu) / n_total

        # Combine variance of old and new data, taking into consideration
        # (weighted) number of observations. This is achieved by combining
        # the sum-of-squared-differences (ssd)
        old_ssd = n_past * var
        new_ssd = n_new * new_var
        total_ssd = (old_ssd + new_ssd +
                     (n_past / float(n_new * n_total)) *
                     (n_new * mu - n_new * new_mu) ** 2)
        total_var = total_ssd / n_total

        return total_mu, total_var

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.

        This is especially useful when the whole dataset is too big to fit in
        memory at once.

        This method has some performance and numerical stability overhead,
        hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as fitting in the memory budget) to
        hide the overhead.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values.

        classes : array-like, shape (n_classes,), optional (default=None)
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights applied to individual samples (1. for unweighted).

            .. versionadded:: 0.17

        Returns
        -------
        self : object
            Returns self.
        """
        return self._partial_fit(X, y, classes, _refit=False,
                                 sample_weight=sample_weight)

    def _partial_fit(self, X, y, classes=None, _refit=False,
                     sample_weight=None):
        """Actual implementation of Gaussian NB fitting.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values.

        classes : array-like, shape (n_classes,), optional (default=None)
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        _refit: bool, optional (default=False)
            If true, act as though this were the first time we called
            _partial_fit (ie, throw away any past fitting and start over).

        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y)

        # If the ratio of data variance between dimensions is too small, it
        # will cause numerical errors. To address this, we artificially
        # boost the variance by epsilon, a small fraction of the standard
        # deviation of the largest dimension.
        epsilon = 1e-9 * np.var(X, axis=0).max()

        if _refit:
            self.classes_ = None

        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_features = X.shape[1]
            n_classes = len(self.classes_)
            self.theta_ = np.zeros((n_classes, n_features))
            self.sigma_ = np.zeros((n_classes, n_features))

            self.class_count_ = np.zeros(n_classes, dtype=np.float64)

            # Initialise the class prior
            n_classes = len(self.classes_)
            # Take into account the priors
            if self.priors is not None:
                priors = np.asarray(self.priors)
                # Check that the provide prior match the number of classes
                if len(priors) != n_classes:
                    raise ValueError('Number of priors must match number of'
                                     ' classes.')
                # Check that the sum is 1 up to floating point rounding: an
                # exact `!= 1.0` test spuriously rejected valid priors such
                # as [1/3, 1/3, 1/3] whose float sum is not exactly 1.0.
                if not np.isclose(priors.sum(), 1.0):
                    raise ValueError('The sum of the priors should be 1.')
                # Check that the prior are non-negative
                if (priors < 0).any():
                    raise ValueError('Priors must be non-negative.')
                self.class_prior_ = priors
            else:
                # Initialize the priors to zeros for each class
                self.class_prior_ = np.zeros(len(self.classes_),
                                             dtype=np.float64)
        else:
            if X.shape[1] != self.theta_.shape[1]:
                msg = "Number of features %d does not match previous data %d."
                raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
            # Put epsilon back in each time
            self.sigma_[:, :] -= epsilon

        classes = self.classes_

        unique_y = np.unique(y)
        unique_y_in_classes = in1d(unique_y, classes)

        if not np.all(unique_y_in_classes):
            raise ValueError("The target label(s) %s in y do not exist in the "
                             "initial classes %s" %
                             (y[~unique_y_in_classes], classes))

        for y_i in unique_y:
            i = classes.searchsorted(y_i)
            X_i = X[y == y_i, :]

            if sample_weight is not None:
                sw_i = sample_weight[y == y_i]
                N_i = sw_i.sum()
            else:
                sw_i = None
                N_i = X_i.shape[0]

            new_theta, new_sigma = self._update_mean_variance(
                self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
                X_i, sw_i)

            self.theta_[i, :] = new_theta
            self.sigma_[i, :] = new_sigma
            self.class_count_[i] += N_i

        self.sigma_[:, :] += epsilon

        # Update if only no priors is provided
        if self.priors is None:
            # Empirical prior, with sample_weight taken into account
            self.class_prior_ = self.class_count_ / self.class_count_.sum()

        return self

    def _joint_log_likelihood(self, X):
        """Compute the per-class Gaussian log likelihood plus log prior."""
        check_is_fitted(self, "classes_")

        X = check_array(X)
        joint_log_likelihood = []
        for i in range(np.size(self.classes_)):
            jointi = np.log(self.class_prior_[i])
            # Log of the Gaussian density, feature-independent (naive):
            # -0.5 * sum(log(2*pi*sigma)) - 0.5 * sum((x - mu)^2 / sigma)
            n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
            n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
                                 (self.sigma_[i, :]), 1)
            joint_log_likelihood.append(jointi + n_ij)

        joint_log_likelihood = np.array(joint_log_likelihood).T
        return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data

    Any estimator based on this class should provide:

    __init__
    _joint_log_likelihood(X) as per BaseNB

    Subclasses are also expected to implement ``_count(X, Y)`` and
    ``_update_feature_log_prob()``, which ``fit``/``partial_fit`` call.
    """

    def _update_class_log_prior(self, class_prior=None):
        # Set class_log_prior_ from, in order of precedence: an explicit
        # prior, the empirical class counts, or a uniform distribution.
        n_classes = len(self.classes_)
        if class_prior is not None:
            if len(class_prior) != n_classes:
                raise ValueError("Number of priors must match number of"
                                 " classes.")
            self.class_log_prior_ = np.log(class_prior)
        elif self.fit_prior:
            # empirical prior, with sample_weight taken into account
            self.class_log_prior_ = (np.log(self.class_count_) -
                                     np.log(self.class_count_.sum()))
        else:
            # Uniform prior: log(1 / n_classes) for every class.
            self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.

        This is especially useful when the whole dataset is too big to fit in
        memory at once.

        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as fitting in the memory budget) to hide the overhead.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        classes : array-like, shape = [n_classes], optional (default=None)
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        sample_weight : array-like, shape = [n_samples], optional (default=None)
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        _, n_features = X.shape

        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_effective_classes = len(classes) if len(classes) > 1 else 2
            self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
            self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                           dtype=np.float64)
        elif n_features != self.coef_.shape[1]:
            msg = "Number of features %d does not match previous data %d."
            raise ValueError(msg % (n_features, self.coef_.shape[-1]))

        Y = label_binarize(y, classes=self.classes_)
        # Binary problems binarize to a single column; append the
        # complement so there is one indicator column per class.
        if Y.shape[1] == 1:
            Y = np.concatenate((1 - Y, Y), axis=1)

        n_samples, n_classes = Y.shape

        if X.shape[0] != Y.shape[0]:
            msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
            raise ValueError(msg % (X.shape[0], y.shape[0]))

        # label_binarize() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            # Weight each sample's indicator row; atleast_2d makes the
            # broadcast against Y's rows explicit.
            sample_weight = np.atleast_2d(sample_weight)
            Y *= check_array(sample_weight).T

        class_prior = self.class_prior

        # Count raw events from data before updating the class log prior
        # and feature log probas
        self._count(X, Y)

        # XXX: OPTIM: we could introduce a public finalization method to
        # be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior any call to predict[_[log_]proba]
        # to avoid computing the smooth log probas at each call to partial fit
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self

    def fit(self, X, y, sample_weight=None):
        """Fit Naive Bayes classifier according to X, y

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        sample_weight : array-like, shape = [n_samples], optional (default=None)
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y, 'csr')
        _, n_features = X.shape

        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        # Binary problems binarize to a single column; append the
        # complement so there is one indicator column per class.
        if Y.shape[1] == 1:
            Y = np.concatenate((1 - Y, Y), axis=1)

        # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently;
        # this means we also don't have to cast X to floating point
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            sample_weight = np.atleast_2d(sample_weight)
            Y *= check_array(sample_weight).T

        class_prior = self.class_prior

        # Count raw events from data before updating the class log prior
        # and feature log probas
        n_effective_classes = Y.shape[1]
        self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
        self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                       dtype=np.float64)
        self._count(X, Y)
        self._update_feature_log_prob()
        self._update_class_log_prior(class_prior=class_prior)
        return self

    # XXX The following is a stopgap measure; we need to set the dimensions
    # of class_log_prior_ and feature_log_prob_ correctly.
    def _get_coef(self):
        # For binary problems expose only the positive class row so the
        # estimator looks like a standard linear model.
        return (self.feature_log_prob_[1:]
                if len(self.classes_) == 2 else self.feature_log_prob_)

    def _get_intercept(self):
        return (self.class_log_prior_[1:]
                if len(self.classes_) == 2 else self.class_log_prior_)

    # Read-only linear-model-style aliases used by partial_fit's
    # feature-count consistency check above.
    coef_ = property(_get_coef)
    intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
    """Naive Bayes classifier for multinomial models.

    Suitable for classification with discrete features (e.g., word counts
    for text classification).  The multinomial distribution normally
    requires integer feature counts, but in practice fractional counts
    such as tf-idf may also work.

    Read more in the :ref:`User Guide <multinomial_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).

    fit_prior : boolean, optional (default=True)
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.

    class_prior : array-like, size (n_classes,), optional (default=None)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_log_prior_ : array, shape (n_classes, )
        Smoothed empirical log probability for each class.

    intercept_ : property
        Mirrors ``class_log_prior_`` for interpreting MultinomialNB
        as a linear model.

    feature_log_prob_ : array, shape (n_classes, n_features)
        Empirical log probability of features given a class, ``P(x_i|y)``.

    coef_ : property
        Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
        as a linear model.

    class_count_ : array, shape (n_classes,)
        Number of samples encountered for each class during fitting,
        weighted by the sample weight when provided.

    feature_count_ : array, shape (n_classes, n_features)
        Number of samples encountered for each (class, feature) during
        fitting, weighted by the sample weight when provided.

    Notes
    -----
    For the rationale behind the names `coef_` and `intercept_`, i.e.
    naive Bayes as a linear classifier, see J. Rennie et al. (2003),
    Tackling the poor assumptions of naive Bayes text classifiers, ICML.

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
    """

    def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior

    def _count(self, X, Y):
        """Accumulate raw per-class and per-feature event counts."""
        values = X.data if issparse(X) else X
        if np.any(values < 0):
            raise ValueError("Input X must be non-negative")
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)

    def _update_feature_log_prob(self):
        """Recompute smoothed per-class feature log probabilities."""
        counts = self.feature_count_ + self.alpha
        totals = counts.sum(axis=1)
        # log P(x_i|y) = log(count + alpha) - log(total per class)
        self.feature_log_prob_ = np.log(counts) - np.log(totals.reshape(-1, 1))

    def _joint_log_likelihood(self, X):
        """Return the unnormalized posterior log probability of X."""
        check_is_fitted(self, "classes_")

        X = check_array(X, accept_sparse='csr')
        return (safe_sparse_dot(X, self.feature_log_prob_.T) +
                self.class_log_prior_)
class BernoulliNB(BaseDiscreteNB):
    """Naive Bayes classifier for multivariate Bernoulli models.

    Like MultinomialNB, this classifier is suitable for discrete data,
    but whereas MultinomialNB works with occurrence counts, BernoulliNB
    is designed for binary/boolean features.

    Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).

    binarize : float or None, optional (default=0.0)
        Threshold for binarizing (mapping to booleans) of sample features.
        If None, input is presumed to already consist of binary vectors.

    fit_prior : boolean, optional (default=True)
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.

    class_prior : array-like, size=[n_classes,], optional (default=None)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_log_prior_ : array, shape = [n_classes]
        Log probability of each class (smoothed).

    feature_log_prob_ : array, shape = [n_classes, n_features]
        Empirical log probability of features given a class, P(x_i|y).

    class_count_ : array, shape = [n_classes]
        Number of samples encountered for each class during fitting,
        weighted by the sample weight when provided.

    feature_count_ : array, shape = [n_classes, n_features]
        Number of samples encountered for each (class, feature) during
        fitting, weighted by the sample weight when provided.

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html

    A. McCallum and K. Nigam (1998). A comparison of event models for naive
    Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
    Text Categorization, pp. 41-48.

    V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
    naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
    """

    def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
                 class_prior=None):
        self.alpha = alpha
        self.binarize = binarize
        self.fit_prior = fit_prior
        self.class_prior = class_prior

    def _count(self, X, Y):
        """Accumulate raw event counts, binarizing X first if requested."""
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)

    def _update_feature_log_prob(self):
        """Recompute smoothed per-class feature log probabilities."""
        counts = self.feature_count_ + self.alpha
        # Each feature is a Bernoulli variable, so the denominator gets
        # alpha pseudo-counts for both of its two possible outcomes.
        totals = self.class_count_ + self.alpha * 2
        self.feature_log_prob_ = np.log(counts) - np.log(totals.reshape(-1, 1))

    def _joint_log_likelihood(self, X):
        """Return the unnormalized posterior log probability of X."""
        check_is_fitted(self, "classes_")

        X = check_array(X, accept_sparse='csr')

        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)

        n_classes, n_features = self.feature_log_prob_.shape
        n_samples, n_features_X = X.shape

        if n_features_X != n_features:
            raise ValueError("Expected input with %d features, got %d instead"
                             % (n_features, n_features_X))

        neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
        # neg_prob dot (1 - X).T is rewritten as sum(neg_prob) plus
        # X dot (feature_log_prob_ - neg_prob).T, which keeps the
        # computation sparse-friendly.
        jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
        jll += self.class_log_prior_ + neg_prob.sum(axis=1)

        return jll
| bsd-3-clause |
davidgbe/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
bubae/gazeAssistRecognize | test.py | 1 | 2664 | import numpy as np
from sklearn import svm
from sklearn import datasets
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
# from sklearn.pipeline import Pipeline
# from sklearn.feature_extraction.text import CountVectorizer
# from sklearn.svm import LinearSVC
# from sklearn.feature_extraction.text import TfidfTransformer
# from sklearn.multiclass import OneVsRestClassifier
# from sklearn import preprocessing
# X_train = np.array(["new york is a hell of a town",
# "new york was originally dutch",
# "the big apple is great",
# "new york is also called the big apple",
# "nyc is nice",
# "people abbreviate new york city as nyc",
# "the capital of great britain is london",
# "london is in the uk",
# "london is in england",
# "london is in great britain",
# "it rains a lot in london",
# "london hosts the british museum",
# "new york is great and so is london",
# "i like london better than new york"])
# y_train_text = [["new york"],["new york"],["new york"],["new york"],["new york"],
# ["new york"],["london"],["london"],["london"],["london"],
# ["london"],["london"],["new york","london"],["new york","london"]]
# X_test = np.array(['nice day in nyc',
# 'welcome to london',
# 'london is rainy',
# 'it is raining in britian',
# 'it is raining in britian and the big apple',
# 'it is raining in britian and nyc',
# 'hello welcome to new york. enjoy it here and london too'])
# target_names = ['New York', 'London']
# lb = preprocessing.LabelBinarizer()
# Y = lb.fit_transform(y_train_text)
# classifier = Pipeline([
# ('vectorizer', CountVectorizer()),
# ('tfidf', TfidfTransformer()),
# ('clf', OneVsRestClassifier(LinearSVC()))])
# classifier.fit(X_train, Y)
# predicted = classifier.predict(X_test)
# all_labels = lb.inverse_transform(predicted)
# for item, labels in zip(X_test, all_labels):
# print '%s => %s' % (item, ', '.join(labels))
# X = [[0], [1], [2], [3]]
# Y = [0, 1, 2, 3]
# lin_clf = svm.LinearSVC()
# lin_clf.fit(X, Y)
# dec = lin_clf.decision_function([[1]])
# # print dec.predict([[1]])
# print dec.shape
# Quick demo: inspect the iris data before fitting a one-vs-rest classifier.
iris = datasets.load_iris()
X, y = iris.data, iris.target
# Parenthesized print calls are valid under both Python 2 and Python 3;
# the original bare `print X.shape` statements were Python-2-only syntax.
print(X.shape)
print(y.shape)
print OneVsRestClassifier(LinearSVC(random_state=0)).fit(X, y).predict(X[50]) | mit |
puruckertom/ubertool | ubertool/agdrift/tests/test_agdrift_integration.py | 1 | 10694 | from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pkgutil
import sys
from tabulate import tabulate
import unittest
try:
from StringIO import StringIO #BitesIO?
except ImportError:
from io import StringIO, BytesIO
##find parent directory and import model
#parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
#print(parentddir)
#sys.path.append(parentddir)
from ..agdrift_exe import Agdrift, AgdriftOutputs
print(sys.path)
# load transposed qaqc data for inputs and expected outputs
# this works for both local nosetests and travis deploy
#input details
# Load the transposed QA/QC input table.  When running as an installed
# package the CSV is read from package data; when running from a source
# checkout it is read relative to this file.
try:
    if __package__ is not None:
        csv_data = pkgutil.get_data(__package__, 'agdrift_qaqc_in_transpose.csv')
        data_inputs = BytesIO(csv_data)
        pd_obj_inputs = pd.read_csv(data_inputs, index_col=0, engine='python')
    else:
        csv_transpose_path_in = os.path.join(os.path.dirname(__file__),"agdrift_qaqc_in_transpose.csv")
        #print(csv_transpose_path_in)
        pd_obj_inputs = pd.read_csv(csv_transpose_path_in, index_col=0, engine='python')
        #with open('./agdrift_qaqc_in_transpose.csv') as f:
        #csv_data = csv.reader(f)
finally:
    pass
#print('agdrift inputs')
#print('agdrift input dimensions ' + str(pd_obj_inputs.shape))
#print('agdrift input keys ' + str(pd_obj_inputs.columns.values.tolist()))
#print(pd_obj_inputs)
# load transposed qaqc data for expected outputs
# works for local nosetests from parent directory
# but not for travis container that calls nosetests:
# csv_transpose_path_exp = "./terrplant_qaqc_exp_transpose.csv"
# pd_obj_exp_out = pd.read_csv(csv_transpose_path_exp, index_col=0, engine='python')
# print(pd_obj_exp_out)
# this works for both local nosetests and travis deploy
#expected output details
# Load the transposed QA/QC expected-output table, mirroring the input load
# above (package data vs. path relative to this file).
try:
    if __package__ is not None:
        csv_data = pkgutil.get_data(__package__, 'agdrift_qaqc_exp_transpose.csv')
        data_exp_outputs = BytesIO(csv_data)
        pd_obj_exp = pd.read_csv(data_exp_outputs, index_col=0, engine= 'python')
        #print("agdrift expected outputs")
        #print('agdrift expected output dimensions ' + str(pd_obj_exp.shape))
        #print('agdrift expected output keys ' + str(pd_obj_exp.columns.values.tolist()))
    else:
        #csv_transpose_path_exp = "./agdrift_qaqc_exp_transpose.csv"
        csv_transpose_path_exp = os.path.join(os.path.dirname(__file__),"agdrift_qaqc_exp_transpose.csv")
        #print(csv_transpose_path_exp)
        pd_obj_exp = pd.read_csv(csv_transpose_path_exp, index_col=0, engine='python')
finally:
    pass
#print('agdrift expected')
#generate output
# Run the model once at import time; all tests below compare attributes of
# this single `agdrift_calc` instance against the expected-output table.
agdrift_calc = Agdrift(pd_obj_inputs, pd_obj_exp)
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_calc.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_calc.db_table = 'output'
agdrift_calc.execute_model()
# Untouched output declarations, used by the dtype-coercion test below.
agdrift_output_empty = AgdriftOutputs()
inputs_json, outputs_json, exp_out_json = agdrift_calc.get_dict_rep()
#print("agdrift output")
#print(inputs_json)
#
#print(tabulate(pd_obj_inputs.iloc[:,0:5], headers='keys', tablefmt='fancy_grid'))
#print(tabulate(pd_obj_inputs.iloc[:,6:11], headers='keys', tablefmt='fancy_grid'))
#print(tabulate(pd_obj_inputs.iloc[:,12:17], headers='keys', tablefmt='fancy_grid'))
#
#print(tabulate(pd_obj_exp.iloc[:,0:1], headers='keys', tablefmt='fancy_grid'))
test = {}
class TestAgdrift(unittest.TestCase):
    """
    Integration tests for agdrift model.
    """
    # NOTE(review): runs once at class-creation time, not per test.
    print("agdrift integration tests conducted at " + str(datetime.datetime.today()))
    def setUp(self):
        """
        Test setup method.
        :return:
        """
        pass
    def tearDown(self):
        """
        Test teardown method.
        :return:
        """
        pass
    def test_assert_output_series(self):
        """ Verify that each output variable is a pd.Series """
        try:
            num_variables = len(agdrift_calc.pd_obj_out.columns)
            result = pd.Series(False, index=list(range(num_variables)), dtype='bool')
            expected = pd.Series(True, index=list(range(num_variables)), dtype='bool')
            for i in range(num_variables):
                column_name = agdrift_calc.pd_obj_out.columns[i]
                output = getattr(agdrift_calc, column_name)
                if isinstance(output, pd.Series):
                    result[i] = True
            tab = pd.concat([result,expected], axis=1)
            print('model output properties as pandas series')
            print(tabulate(tab, headers='keys', tablefmt='fancy_grid'))
            npt.assert_array_equal(result, expected)
        finally:
            pass
        return
    def test_assert_output_series_dtypes(self):
        """ Verify that each output variable is the correct dtype,
        essentially checking that initial declaration of dtype has not
        changed due to computation-based coercion of dtype"""
        try:
            num_variables = len(agdrift_calc.pd_obj_out.columns)
            result = pd.Series(False, index=list(range(num_variables)), dtype='bool')
            expected = pd.Series(True, index=list(range(num_variables)), dtype='bool')
            for i in range(num_variables):
                #get the string of the dtype that is expected and the type that has resulted
                output_name = agdrift_calc.pd_obj_out.columns[i]
                output_result = getattr(agdrift_calc, output_name)
                output_dtype_result = output_result.dtype.name
                #agdrift_output_empty is a copy of the original ModelOutputs declarations (unchanged by computations
                output_expected_attr = getattr(agdrift_output_empty, output_name)
                output_dtype_expected = output_expected_attr.dtype.name
                if output_dtype_result == output_dtype_expected:
                    result[i] = True
                #tab = pd.concat([result,expected], axis=1)
                if(result[i] != expected[i]):
                    print(str(i) + ":" + output_name)
                    print("output assertion state (result/expected) : " + str(result[i]) + "/" + str(expected[i]))
                    print("output dtype (result/expected) : " + output_dtype_result + "/" + output_dtype_expected)
            npt.assert_array_equal(result, expected)
        finally:
            pass
        return
    def test_sim_scenario_chk(self):
        """
        Integration test for the agdrift sim_scenario_chk output.
        """
        try:
            self.blackbox_method_str('sim_scenario_chk')
        finally:
            pass
        return
    def test_sim_scenario_id(self):
        """
        Integration test for the agdrift sim_scenario_id output.
        """
        try:
            self.blackbox_method_str('sim_scenario_id')
        finally:
            pass
        return
    def test_distance_downwind(self):
        """
        Integration test for the agdrift distance_downwind output.
        """
        func_name = inspect.currentframe().f_code.co_name
        try:
            self.blackbox_method_float('distance_downwind', func_name)
        finally:
            pass
        return
    def test_avg_dep_foa(self):
        """
        Integration test for the agdrift avg_dep_foa output.
        """
        func_name = inspect.currentframe().f_code.co_name
        try:
            self.blackbox_method_float('avg_dep_foa', func_name)
        finally:
            pass
        return
    def test_avg_dep_lbac(self):
        """
        Integration test for the agdrift avg_dep_lbac output.
        """
        func_name = inspect.currentframe().f_code.co_name
        try:
            self.blackbox_method_float('avg_dep_lbac', func_name)
        finally:
            pass
        return
    def test_avg_dep_gha(self):
        """
        Integration test for the agdrift avg_dep_gha output.
        """
        func_name = inspect.currentframe().f_code.co_name
        try:
            self.blackbox_method_float('avg_dep_gha', func_name)
        finally:
            pass
        return
    def test_avg_waterconc_ngl(self):
        """
        Integration test for the agdrift avg_waterconc_ngl output.
        """
        func_name = inspect.currentframe().f_code.co_name
        try:
            self.blackbox_method_float('avg_waterconc_ngl', func_name)
        finally:
            pass
        return
    def test_avg_field_dep_mgcm2(self):
        """
        Integration test for the agdrift avg_field_dep_mgcm2 output.
        """
        func_name = inspect.currentframe().f_code.co_name
        try:
            self.blackbox_method_float('avg_field_dep_mgcm2', func_name)
        finally:
            pass
        return
    def test_range_chk(self):
        """
        Integration test for the agdrift range_chk output.
        """
        try:
            self.blackbox_method_str('range_chk')
        finally:
            pass
        return
    def blackbox_method_float(self, output, func_name):
        """
        Helper method to reuse code for testing numpy array outputs from the AGDRIFT model
        :param output: String; Pandas Series name (e.g. column name) without '_out'
        :param func_name: String; name of the calling test, printed with the comparison table
        :return:
        """
        try:
            pd.set_option('display.float_format','{:.4E}'.format) # display model output in scientific notation
            result = agdrift_calc.pd_obj_out["out_" + output]
            expected = agdrift_calc.pd_obj_exp["exp_" + output]
            #npt.assert_array_almost_equal(result, expected, 4, '', True)
            rtol = 1e-3
            npt.assert_allclose(actual=result, desired=expected, rtol=rtol, atol=0, equal_nan=True, err_msg='', verbose=True)
        finally:
            # print the result/expected table even when the assertion fails
            tab = pd.concat([result, expected], axis=1)
            print("\n")
            print(func_name)
            print(tabulate(tab, headers='keys', tablefmt='rst'))
        return
    def blackbox_method_str(self, output):
        """
        Helper method to compare string-valued model outputs against expected values.
        :param output: String; Pandas Series name (e.g. column name) without '_out'
        :return:
        """
        try:
            result = agdrift_calc.pd_obj_out["out_" + output]
            expected = agdrift_calc.pd_obj_exp["exp_" + output]
            npt.assert_array_equal(result, expected)
        finally:
            tab = pd.concat([result,expected], axis=1)
            print(" ")
            print(tabulate(tab, headers='keys', tablefmt='rst'))
        return
# unittest will
# 1) call the setup method,
# 2) then call every method starting with "test",
# 3) then the teardown method
if __name__ == '__main__':
unittest.main() | unlicense |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/backend_bases.py | 4 | 110334 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
:class:`ShowBase`
The base class for the Show class of each interactive backend;
the 'show' callable is then set to Show.__call__, inherited from
ShowBase.
:class:`ToolContainerBase`
The base class for the Toolbar class of each interactive backend.
:class:`StatusbarBase`
The base class for the messaging area.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from contextlib import contextmanager
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import os
import sys
import warnings
import time
import io
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
#import matplotlib.path as path
from matplotlib import rcParams
from matplotlib import is_interactive
from matplotlib import get_backend
from matplotlib._pylab_helpers import Gcf
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import matplotlib.tight_bbox as tight_bbox
import matplotlib.textpath as textpath
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation, warn_deprecated
import matplotlib.backend_tools as tools
try:
    from importlib import import_module
except ImportError:
    # Catch only the import failure (the original bare ``except:`` would also
    # have swallowed KeyboardInterrupt/SystemExit).
    # Simple Python 2.6 fallback implementation (no relative imports).
    def import_module(name):
        """Import *name* by absolute name and return the module object."""
        __import__(name)
        return sys.modules[name]
# Optional-dependency probe: PIL/Pillow enables extra image handling; only
# the availability flag is kept, the imported name itself is discarded.
try:
    from PIL import Image
    _has_pil = True
    del Image
except ImportError:
    _has_pil = False
# Built-in savefig file formats: extension -> human readable description.
_default_filetypes = {
    'ps': 'Postscript',
    'eps': 'Encapsulated Postscript',
    'pdf': 'Portable Document Format',
    'pgf': 'PGF code for LaTeX',
    'png': 'Portable Network Graphics',
    'raw': 'Raw RGBA bitmap',
    'rgba': 'Raw RGBA bitmap',
    'svg': 'Scalable Vector Graphics',
    'svgz': 'Scalable Vector Graphics'
}
# Extension -> backend module path (or, after first use, the resolved
# FigureCanvas class cached by get_registered_canvas_class below).
_default_backends = {
    'ps': 'matplotlib.backends.backend_ps',
    'eps': 'matplotlib.backends.backend_ps',
    'pdf': 'matplotlib.backends.backend_pdf',
    'pgf': 'matplotlib.backends.backend_pgf',
    'png': 'matplotlib.backends.backend_agg',
    'raw': 'matplotlib.backends.backend_agg',
    'rgba': 'matplotlib.backends.backend_agg',
    'svg': 'matplotlib.backends.backend_svg',
    'svgz': 'matplotlib.backends.backend_svg',
}
def register_backend(format, backend, description=None):
    """
    Register a backend for saving to a given file format.

    format : str
        File extention
    backend : module string or canvas class
        Backend for handling file output
    description : str, optional
        Description of the file type. Defaults to an empty string
    """
    # Record the backend and its (possibly empty) description for *format*.
    _default_backends[format] = backend
    _default_filetypes[format] = '' if description is None else description
def get_registered_canvas_class(format):
    """
    Return the registered default canvas for given file format.
    Handles deferred import of required backend.
    """
    try:
        backend_class = _default_backends[format]
    except KeyError:
        # Unknown format: nothing registered.
        return None
    # A string entry means the backend module has not been imported yet;
    # resolve it to the concrete FigureCanvas class and cache the result.
    if cbook.is_string_like(backend_class):
        backend_class = import_module(backend_class).FigureCanvas
        _default_backends[format] = backend_class
    return backend_class
class ShowBase(object):
    """
    Simple base class to generate a show() callable in backends.
    Subclass must override mainloop() method.
    """
    def __call__(self, block=None):
        """
        Show all figures. If *block* is not None, then
        it is a boolean that overrides all other factors
        determining whether show blocks by calling mainloop().
        The other factors are:
        it does not block if run inside ipython's "%pylab" mode
        it does not block in interactive mode.
        """
        managers = Gcf.get_all_fig_managers()
        if not managers:
            # Nothing to display; do not enter the event loop.
            return
        for manager in managers:
            manager.show()
        if block is not None:
            # Explicit request from the caller overrides all heuristics below.
            if block:
                self.mainloop()
                return
            else:
                return
        # Hack: determine at runtime whether we are
        # inside ipython in pylab mode.
        from matplotlib import pyplot
        try:
            ipython_pylab = not pyplot.show._needmain
            # IPython versions >= 0.10 tack the _needmain
            # attribute onto pyplot.show, and always set
            # it to False, when in %pylab mode.
            ipython_pylab = ipython_pylab and get_backend() != 'WebAgg'
            # TODO: The above is a hack to get the WebAgg backend
            # working with ipython's `%pylab` mode until proper
            # integration is implemented.
        except AttributeError:
            ipython_pylab = False
        # Leave the following as a separate step in case we
        # want to control this behavior with an rcParam.
        if ipython_pylab:
            return
        if not is_interactive() or get_backend() == 'WebAgg':
            self.mainloop()
    def mainloop(self):
        # Subclasses run the GUI event loop here; the base class is a no-op.
        pass
class RendererBase(object):
    """An abstract base class to handle drawing/rendering operations.
    The following methods must be implemented in the backend for full
    functionality (though just implementing :meth:`draw_path` alone would
    give a highly capable backend):
    * :meth:`draw_path`
    * :meth:`draw_image`
    * :meth:`draw_gouraud_triangle`
    The following methods *should* be implemented in the backend for
    optimization reasons:
    * :meth:`draw_text`
    * :meth:`draw_markers`
    * :meth:`draw_path_collection`
    * :meth:`draw_quad_mesh`
    """
    def __init__(self):
        # TexManager is created lazily in get_texmanager(); _text2path backs
        # the path-based text fallbacks (_draw_text_as_path and friends).
        self._texmanager = None
        self._text2path = textpath.TextToPath()
    def open_group(self, s, gid=None):
        """
        Open a grouping element with label *s*. If *gid* is given, use
        *gid* as the id of the group. Is only currently used by
        :mod:`~matplotlib.backends.backend_svg`.
        """
        pass
    def close_group(self, s):
        """
        Close a grouping element with label *s*
        Is only currently used by :mod:`~matplotlib.backends.backend_svg`
        """
        pass
    def draw_path(self, gc, path, transform, rgbFace=None):
        """
        Draws a :class:`~matplotlib.path.Path` instance using the
        given affine transform.
        """
        raise NotImplementedError
    def draw_markers(self, gc, marker_path, marker_trans, path,
                     trans, rgbFace=None):
        """
        Draws a marker at each of the vertices in path. This includes
        all vertices, including control points on curves. To avoid
        that behavior, those vertices should be removed before calling
        this function.
        *gc*
            the :class:`GraphicsContextBase` instance
        *marker_trans*
            is an affine transform applied to the marker.
        *trans*
             is an affine transform applied to the path.
        This provides a fallback implementation of draw_markers that
        makes multiple calls to :meth:`draw_path`. Some backends may
        want to override this method in order to draw the marker only
        once and reuse it multiple times.
        """
        for vertices, codes in path.iter_segments(trans, simplify=False):
            if len(vertices):
                # Last vertex of the segment is the marker anchor point.
                x, y = vertices[-2:]
                self.draw_path(gc, marker_path,
                               marker_trans +
                               transforms.Affine2D().translate(x, y),
                               rgbFace)
    def draw_path_collection(self, gc, master_transform, paths, all_transforms,
                             offsets, offsetTrans, facecolors, edgecolors,
                             linewidths, linestyles, antialiaseds, urls,
                             offset_position):
        """
        Draws a collection of paths selecting drawing properties from
        the lists *facecolors*, *edgecolors*, *linewidths*,
        *linestyles* and *antialiaseds*. *offsets* is a list of
        offsets to apply to each of the paths. The offsets in
        *offsets* are first transformed by *offsetTrans* before being
        applied. *offset_position* may be either "screen" or "data"
        depending on the space that the offsets are in.
        This provides a fallback implementation of
        :meth:`draw_path_collection` that makes multiple calls to
        :meth:`draw_path`. Some backends may want to override this in
        order to render each set of path data only once, and then
        reference that path multiple times with the different offsets,
        colors, styles etc. The generator methods
        :meth:`_iter_collection_raw_paths` and
        :meth:`_iter_collection` are provided to help with (and
        standardize) the implementation across backends. It is highly
        recommended to use those generators, so that changes to the
        behavior of :meth:`draw_path_collection` can be made globally.
        """
        path_ids = []
        for path, transform in self._iter_collection_raw_paths(
                master_transform, paths, all_transforms):
            path_ids.append((path, transforms.Affine2D(transform)))
        for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
                gc, master_transform, all_transforms, path_ids, offsets,
                offsetTrans, facecolors, edgecolors, linewidths, linestyles,
                antialiaseds, urls, offset_position):
            path, transform = path_id
            transform = transforms.Affine2D(
                transform.get_matrix()).translate(xo, yo)
            self.draw_path(gc0, path, transform, rgbFace)
    def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
                       coordinates, offsets, offsetTrans, facecolors,
                       antialiased, edgecolors):
        """
        This provides a fallback implementation of
        :meth:`draw_quad_mesh` that generates paths and then calls
        :meth:`draw_path_collection`.
        """
        from matplotlib.collections import QuadMesh
        paths = QuadMesh.convert_mesh_to_paths(
            meshWidth, meshHeight, coordinates)
        if edgecolors is None:
            # Match mesh behavior: edges take the face color by default.
            edgecolors = facecolors
        linewidths = np.array([gc.get_linewidth()], np.float_)
        return self.draw_path_collection(
            gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
            edgecolors, linewidths, [], [antialiased], [None], 'screen')
    def draw_gouraud_triangle(self, gc, points, colors, transform):
        """
        Draw a Gouraud-shaded triangle.
        *points* is a 3x2 array of (x, y) points for the triangle.
        *colors* is a 3x4 array of RGBA colors for each point of the
        triangle.
        *transform* is an affine transform to apply to the points.
        """
        raise NotImplementedError
    def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
                               transform):
        """
        Draws a series of Gouraud triangles.
        *points* is a Nx3x2 array of (x, y) points for the trianglex.
        *colors* is a Nx3x4 array of RGBA colors for each point of the
        triangles.
        *transform* is an affine transform to apply to the points.
        """
        transform = transform.frozen()
        for tri, col in zip(triangles_array, colors_array):
            self.draw_gouraud_triangle(gc, tri, col, transform)
    def _iter_collection_raw_paths(self, master_transform, paths,
                                   all_transforms):
        """
        This is a helper method (along with :meth:`_iter_collection`) to make
        it easier to write a space-efficent :meth:`draw_path_collection`
        implementation in a backend.
        This method yields all of the base path/transform
        combinations, given a master transform, a list of paths and
        list of transforms.
        The arguments should be exactly what is passed in to
        :meth:`draw_path_collection`.
        The backend should take each yielded path and transform and
        create an object that can be referenced (reused) later.
        """
        Npaths = len(paths)
        Ntransforms = len(all_transforms)
        N = max(Npaths, Ntransforms)
        if Npaths == 0:
            return
        transform = transforms.IdentityTransform()
        for i in xrange(N):
            # Paths and transforms cycle independently when their lengths
            # differ.
            path = paths[i % Npaths]
            if Ntransforms:
                transform = Affine2D(all_transforms[i % Ntransforms])
            yield path, transform + master_transform
    def _iter_collection_uses_per_path(self, paths, all_transforms,
                                       offsets, facecolors, edgecolors):
        """
        Compute how many times each raw path object returned by
        _iter_collection_raw_paths would be used when calling
        _iter_collection. This is intended for the backend to decide
        on the tradeoff between using the paths in-line and storing
        them once and reusing. Rounds up in case the number of uses
        is not the same for every path.
        """
        Npaths = len(paths)
        if Npaths == 0 or (len(facecolors) == 0 and len(edgecolors) == 0):
            return 0
        Npath_ids = max(Npaths, len(all_transforms))
        N = max(Npath_ids, len(offsets))
        # Ceiling division: round up when uses are uneven across paths.
        return (N + Npath_ids - 1) // Npath_ids
    def _iter_collection(self, gc, master_transform, all_transforms,
                         path_ids, offsets, offsetTrans, facecolors,
                         edgecolors, linewidths, linestyles,
                         antialiaseds, urls, offset_position):
        """
        This is a helper method (along with
        :meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficent :meth:`draw_path_collection` implementation in a
        backend.
        This method yields all of the path, offset and graphics
        context combinations to draw the path collection. The caller
        should already have looped over the results of
        :meth:`_iter_collection_raw_paths` to draw this collection.
        The arguments should be the same as that passed into
        :meth:`draw_path_collection`, with the exception of
        *path_ids*, which is a list of arbitrary objects that the
        backend will use to reference one of the paths created in the
        :meth:`_iter_collection_raw_paths` stage.
        Each yielded result is of the form::
           xo, yo, path_id, gc, rgbFace
        where *xo*, *yo* is an offset; *path_id* is one of the elements of
        *path_ids*; *gc* is a graphics context and *rgbFace* is a color to
        use for filling the path.
        """
        Ntransforms = len(all_transforms)
        Npaths = len(path_ids)
        Noffsets = len(offsets)
        N = max(Npaths, Noffsets)
        Nfacecolors = len(facecolors)
        Nedgecolors = len(edgecolors)
        Nlinewidths = len(linewidths)
        Nlinestyles = len(linestyles)
        Naa = len(antialiaseds)
        Nurls = len(urls)
        if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
            # Fully invisible collection: nothing to yield.
            return
        if Noffsets:
            toffsets = offsetTrans.transform(offsets)
        gc0 = self.new_gc()
        gc0.copy_properties(gc)
        if Nfacecolors == 0:
            rgbFace = None
        if Nedgecolors == 0:
            gc0.set_linewidth(0.0)
        xo, yo = 0, 0
        for i in xrange(N):
            # All property lists cycle modulo their own length.
            path_id = path_ids[i % Npaths]
            if Noffsets:
                xo, yo = toffsets[i % Noffsets]
                if offset_position == 'data':
                    if Ntransforms:
                        transform = (
                            Affine2D(all_transforms[i % Ntransforms]) +
                            master_transform)
                    else:
                        transform = master_transform
                    # Convert the data-space offset into a screen-space delta.
                    xo, yo = transform.transform_point((xo, yo))
                    xp, yp = transform.transform_point((0, 0))
                    xo = -(xp - xo)
                    yo = -(yp - yo)
            if not (np.isfinite(xo) and np.isfinite(yo)):
                continue
            if Nfacecolors:
                rgbFace = facecolors[i % Nfacecolors]
            if Nedgecolors:
                if Nlinewidths:
                    gc0.set_linewidth(linewidths[i % Nlinewidths])
                if Nlinestyles:
                    gc0.set_dashes(*linestyles[i % Nlinestyles])
                fg = edgecolors[i % Nedgecolors]
                if len(fg) == 4:
                    if fg[3] == 0.0:
                        # Fully transparent edge: draw no stroke at all.
                        gc0.set_linewidth(0)
                    else:
                        gc0.set_foreground(fg)
                else:
                    gc0.set_foreground(fg)
            if rgbFace is not None and len(rgbFace) == 4:
                if rgbFace[3] == 0:
                    # Fully transparent face: skip filling.
                    rgbFace = None
            gc0.set_antialiased(antialiaseds[i % Naa])
            if Nurls:
                gc0.set_url(urls[i % Nurls])
            yield xo, yo, path_id, gc0, rgbFace
        gc0.restore()
    def get_image_magnification(self):
        """
        Get the factor by which to magnify images passed to :meth:`draw_image`.
        Allows a backend to have images at a different resolution to other
        artists.
        """
        return 1.0
    def draw_image(self, gc, x, y, im):
        """
        Draw the image instance into the current axes;
        *gc*
            a GraphicsContext containing clipping information
        *x*
            is the distance in pixels from the left hand side of the canvas.
        *y*
            the distance from the origin. That is, if origin is
            upper, y is the distance from top. If origin is lower, y
            is the distance from bottom
        *im*
            the :class:`matplotlib._image.Image` instance
        """
        raise NotImplementedError
    def option_image_nocomposite(self):
        """
        override this method for renderers that do not necessarily always
        want to rescale and composite raster images. (like SVG, PDF, or PS)
        """
        return False
    def option_scale_image(self):
        """
        override this method for renderers that support arbitrary
        scaling of image (most of the vector backend).
        """
        return False
    def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
        """
        Draw a TeX string by rendering it as a path (usetex fallback).
        """
        self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
    def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
        """
        Draw the text instance
        *gc*
            the :class:`GraphicsContextBase` instance
        *x*
            the x location of the text in display coords
        *y*
            the y location of the text baseline in display coords
        *s*
            the text string
        *prop*
          a :class:`matplotlib.font_manager.FontProperties` instance
        *angle*
            the rotation angle in degrees
        *mtext*
            a :class:`matplotlib.text.Text` instance
        **backend implementers note**
        When you are trying to determine if you have gotten your bounding box
        right (which is what enables the text layout/alignment to work
        properly), it helps to change the line in text.py::
            if 0: bbox_artist(self, renderer)
        to if 1, and then the actual bounding box will be plotted along with
        your text.
        """
        self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
    def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
        """
        return the text path and transform
        *prop*
          font property
        *s*
          text to be converted
        *usetex*
          If True, use matplotlib usetex mode.
        *ismath*
          If True, use mathtext parser. If "TeX", use *usetex* mode.
        """
        text2path = self._text2path
        fontsize = self.points_to_pixels(prop.get_size_in_points())
        if ismath == "TeX":
            verts, codes = text2path.get_text_path(prop, s, ismath=False,
                                                   usetex=True)
        else:
            verts, codes = text2path.get_text_path(prop, s, ismath=ismath,
                                                   usetex=False)
        path = Path(verts, codes)
        # Degrees to radians (approximate pi literal kept from original).
        angle = angle / 180. * 3.141592
        if self.flipy():
            # y grows downward on this canvas; flip about the canvas height.
            transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
                                         fontsize / text2path.FONT_SCALE)
            transform = transform.rotate(angle).translate(x, self.height - y)
        else:
            transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
                                         fontsize / text2path.FONT_SCALE)
            transform = transform.rotate(angle).translate(x, y)
        return path, transform
    def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
        """
        draw the text by converting them to paths using textpath module.
        *prop*
          font property
        *s*
          text to be converted
        *usetex*
          If True, use matplotlib usetex mode.
        *ismath*
          If True, use mathtext parser. If "TeX", use *usetex* mode.
        """
        path, transform = self._get_text_path_transform(
            x, y, s, prop, angle, ismath)
        color = gc.get_rgb()
        # Fill the glyph outlines with the text color; no stroke.
        gc.set_linewidth(0.0)
        self.draw_path(gc, path, transform, rgbFace=color)
    def get_text_width_height_descent(self, s, prop, ismath):
        """
        get the width and height, and the offset from the bottom to the
        baseline (descent), in display coords of the string s with
        :class:`~matplotlib.font_manager.FontProperties` prop
        """
        if ismath == 'TeX':
            # todo: handle props
            size = prop.get_size_in_points()
            texmanager = self._text2path.get_texmanager()
            fontsize = prop.get_size_in_points()
            w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
                                                               renderer=self)
            return w, h, d
        dpi = self.points_to_pixels(72)
        if ismath:
            dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
            return dims[0:3]  # return width, height, descent
        flags = self._text2path._get_hinting_flag()
        font = self._text2path._get_font(prop)
        size = prop.get_size_in_points()
        font.set_size(size, dpi)
        # the width and height of unrotated string
        font.set_text(s, 0.0, flags=flags)
        w, h = font.get_width_height()
        d = font.get_descent()
        w /= 64.0  # convert from subpixels
        h /= 64.0
        d /= 64.0
        return w, h, d
    def flipy(self):
        """
        Return true if y small numbers are top for renderer Is used
        for drawing text (:mod:`matplotlib.text`) and images
        (:mod:`matplotlib.image`) only
        """
        return True
    def get_canvas_width_height(self):
        'return the canvas width and height in display coords'
        return 1, 1
    def get_texmanager(self):
        """
        return the :class:`matplotlib.texmanager.TexManager` instance
        """
        if self._texmanager is None:
            # Created on first use; TexManager startup is comparatively
            # expensive.
            from matplotlib.texmanager import TexManager
            self._texmanager = TexManager()
        return self._texmanager
    def new_gc(self):
        """
        Return an instance of a :class:`GraphicsContextBase`
        """
        return GraphicsContextBase()
    def points_to_pixels(self, points):
        """
        Convert points to display units
        *points*
            a float or a numpy array of float
        return points converted to pixels
        You need to override this function (unless your backend
        doesn't have a dpi, e.g., postscript or svg). Some imaging
        systems assume some value for pixels per inch::
            points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
        """
        return points
    def strip_math(self, s):
        # Delegate to cbook; removes surrounding $...$ markers from *s*.
        return cbook.strip_math(s)
    def start_rasterizing(self):
        """
        Used in MixedModeRenderer. Switch to the raster renderer.
        """
        pass
    def stop_rasterizing(self):
        """
        Used in MixedModeRenderer. Switch back to the vector renderer
        and draw the contents of the raster renderer as an image on
        the vector renderer.
        """
        pass
    def start_filter(self):
        """
        Used in AggRenderer. Switch to a temporary renderer for image
        filtering effects.
        """
        pass
    def stop_filter(self, filter_func):
        """
        Used in AggRenderer. Switch back to the original renderer.
        The contents of the temporary renderer is processed with the
        *filter_func* and is drawn on the original renderer as an
        image.
        """
        pass
class GraphicsContextBase(object):
    """
    An abstract base class that provides color, line styles, etc.

    Backends subclass this and translate the stored state into
    native drawing attributes.
    """

    # a mapping from dash styles to suggested offset, dash pairs
    dashd = {
        'solid': (None, None),
        'dashed': (0, (6.0, 6.0)),
        'dashdot': (0, (3.0, 5.0, 1.0, 5.0)),
        'dotted': (0, (1.0, 3.0)),
    }

    def __init__(self):
        self._alpha = 1.0
        self._forced_alpha = False  # if True, _alpha overrides A from RGBA
        self._antialiased = 1  # use 0,1 not True, False for extension code
        self._capstyle = 'butt'
        self._cliprect = None
        self._clippath = None
        self._dashes = None, None
        self._joinstyle = 'round'
        self._linestyle = 'solid'
        self._linewidth = 1
        self._rgb = (0.0, 0.0, 0.0, 1.0)
        self._hatch = None
        self._url = None
        self._gid = None
        self._snap = None
        self._sketch = None

    def copy_properties(self, gc):
        'Copy properties from gc to self'
        self._alpha = gc._alpha
        self._forced_alpha = gc._forced_alpha
        self._antialiased = gc._antialiased
        self._capstyle = gc._capstyle
        self._cliprect = gc._cliprect
        self._clippath = gc._clippath
        self._dashes = gc._dashes
        self._joinstyle = gc._joinstyle
        self._linestyle = gc._linestyle
        self._linewidth = gc._linewidth
        self._rgb = gc._rgb
        self._hatch = gc._hatch
        self._url = gc._url
        self._gid = gc._gid
        self._snap = gc._snap
        self._sketch = gc._sketch

    def restore(self):
        """
        Restore the graphics context from the stack - needed only
        for backends that save graphics contexts on a stack.
        """
        pass

    def get_alpha(self):
        """
        Return the alpha value used for blending - not supported on
        all backends.
        """
        return self._alpha

    def get_antialiased(self):
        "Return true if the object should try to do antialiased rendering"
        return self._antialiased

    def get_capstyle(self):
        """
        Return the capstyle as a string in ('butt', 'round', 'projecting')
        """
        return self._capstyle

    def get_clip_rectangle(self):
        """
        Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox`
        instance.
        """
        return self._cliprect

    def get_clip_path(self):
        """
        Return the clip path in the form (path, transform), where path
        is a :class:`~matplotlib.path.Path` instance, and transform is
        an affine transform to apply to the path before clipping.

        Returns (None, None) when no clip path is set.
        """
        if self._clippath is not None:
            return self._clippath.get_transformed_path_and_affine()
        return None, None

    def get_dashes(self):
        """
        Return the dash information as an offset dashlist tuple.

        The dash list is a even size list that gives the ink on, ink
        off in pixels.

        See p107 of to PostScript `BLUEBOOK
        <http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
        for more info.

        Default value is None
        """
        return self._dashes

    def get_forced_alpha(self):
        """
        Return whether the value given by get_alpha() should be used to
        override any other alpha-channel values.
        """
        return self._forced_alpha

    def get_joinstyle(self):
        """
        Return the line join style as one of ('miter', 'round', 'bevel')
        """
        return self._joinstyle

    def get_linestyle(self, style=None):
        """
        Return the linestyle: one of ('solid', 'dashed', 'dashdot',
        'dotted').

        The *style* argument has never been used and is kept only for
        backward compatibility; it now defaults to None so callers no
        longer have to pass a dummy value.
        """
        return self._linestyle

    def get_linewidth(self):
        """
        Return the line width in points as a scalar
        """
        return self._linewidth

    def get_rgb(self):
        """
        returns a tuple of three or four floats from 0-1.
        """
        return self._rgb

    def get_url(self):
        """
        returns a url if one is set, None otherwise
        """
        return self._url

    def get_gid(self):
        """
        Return the object identifier if one is set, None otherwise.
        """
        return self._gid

    def get_snap(self):
        """
        returns the snap setting which may be:

          * True: snap vertices to the nearest pixel center

          * False: leave vertices as-is

          * None: (auto) If the path contains only rectilinear line
            segments, round to the nearest pixel center
        """
        return self._snap

    def set_alpha(self, alpha):
        """
        Set the alpha value used for blending - not supported on all backends.

        If ``alpha=None`` (the default), the alpha components of the
        foreground and fill colors will be used to set their respective
        transparencies (where applicable); otherwise, ``alpha`` will override
        them.
        """
        if alpha is not None:
            self._alpha = alpha
            self._forced_alpha = True
        else:
            self._alpha = 1.0
            self._forced_alpha = False
        # Re-derive the stored RGBA so its alpha channel matches the
        # new forced/unforced state.
        self.set_foreground(self._rgb, isRGBA=True)

    def set_antialiased(self, b):
        """
        True if object should be drawn with antialiased rendering
        """
        # use 0, 1 to make life easier on extension code trying to read the gc
        if b:
            self._antialiased = 1
        else:
            self._antialiased = 0

    def set_capstyle(self, cs):
        """
        Set the capstyle as a string in ('butt', 'round', 'projecting')

        Raises ValueError for any other value.
        """
        if cs in ('butt', 'round', 'projecting'):
            self._capstyle = cs
        else:
            raise ValueError('Unrecognized cap style.  Found %s' % cs)

    def set_clip_rectangle(self, rectangle):
        """
        Set the clip rectangle with sequence (left, bottom, width, height)
        """
        self._cliprect = rectangle

    def set_clip_path(self, path):
        """
        Set the clip path and transformation.  Path should be a
        :class:`~matplotlib.transforms.TransformedPath` instance.

        Raises ValueError for any other (non-None) type.
        """
        if path is not None and not isinstance(path,
                                               transforms.TransformedPath):
            msg = ("Path should be a matplotlib.transforms.TransformedPath"
                   "instance.")
            raise ValueError(msg)
        self._clippath = path

    def set_dashes(self, dash_offset, dash_list):
        """
        Set the dash style for the gc.

        *dash_offset*
            is the offset (usually 0).

        *dash_list*
            specifies the on-off sequence as points.
            ``(None, None)`` specifies a solid line

        Raises ValueError if any dash segment length is <= 0.
        """
        if dash_list is not None:
            dl = np.asarray(dash_list)
            if np.any(dl <= 0.0):
                raise ValueError("All values in the dash list must be positive")
        self._dashes = dash_offset, dash_list

    def set_foreground(self, fg, isRGBA=False):
        """
        Set the foreground color.  fg can be a MATLAB format string, a
        html hex color string, an rgb or rgba unit tuple, or a float between 0
        and 1.  In the latter case, grayscale is used.

        If you know fg is rgba, set ``isRGBA=True`` for efficiency.
        """
        if self._forced_alpha and isRGBA:
            # Keep the rgb channels, override alpha with the forced one.
            self._rgb = fg[:3] + (self._alpha,)
        elif self._forced_alpha:
            self._rgb = colors.colorConverter.to_rgba(fg, self._alpha)
        elif isRGBA:
            self._rgb = fg
        else:
            self._rgb = colors.colorConverter.to_rgba(fg)

    def set_graylevel(self, frac):
        """
        Set the foreground color to be a gray level with *frac*

        .. deprecated:: 1.5
           Use ``set_foreground((frac, frac, frac))`` instead.
        """
        # When removing, remember to remove all overrides in subclasses.
        msg = ("set_graylevel is deprecated for removal in 1.6; "
               "you can achieve the same result by using "
               "set_foreground((frac, frac, frac))")
        warnings.warn(msg, mplDeprecation)
        self._rgb = (frac, frac, frac, self._alpha)

    def set_joinstyle(self, js):
        """
        Set the join style to be one of ('miter', 'round', 'bevel')

        Raises ValueError for any other value.
        """
        if js in ('miter', 'round', 'bevel'):
            self._joinstyle = js
        else:
            raise ValueError('Unrecognized join style.  Found %s' % js)

    def set_linewidth(self, w):
        """
        Set the linewidth in points
        """
        self._linewidth = float(w)

    def set_linestyle(self, style):
        """
        Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
        'dotted'). One may specify customized dash styles by providing
        a tuple of (offset, dash pairs). For example, the predefiend
        linestyles have following values.:

         'dashed'  : (0, (6.0, 6.0)),
         'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
         'dotted'  : (0, (1.0, 3.0)),

        Raises ValueError for an unrecognized style.
        """
        if style in self.dashd:
            offset, dashes = self.dashd[style]
        elif isinstance(style, tuple):
            offset, dashes = style
        else:
            raise ValueError('Unrecognized linestyle: %s' % str(style))
        self._linestyle = style
        # Keep the dash state consistent with the chosen style.
        self.set_dashes(offset, dashes)

    def set_url(self, url):
        """
        Sets the url for links in compatible backends
        """
        self._url = url

    def set_gid(self, id):
        """
        Sets the id.
        """
        self._gid = id

    def set_snap(self, snap):
        """
        Sets the snap setting which may be:

          * True: snap vertices to the nearest pixel center

          * False: leave vertices as-is

          * None: (auto) If the path contains only rectilinear line
            segments, round to the nearest pixel center
        """
        self._snap = snap

    def set_hatch(self, hatch):
        """
        Sets the hatch style for filling
        """
        self._hatch = hatch

    def get_hatch(self):
        """
        Gets the current hatch style
        """
        return self._hatch

    def get_hatch_path(self, density=6.0):
        """
        Returns a Path for the current hatch, or None if no hatch is set.
        """
        if self._hatch is None:
            return None
        return Path.hatch(self._hatch, density)

    def get_sketch_params(self):
        """
        Returns the sketch parameters for the artist.

        Returns
        -------
        sketch_params : tuple or `None`

            A 3-tuple with the following elements:

              * `scale`: The amplitude of the wiggle perpendicular to the
                source line.

              * `length`: The length of the wiggle along the line.

              * `randomness`: The scale factor by which the length is
                shrunken or expanded.

            May return `None` if no sketch parameters were set.
        """
        return self._sketch

    def set_sketch_params(self, scale=None, length=None, randomness=None):
        """
        Sets the sketch parameters.

        Parameters
        ----------

        scale : float, optional
            The amplitude of the wiggle perpendicular to the source
            line, in pixels.  If scale is `None`, or not provided, no
            sketch filter will be provided.

        length : float, optional
             The length of the wiggle along the line, in pixels
             (default 128.0)

        randomness : float, optional
            The scale factor by which the length is shrunken or
            expanded (default 16.0)
        """
        if scale is None:
            self._sketch = None
        else:
            # `or` fills in the documented defaults for falsy inputs.
            self._sketch = (scale, length or 128.0, randomness or 16.0)
class TimerBase(object):
    '''
    A base class for providing timer events, useful for things animations.
    Backends need to implement a few specific methods in order to use their
    own timing mechanisms so that the timer events are integrated into their
    event loops.

    Mandatory functions that must be implemented:

        * `_timer_start`: Contains backend-specific code for starting
          the timer

        * `_timer_stop`: Contains backend-specific code for stopping
          the timer

    Optional overrides:

        * `_timer_set_single_shot`: Code for setting the timer to
          single shot operating mode, if supported by the timer
          object. If not, the `Timer` class itself will store the flag
          and the `_on_timer` method should be overridden to support
          such behavior.

        * `_timer_set_interval`: Code for setting the interval on the
          timer, if there is a method for doing so on the timer
          object.

        * `_on_timer`: This is the internal function that any timer
          object should call, which will handle the task of running
          all callbacks that have been set.

    Attributes:

        * `interval`: The time between timer events in
          milliseconds. Default is 1000 ms.

        * `single_shot`: Boolean flag indicating whether this timer
          should operate as single shot (run once and then
          stop). Defaults to `False`.

        * `callbacks`: Stores list of (func, args) tuples that will be
          called upon timer events. This list can be manipulated
          directly, or the functions `add_callback` and
          `remove_callback` can be used.
    '''
    def __init__(self, interval=None, callbacks=None):
        # Initialize empty callbacks list and setup default settings if
        # necessary.
        if callbacks is None:
            self.callbacks = []
        else:
            self.callbacks = callbacks[:]  # Create a copy

        if interval is None:
            self._interval = 1000
        else:
            self._interval = interval

        self._single = False

        # Default attribute for holding the GUI-specific timer object
        self._timer = None

    def __del__(self):
        'Need to stop timer and possibly disconnect timer.'
        self._timer_stop()

    def start(self, interval=None):
        '''
        Start the timer object. `interval` is optional and will be used
        to reset the timer interval first if provided.
        '''
        if interval is not None:
            self._set_interval(interval)
        self._timer_start()

    def stop(self):
        '''
        Stop the timer.
        '''
        self._timer_stop()

    def _timer_start(self):
        # Backend hook: start the underlying timer.
        pass

    def _timer_stop(self):
        # Backend hook: stop the underlying timer.
        pass

    def _get_interval(self):
        return self._interval

    def _set_interval(self, interval):
        # Force to int since none of the backends actually support fractional
        # milliseconds, and some error or give warnings.
        interval = int(interval)
        self._interval = interval
        self._timer_set_interval()

    interval = property(_get_interval, _set_interval)

    def _get_single_shot(self):
        return self._single

    def _set_single_shot(self, ss=True):
        self._single = ss
        self._timer_set_single_shot()

    single_shot = property(_get_single_shot, _set_single_shot)

    def add_callback(self, func, *args, **kwargs):
        '''
        Register `func` to be called by timer when the event fires. Any
        additional arguments provided will be passed to `func`.
        '''
        self.callbacks.append((func, args, kwargs))

    def remove_callback(self, func, *args, **kwargs):
        '''
        Remove `func` from list of callbacks. `args` and `kwargs` are optional
        and used to distinguish between copies of the same function registered
        to be called with different arguments.
        '''
        if args or kwargs:
            self.callbacks.remove((func, args, kwargs))
        else:
            funcs = [c[0] for c in self.callbacks]
            if func in funcs:
                self.callbacks.pop(funcs.index(func))

    def _timer_set_interval(self):
        'Used to set interval on underlying timer object.'
        pass

    def _timer_set_single_shot(self):
        'Used to set single shot on underlying timer object.'
        pass

    def _on_timer(self):
        '''
        Runs all function that have been registered as callbacks. Functions
        can return False (or 0) if they should not be called any more. If there
        are no callbacks, the timer is automatically stopped.
        '''
        # Iterate over a snapshot: callbacks returning False are removed
        # from self.callbacks below, and mutating the list while iterating
        # it would silently skip the callback that follows each removal.
        for func, args, kwargs in list(self.callbacks):
            ret = func(*args, **kwargs)
            # docstring above explains why we use `if ret == False` here,
            # instead of `if not ret`: only an explicit False/0 return
            # requests removal; the usual implicit None does not.
            if ret == False:
                self.callbacks.remove((func, args, kwargs))

        if len(self.callbacks) == 0:
            self.stop()
class Event(object):
    """
    A matplotlib event.  Attach additional attributes as defined in
    :meth:`FigureCanvasBase.mpl_connect`.  The following attributes
    are defined and shown with their default values:

    *name*
        the event name

    *canvas*
        the FigureCanvas instance generating the event

    *guiEvent*
        the GUI event that triggered the matplotlib event
    """
    def __init__(self, name, canvas, guiEvent=None):
        # Plain data holder; subclasses add location/button/key state.
        self.guiEvent = guiEvent
        self.canvas = canvas
        self.name = name
class IdleEvent(Event):
    """
    An event triggered by the GUI backend when it is idle -- useful
    for passive animation.

    Carries no attributes beyond those of :class:`Event`.
    """
    pass
class DrawEvent(Event):
    """
    An event triggered by a draw operation on the canvas.

    In addition to the :class:`Event` attributes, the following event
    attributes are defined:

    *renderer*
        the :class:`RendererBase` instance for the draw event
    """
    def __init__(self, name, canvas, renderer):
        super(DrawEvent, self).__init__(name, canvas)
        self.renderer = renderer
class ResizeEvent(Event):
    """
    An event triggered by a canvas resize.

    In addition to the :class:`Event` attributes, the following event
    attributes are defined:

    *width*
        width of the canvas in pixels

    *height*
        height of the canvas in pixels
    """
    def __init__(self, name, canvas):
        super(ResizeEvent, self).__init__(name, canvas)
        # Snapshot the canvas size at the time of the event.
        self.width, self.height = canvas.get_width_height()
class CloseEvent(Event):
    """
    An event triggered by a figure being closed.

    Carries no attributes beyond those of :class:`Event`.
    """
    def __init__(self, name, canvas, guiEvent=None):
        super(CloseEvent, self).__init__(name, canvas, guiEvent)
class LocationEvent(Event):
    """
    An event that has a screen location

    The following additional attributes are defined and shown with
    their default values.

    In addition to the :class:`Event` attributes, the following
    event attributes are defined:

    *x*
        x position - pixels from left of canvas

    *y*
        y position - pixels from bottom of canvas

    *inaxes*
        the :class:`~matplotlib.axes.Axes` instance if mouse is over axes

    *xdata*
        x coord of mouse in data coords

    *ydata*
        y coord of mouse in data coords
    """
    x = None  # x position - pixels from left of canvas
    y = None  # y position - pixels from bottom of canvas
    inaxes = None  # the Axes instance if mouse is over axes
    xdata = None  # x coord of mouse in data coords
    ydata = None  # y coord of mouse in data coords

    # the last event that was triggered before this one; class-level so
    # it is shared across all LocationEvents for enter/leave tracking
    lastevent = None

    def __init__(self, name, canvas, x, y, guiEvent=None):
        """
        *x*, *y* in figure coords, 0,0 = bottom, left
        """
        Event.__init__(self, name, canvas, guiEvent=guiEvent)
        self.x = x
        self.y = y

        if x is None or y is None:
            # cannot check if event was in axes if no x,y info
            self.inaxes = None
            self._update_enter_leave()
            return

        # Find all axes containing the mouse; a mouse grab (e.g. by a
        # widget) short-circuits the hit test.
        if self.canvas.mouse_grabber is None:
            axes_list = [a for a in self.canvas.figure.get_axes()
                         if a.in_axes(self)]
        else:
            axes_list = [self.canvas.mouse_grabber]

        if len(axes_list) == 0:  # None found
            self.inaxes = None
            self._update_enter_leave()
            return
        elif (len(axes_list) > 1):  # Overlap, get the highest zorder
            axes_list.sort(key=lambda x: x.zorder)
            self.inaxes = axes_list[-1]  # Use the highest zorder
        else:  # Just found one hit
            self.inaxes = axes_list[0]

        try:
            # Map canvas pixels to data coordinates of the hit axes;
            # a non-invertible transform leaves xdata/ydata as None.
            trans = self.inaxes.transData.inverted()
            xdata, ydata = trans.transform_point((x, y))
        except ValueError:
            self.xdata = None
            self.ydata = None
        else:
            self.xdata = xdata
            self.ydata = ydata

        self._update_enter_leave()

    def _update_enter_leave(self):
        'process the figure/axes enter leave events'
        if LocationEvent.lastevent is not None:
            last = LocationEvent.lastevent
            if last.inaxes != self.inaxes:
                # process axes enter/leave events
                try:
                    if last.inaxes is not None:
                        last.canvas.callbacks.process('axes_leave_event', last)
                except:
                    pass
                    # See ticket 2901582.
                    # I think this is a valid exception to the rule
                    # against catching all exceptions; if anything goes
                    # wrong, we simply want to move on and process the
                    # current event.
                if self.inaxes is not None:
                    self.canvas.callbacks.process('axes_enter_event', self)

        else:
            # process a figure enter event
            if self.inaxes is not None:
                self.canvas.callbacks.process('axes_enter_event', self)

        # Remember this event so the next one can detect enter/leave.
        LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
    """
    A mouse event ('button_press_event',
                   'button_release_event',
                   'scroll_event',
                   'motion_notify_event').

    In addition to the :class:`Event` and :class:`LocationEvent`
    attributes, the following attributes are defined:

    *button*
        button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
        for scroll events).  Note that in the nbagg backend, both the
        middle and right clicks return 3 since right clicking will bring
        up the context menu in some browsers.

    *key*
        the key depressed when the mouse event triggered (see
        :class:`KeyEvent`)

    *step*
        number of scroll steps (positive for 'up', negative for 'down')

    Example usage::

        def on_press(event):
            print('you pressed', event.button, event.xdata, event.ydata)

        cid = fig.canvas.mpl_connect('button_press_event', on_press)
    """
    x = None  # x position - pixels from left of canvas
    y = None  # y position - pixels from right of canvas
    button = None  # button pressed None, 1, 2, 3
    dblclick = None  # whether or not the event is the result of a double click
    inaxes = None  # the Axes instance if mouse us over axes
    xdata = None  # x coord of mouse in data coords
    ydata = None  # y coord of mouse in data coords
    step = None  # scroll steps for scroll events

    def __init__(self, name, canvas, x, y, button=None, key=None,
                 step=0, dblclick=False, guiEvent=None):
        """
        x, y in figure coords, 0,0 = bottom, left
        button pressed None, 1, 2, 3, 'up', 'down'
        """
        super(MouseEvent, self).__init__(name, canvas, x, y,
                                         guiEvent=guiEvent)
        self.dblclick = dblclick
        self.step = step
        self.key = key
        self.button = button

    def __str__(self):
        template = ("MPL MouseEvent: xy=(%d,%d) xydata=(%s,%s) button=%s "
                    "dblclick=%s inaxes=%s")
        return template % (self.x, self.y, self.xdata, self.ydata,
                           self.button, self.dblclick, self.inaxes)
class PickEvent(Event):
    """
    a pick event, fired when the user picks a location on the canvas
    sufficiently close to an artist.

    Attrs: all the :class:`Event` attributes plus

    *mouseevent*
        the :class:`MouseEvent` that generated the pick

    *artist*
        the :class:`~matplotlib.artist.Artist` picked

    other
        extra class dependent attrs -- e.g., a
        :class:`~matplotlib.lines.Line2D` pick may define different
        extra attributes than a
        :class:`~matplotlib.collections.PatchCollection` pick event

    Example usage::

        line, = ax.plot(rand(100), 'o', picker=5)  # 5 points tolerance

        def on_pick(event):
            thisline = event.artist
            xdata, ydata = thisline.get_data()
            ind = event.ind
            print('on pick line:', zip(xdata[ind], ydata[ind]))

        cid = fig.canvas.mpl_connect('pick_event', on_pick)
    """
    def __init__(self, name, canvas, mouseevent, artist,
                 guiEvent=None, **kwargs):
        super(PickEvent, self).__init__(name, canvas, guiEvent)
        self.artist = artist
        self.mouseevent = mouseevent
        # Picker-specific extras (e.g. 'ind') become attributes directly.
        self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
    """
    A key event (key press, key release).

    Attach additional attributes as defined in
    :meth:`FigureCanvasBase.mpl_connect`.

    In addition to the :class:`Event` and :class:`LocationEvent`
    attributes, the following attributes are defined:

    *key*
        the key(s) pressed. Could be **None**, a single case sensitive ascii
        character ("g", "G", "#", etc.), a special key
        ("control", "shift", "f1", "up", etc.) or a
        combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G").

    .. note::

        Modifier keys will be prefixed to the pressed key and will be in the
        order "ctrl", "alt", "super". The exception to this rule is when the
        pressed key is itself a modifier key, therefore "ctrl+alt" and
        "alt+control" can both be valid key values.

    Example usage::

        def on_key(event):
            print('you pressed', event.key, event.xdata, event.ydata)

        cid = fig.canvas.mpl_connect('key_press_event', on_key)
    """
    def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
        super(KeyEvent, self).__init__(name, canvas, x, y,
                                       guiEvent=guiEvent)
        self.key = key
class FigureCanvasBase(object):
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event',
'close_event'
]
supports_blit = True
fixed_dpi = None
filetypes = _default_filetypes
if _has_pil:
# JPEG support
register_backend('jpg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
register_backend('jpeg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
# TIFF support
register_backend('tif', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
register_backend('tiff', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
def __init__(self, figure):
    """
    Wire this canvas to *figure* and set up default event state.

    *figure* is a :class:`matplotlib.figure.Figure` instance.
    """
    # Guard against re-entrant draw_idle while we are constructing.
    self._is_idle_drawing = True
    self._is_saving = False
    # Canvas and figure hold references to each other.
    figure.set_canvas(self)
    self.figure = figure
    # a dictionary from event name to a dictionary that maps cid->func
    self.callbacks = cbook.CallbackRegistry()
    self.widgetlock = widgets.LockDraw()
    self._button = None  # the button pressed
    self._key = None  # the key pressed
    self._lastx, self._lasty = None, None
    # Route press/scroll events into the pick machinery by default.
    self.button_pick_id = self.mpl_connect('button_press_event', self.pick)
    self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)
    self.mouse_grabber = None  # the axes currently grabbing mouse
    self.toolbar = None  # NavigationToolbar2 will set me
    self._is_idle_drawing = False
@contextmanager
def _idle_draw_cntx(self):
    # Mark the canvas as busy with an idle draw so nested draw_idle
    # calls become no-ops.  NOTE(review): deliberately no try/finally --
    # on an exception the flag stays True, matching historical behavior.
    self._is_idle_drawing = True
    yield
    self._is_idle_drawing = False
def is_saving(self):
    """
    Returns `True` when the renderer is in the process of saving
    to a file, rather than rendering for an on-screen buffer.
    """
    return self._is_saving
def onRemove(self, ev):
    """
    Mouse event processor which removes the top artist
    under the cursor.  Connect this to the 'mouse_press_event'
    using::

        canvas.mpl_connect('mouse_press_event',canvas.onRemove)
    """
    # Find the top artist under the cursor (stable sort keeps the
    # last-drawn artist among zorder ties on top).
    under = self.figure.hitlist(ev)
    under.sort(key=lambda artist: artist.zorder)
    h = under[-1] if under else None

    # Try deleting that artist; if it refuses, retry with its parent.
    while h:
        if h.remove():
            self.draw_idle()
            break
        parent = None
        for candidate in under:
            if h in candidate.get_children():
                parent = candidate
                break
        h = parent
def onHilite(self, ev):
    """
    Mouse event processor which highlights the artists
    under the cursor.  Connect this to the 'motion_notify_event'
    using::

        canvas.mpl_connect('motion_notify_event',canvas.onHilite)

    .. deprecated:: 1.5
    """
    msg = ("onHilite has been deprecated in 1.5 and will be removed "
           "in 1.6. This function has not been used internally by mpl "
           "since 2007.")
    warnings.warn(msg, mplDeprecation)
    # _active maps artist -> saved color state (created lazily here).
    if not hasattr(self, '_active'):
        self._active = dict()
    under = self.figure.hitlist(ev)
    enter = [a for a in under if a not in self._active]
    leave = [a for a in self._active if a not in under]
    # On leave restore the captured colour
    for a in leave:
        if hasattr(a, 'get_color'):
            a.set_color(self._active[a])
        elif hasattr(a, 'get_edgecolor'):
            a.set_edgecolor(self._active[a][0])
            a.set_facecolor(self._active[a][1])
        del self._active[a]
    # On enter, capture the color and repaint the artist
    # with the highlight colour.  Capturing colour has to
    # be done first in case the parent recolouring affects
    # the child.
    for a in enter:
        if hasattr(a, 'get_color'):
            self._active[a] = a.get_color()
        elif hasattr(a, 'get_edgecolor'):
            self._active[a] = (a.get_edgecolor(), a.get_facecolor())
        else:
            self._active[a] = None
    for a in enter:
        if hasattr(a, 'get_color'):
            a.set_color('red')
        elif hasattr(a, 'get_edgecolor'):
            a.set_edgecolor('red')
            a.set_facecolor('lightblue')
        else:
            # NOTE(review): redundant -- the capture loop above already
            # stored None for this artist; kept for byte-identical logic.
            self._active[a] = None
    self.draw_idle()
def pick(self, mouseevent):
    # Forward the mouse event to the figure's pick machinery unless a
    # widget currently holds the draw/widget lock.
    if not self.widgetlock.locked():
        self.figure.pick(mouseevent)
def blit(self, bbox=None):
    """
    blit the canvas in bbox (default entire canvas)

    No-op here; blitting backends override it.
    """
    pass
def resize(self, w, h):
    """
    set the canvas size in pixels

    No-op here; GUI backends override it.
    """
    pass
def draw_event(self, renderer):
    """
    Call all functions connected to 'draw_event' with a
    :class:`DrawEvent` carrying *renderer*.
    """
    name = 'draw_event'
    self.callbacks.process(name, DrawEvent(name, self, renderer))
def resize_event(self):
    """
    Call all functions connected to 'resize_event' with a
    :class:`ResizeEvent`.
    """
    name = 'resize_event'
    self.callbacks.process(name, ResizeEvent(name, self))
def close_event(self, guiEvent=None):
    """
    Call all functions connected to 'close_event' with a
    :class:`CloseEvent`.
    """
    name = 'close_event'
    try:
        self.callbacks.process(name,
                               CloseEvent(name, self, guiEvent=guiEvent))
    except (TypeError, AttributeError):
        # Suppress the TypeError when the python session is being killed.
        # It may be that a better solution would be a mechanism to
        # disconnect all callbacks upon shutdown.
        # AttributeError occurs on OSX with qt4agg upon exiting
        # with an open window; 'callbacks' attribute no longer exists.
        pass
def key_press_event(self, key, guiEvent=None):
    """
    Call all functions connected to 'key_press_event' with a
    :class:`KeyEvent`; also records *key* as the currently held key.
    """
    self._key = key
    name = 'key_press_event'
    event = KeyEvent(name, self, key, self._lastx, self._lasty,
                     guiEvent=guiEvent)
    self.callbacks.process(name, event)
def key_release_event(self, key, guiEvent=None):
    """
    Call all functions connected to 'key_release_event' with a
    :class:`KeyEvent`; clears the currently held key afterwards.
    """
    name = 'key_release_event'
    event = KeyEvent(name, self, key, self._lastx, self._lasty,
                     guiEvent=guiEvent)
    self.callbacks.process(name, event)
    self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
    """
    Called by artists that are picked; fires :class:`PickEvent`
    callbacks for registered listeners.
    """
    name = 'pick_event'
    event = PickEvent(name, self, mouseevent, artist,
                      guiEvent=mouseevent.guiEvent,
                      **kwargs)
    self.callbacks.process(name, event)
def scroll_event(self, x, y, step, guiEvent=None):
    """
    Backend derived classes should call this function on any
    scroll wheel event.  x,y are the canvas coords: 0,0 is lower,
    left.  button and key are as defined in MouseEvent.

    Fires a :class:`MouseEvent` to everything connected to
    'scroll_event'.
    """
    # Positive (or zero) steps scroll 'up', negative 'down'.
    self._button = 'up' if step >= 0 else 'down'
    name = 'scroll_event'
    mouseevent = MouseEvent(name, self, x, y, self._button, self._key,
                            step=step, guiEvent=guiEvent)
    self.callbacks.process(name, mouseevent)
def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):
    """
    Backend derived classes should call this function on any mouse
    button press.  x,y are the canvas coords: 0,0 is lower, left.
    button and key are as defined in :class:`MouseEvent`.

    Fires a :class:`MouseEvent` to everything connected to
    'button_press_event'.
    """
    self._button = button
    name = 'button_press_event'
    mouseevent = MouseEvent(name, self, x, y, button, self._key,
                            dblclick=dblclick, guiEvent=guiEvent)
    self.callbacks.process(name, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
    """
    Backend derived classes should call this function on any mouse
    button release.

    *x*
        the canvas coordinates where 0=left

    *y*
        the canvas coordinates where 0=bottom

    *guiEvent*
        the native UI event that generated the mpl event

    Fires a :class:`MouseEvent` to everything connected to
    'button_release_event'; clears the held-button state afterwards.
    """
    name = 'button_release_event'
    event = MouseEvent(name, self, x, y, button, self._key,
                       guiEvent=guiEvent)
    self.callbacks.process(name, event)
    self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
    """
    Backend derived classes should call this function on any
    motion-notify-event.

    *x*
        the canvas coordinates where 0=left

    *y*
        the canvas coordinates where 0=bottom

    *guiEvent*
        the native UI event that generated the mpl event

    Fires a :class:`MouseEvent` to everything connected to
    'motion_notify_event'; remembers the position for key events.
    """
    self._lastx, self._lasty = x, y
    name = 'motion_notify_event'
    event = MouseEvent(name, self, x, y, self._button, self._key,
                       guiEvent=guiEvent)
    self.callbacks.process(name, event)
def leave_notify_event(self, guiEvent=None):
    """
    Backend derived classes should call this function when leaving
    canvas.

    *guiEvent*
        the native UI event that generated the mpl event
    """
    # Fire figure_leave with the last known location event, then reset
    # the shared enter/leave tracking state.
    self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
    LocationEvent.lastevent = None
    self._lastx, self._lasty = None, None
def enter_notify_event(self, guiEvent=None, xy=None):
    """
    Backend derived classes should call this function when entering
    canvas.

    *guiEvent*
        the native UI event that generated the mpl event

    *xy*
        the coordinate location of the pointer when the canvas is
        entered
    """
    if xy is not None:
        # Remember the entry position for subsequent key events.
        self._lastx, self._lasty = xy

    self.callbacks.process('figure_enter_event',
                           Event('figure_enter_event', self, guiEvent))
def idle_event(self, guiEvent=None):
    """Called when GUI is idle; fires an :class:`IdleEvent`."""
    name = 'idle_event'
    self.callbacks.process(name, IdleEvent(name, self, guiEvent=guiEvent))
def grab_mouse(self, ax):
    """
    Set the child axes which are currently grabbing the mouse events.
    Usually called by the widgets themselves.

    Raises
    ------
    RuntimeError
        If the mouse is already grabbed by another axes.
    """
    if self.mouse_grabber not in (None, ax):
        # Error message previously read 'two different attempted to grab
        # mouse input' -- missing its subject.
        raise RuntimeError(
            'two different axes attempted to grab mouse input')
    self.mouse_grabber = ax
def release_mouse(self, ax):
    """
    Release the mouse grab held by the axes, ax.

    Usually called by the widgets.
    It is ok to call this even if you ax doesn't have the mouse
    grab currently.
    """
    # Only the current grabber may release the grab; anything else is a
    # silent no-op.
    if self.mouse_grabber is ax:
        self.mouse_grabber = None
def draw(self, *args, **kwargs):
    """
    Render the :class:`~matplotlib.figure.Figure`

    No-op here; every concrete backend overrides it.
    """
    pass
def draw_idle(self, *args, **kwargs):
    """
    :meth:`draw` only if idle; defaults to draw but backends can override
    """
    # The context manager suppresses re-entrant draw_idle calls made
    # while the draw itself is in progress.
    if not self._is_idle_drawing:
        with self._idle_draw_cntx():
            self.draw(*args, **kwargs)
def draw_cursor(self, event):
    """
    Draw a cursor in the event.axes if inaxes is not None.  Use
    native GUI drawing for efficiency if possible

    No-op here; GUI backends may override it.
    """
    pass
def get_width_height(self):
    """
    Return the figure width and height in points or pixels
    (depending on the backend), truncated to integers.
    """
    bbox = self.figure.bbox
    return int(bbox.width), int(bbox.height)
@classmethod
def get_supported_filetypes(cls):
    """Return dict of savefig file formats supported by this backend"""
    # Maps extension -> human-readable file-type name.
    return cls.filetypes
@classmethod
def get_supported_filetypes_grouped(cls):
    """Return a dict of savefig file formats supported by this backend,
    where the keys are a file type name, such as 'Joint Photographic
    Experts Group', and the values are a list of filename extensions used
    for that filetype, such as ['jpg', 'jpeg']."""
    groups = {}
    for ext, name in cls.filetypes.items():
        groups.setdefault(name, []).append(ext)
        groups[name].sort()
    return groups
def _get_output_canvas(self, format):
    """Return a canvas that is suitable for saving figures to a specified
    file format. If necessary, this function will switch to a registered
    backend that supports the format.

    Raises ValueError for an unsupported format.
    """
    # This canvas handles the requested format natively.
    if hasattr(self, 'print_%s' % format):
        return self

    # Otherwise defer to a registered canvas class for the format.
    canvas_class = get_registered_canvas_class(format)
    if canvas_class:
        return self.switch_backends(canvas_class)

    # No handler anywhere: report the formats that would have worked.
    supported = sorted(self.get_supported_filetypes())
    raise ValueError('Format "%s" is not supported.\n'
                     'Supported formats: '
                     '%s.' % (format, ', '.join(supported)))
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
                 orientation='portrait', format=None, **kwargs):
    """
    Render the figure to hardcopy. Set the figure patch face and edge
    colors. This is useful because some of the GUIs have a gray figure
    face color background and you'll probably want to override this on
    hardcopy.

    Arguments are:

    *filename*
        can also be a file object on image backends

    *orientation*
        only currently applies to PostScript printing.

    *dpi*
        the dots per inch to save the figure in; if None, use savefig.dpi

    *facecolor*
        the facecolor of the figure

    *edgecolor*
        the edgecolor of the figure

    *orientation*
        'landscape' | 'portrait' (not supported on all backends)

    *format*
        when set, forcibly set the file format to save to

    *bbox_inches*
        Bbox in inches. Only the given portion of the figure is
        saved. If 'tight', try to figure out the tight bbox of
        the figure. If None, use savefig.bbox

    *pad_inches*
        Amount of padding around the figure when bbox_inches is
        'tight'. If None, use savefig.pad_inches

    *bbox_extra_artists*
        A list of extra artists that will be considered when the
        tight bbox is calculated.
    """
    # NOTE(review): if anything below raises before the final reset,
    # ``_is_saving`` stays True — consider wrapping in try/finally.
    self._is_saving = True

    if format is None:
        # get format from filename, or from backend's default filetype
        if cbook.is_string_like(filename):
            format = os.path.splitext(filename)[1][1:]
        if format is None or format == '':
            format = self.get_default_filetype()
            if cbook.is_string_like(filename):
                filename = filename.rstrip('.') + '.' + format

    format = format.lower()

    # get canvas object and print method for format
    canvas = self._get_output_canvas(format)
    print_method = getattr(canvas, 'print_%s' % format)

    if dpi is None:
        dpi = rcParams['savefig.dpi']

    if dpi == 'figure':
        dpi = self.figure.dpi

    # Remember the original figure state so it can be restored after
    # printing (the figure is temporarily mutated for hardcopy).
    origDPI = self.figure.dpi
    origfacecolor = self.figure.get_facecolor()
    origedgecolor = self.figure.get_edgecolor()

    self.figure.dpi = dpi
    self.figure.set_facecolor(facecolor)
    self.figure.set_edgecolor(edgecolor)

    bbox_inches = kwargs.pop("bbox_inches", None)
    if bbox_inches is None:
        bbox_inches = rcParams['savefig.bbox']

    if bbox_inches:
        # call adjust_bbox to save only the given area
        if bbox_inches == "tight":
            # when bbox_inches == "tight", it saves the figure
            # twice. The first save command is just to estimate
            # the bounding box of the figure. A stringIO object is
            # used as a temporary file object, but it causes a
            # problem for some backends (ps backend with
            # usetex=True) if they expect a filename, not a
            # file-like object. As I think it is best to change
            # the backend to support file-like object, i'm going
            # to leave it as it is. However, a better solution
            # than stringIO seems to be needed. -JJL
            #result = getattr(self, method_name)
            result = print_method(
                io.BytesIO(),
                dpi=dpi,
                facecolor=facecolor,
                edgecolor=edgecolor,
                orientation=orientation,
                dryrun=True,
                **kwargs)
            renderer = self.figure._cachedRenderer
            bbox_inches = self.figure.get_tightbbox(renderer)

            bbox_artists = kwargs.pop("bbox_extra_artists", None)
            if bbox_artists is None:
                bbox_artists = self.figure.get_default_bbox_extra_artists()

            # Clip each extra artist's extent by its clip box/path and
            # drop degenerate (zero-area) boxes before taking the union.
            bbox_filtered = []
            for a in bbox_artists:
                bbox = a.get_window_extent(renderer)
                if a.get_clip_on():
                    clip_box = a.get_clip_box()
                    if clip_box is not None:
                        bbox = Bbox.intersection(bbox, clip_box)
                    clip_path = a.get_clip_path()
                    if clip_path is not None and bbox is not None:
                        clip_path = clip_path.get_fully_transformed_path()
                        bbox = Bbox.intersection(bbox,
                                                 clip_path.get_extents())
                if bbox is not None and (bbox.width != 0 or
                                         bbox.height != 0):
                    bbox_filtered.append(bbox)

            if bbox_filtered:
                _bbox = Bbox.union(bbox_filtered)
                # Convert the pixel-space union into inches.
                trans = Affine2D().scale(1.0 / self.figure.dpi)
                bbox_extra = TransformedBbox(_bbox, trans)
                bbox_inches = Bbox.union([bbox_inches, bbox_extra])

            pad = kwargs.pop("pad_inches", None)
            if pad is None:
                pad = rcParams['savefig.pad_inches']

            bbox_inches = bbox_inches.padded(pad)

        restore_bbox = tight_bbox.adjust_bbox(self.figure, bbox_inches,
                                              canvas.fixed_dpi)

        _bbox_inches_restore = (bbox_inches, restore_bbox)
    else:
        _bbox_inches_restore = None

    try:
        #result = getattr(self, method_name)(
        result = print_method(
            filename,
            dpi=dpi,
            facecolor=facecolor,
            edgecolor=edgecolor,
            orientation=orientation,
            bbox_inches_restore=_bbox_inches_restore,
            **kwargs)
    finally:
        # Always undo the temporary figure mutations, even on failure.
        if bbox_inches and restore_bbox:
            restore_bbox()

        self.figure.dpi = origDPI
        self.figure.set_facecolor(origfacecolor)
        self.figure.set_edgecolor(origedgecolor)
        self.figure.set_canvas(self)
    self._is_saving = False
    #self.figure.canvas.draw() ## seems superfluous
    return result
@classmethod
def get_default_filetype(cls):
    """
    Get the default savefig file format as specified in rcParam
    ``savefig.format``. Returned string excludes period. Overridden
    in backends that only support a single file type.
    """
    return rcParams['savefig.format']
def get_window_title(self):
    """
    Get the title text of the window containing the figure, or None
    when there is no window (e.g., a PS backend has no ``manager``).
    """
    if not hasattr(self, "manager"):
        return None
    return self.manager.get_window_title()
def set_window_title(self, title):
    """
    Set the title text of the window containing the figure.  Silently
    does nothing when there is no window (e.g., a PS backend).
    """
    if not hasattr(self, "manager"):
        return
    self.manager.set_window_title(title)
def get_default_filename(self):
    """
    Return a string, which includes extension, suitable for use as
    a default filename.

    The basename is derived from the window title (lowercased, spaces
    replaced by underscores); a numeric suffix is appended until the
    name does not collide with an existing file in the savefig
    directory.
    """
    default_basename = self.get_window_title() or 'image'
    default_basename = default_basename.lower().replace(' ', '_')
    default_filetype = self.get_default_filetype()
    default_filename = default_basename + '.' + default_filetype

    save_dir = os.path.expanduser(rcParams.get('savefig.directory', ''))

    # ensure non-existing filename in save dir
    i = 1
    while os.path.isfile(os.path.join(save_dir, default_filename)):
        # attach numerical count to basename
        default_filename = '{0}-{1}.{2}'.format(default_basename, i, default_filetype)
        i += 1

    return default_filename
def switch_backends(self, FigureCanvasClass):
    """
    Instantiate an instance of FigureCanvasClass for the same figure.

    This is used for backend switching, e.g., to instantiate a
    FigureCanvasPS from a FigureCanvasGTK.  No deep copying is done,
    so changes to one instance (e.g., setting figure size or line
    props) are reflected in the other.
    """
    canvas = FigureCanvasClass(self.figure)
    # Propagate the in-progress-save flag to the replacement canvas.
    canvas._is_saving = self._is_saving
    return canvas
def mpl_connect(self, s, func):
    """
    Register *func* as a callback for the event type named *s*.

    Recognized event names are: 'button_press_event',
    'button_release_event', 'draw_event', 'key_press_event',
    'key_release_event', 'motion_notify_event', 'pick_event',
    'resize_event', 'scroll_event', 'figure_enter_event',
    'figure_leave_event', 'axes_enter_event', 'axes_leave_event'
    and 'close_event'.

    *func* is called with a single
    :class:`matplotlib.backend_bases.Event` argument.  For location
    events (button and key press/release), if the mouse is over the
    axes, ``event.inaxes`` is set to the
    :class:`~matplotlib.axes.Axes` under the mouse, and
    ``event.xdata``/``event.ydata`` give the mouse location in data
    coordinates.  See
    :class:`~matplotlib.backend_bases.KeyEvent` and
    :class:`~matplotlib.backend_bases.MouseEvent` for more info.

    Returns a connection id that can be used with
    :meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.

    Example usage::

        def on_press(event):
            print('you pressed', event.button, event.xdata, event.ydata)

        cid = canvas.mpl_connect('button_press_event', on_press)
    """
    # 'idle_event' is deprecated; warn but still register the callback.
    if s == 'idle_event':
        warn_deprecated(1.5,
            "idle_event is only implemented for the wx backend, and will "
            "be removed in matplotlib 2.1. Use the animations module "
            "instead.")

    return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
    """
    Disconnect the callback registered under connection id *cid*.

    Example usage::

        cid = canvas.mpl_connect('button_press_event', on_press)
        # ... later
        canvas.mpl_disconnect(cid)
    """
    return self.callbacks.disconnect(cid)
def new_timer(self, *args, **kwargs):
    """
    Creates a new backend-specific subclass of
    :class:`backend_bases.Timer`. This is useful for getting periodic
    events through the backend's native event loop. Implemented only for
    backends with GUIs.

    optional arguments:

    *interval*
        Timer interval in milliseconds

    *callbacks*
        Sequence of (func, args, kwargs) where func(*args, **kwargs) will
        be executed by the timer every *interval*.
    """
    # Base class returns the generic TimerBase; GUI backends override
    # this to return their native-event-loop timer.
    return TimerBase(*args, **kwargs)
def flush_events(self):
    """
    Flush the GUI events for the figure. Implemented only for
    backends with GUIs.

    Raises NotImplementedError in this base class.
    """
    raise NotImplementedError
def start_event_loop(self, timeout):
    """
    Start an event loop. This is used to start a blocking event
    loop so that interactive functions, such as ginput and
    waitforbuttonpress, can wait for events. This should not be
    confused with the main GUI event loop, which is always running
    and has nothing to do with this.

    This is implemented only for backends with GUIs; the base class
    raises NotImplementedError.
    """
    raise NotImplementedError
def stop_event_loop(self):
    """
    Stop an event loop. This is used to stop a blocking event
    loop so that interactive functions, such as ginput and
    waitforbuttonpress, can wait for events.

    This is implemented only for backends with GUIs; the base class
    raises NotImplementedError.
    """
    raise NotImplementedError
def start_event_loop_default(self, timeout=0):
    """
    Start an event loop. This is used to start a blocking event
    loop so that interactive functions, such as ginput and
    waitforbuttonpress, can wait for events. This should not be
    confused with the main GUI event loop, which is always running
    and has nothing to do with this.

    This function provides default event loop functionality based
    on time.sleep that is meant to be used until event loop
    functions for each of the GUI backends can be written. As
    such, it throws a deprecated warning.

    Call signature::

        start_event_loop_default(self, timeout=0)

    This call blocks until a callback function triggers
    stop_event_loop() or *timeout* is reached. If *timeout* is
    <=0, never timeout.
    """
    # FIX: the original bound the warning text to a local named
    # ``str``, shadowing the builtin.
    message = ("Using default event loop until function specific"
               " to this GUI is implemented")
    warnings.warn(message, mplDeprecation)

    if timeout <= 0:
        timeout = np.inf
    timestep = 0.01
    counter = 0
    self._looping = True
    # Poll the GUI until stop_event_loop_default() clears the flag or
    # the accumulated sleep time exceeds *timeout*.
    while self._looping and counter * timestep < timeout:
        self.flush_events()
        time.sleep(timestep)
        counter += 1
def stop_event_loop_default(self):
    """
    Stop the default (time.sleep-based) blocking event loop started by
    :meth:`start_event_loop_default`, so that interactive functions,
    such as ginput and waitforbuttonpress, can stop waiting for events.

    Call signature::

        stop_event_loop_default(self)
    """
    self._looping = False
def key_press_handler(event, canvas, toolbar=None):
    """
    Implement the default mpl key bindings for the canvas and toolbar
    described at :ref:`key-event-handling`

    *event*
        a :class:`KeyEvent` instance
    *canvas*
        a :class:`FigureCanvasBase` instance
    *toolbar*
        a :class:`NavigationToolbar2` instance
    """
    # these bindings happen whether you are over an axes or not

    if event.key is None:
        return

    # Load key-mappings from your matplotlibrc file.
    fullscreen_keys = rcParams['keymap.fullscreen']
    home_keys = rcParams['keymap.home']
    back_keys = rcParams['keymap.back']
    forward_keys = rcParams['keymap.forward']
    pan_keys = rcParams['keymap.pan']
    zoom_keys = rcParams['keymap.zoom']
    save_keys = rcParams['keymap.save']
    quit_keys = rcParams['keymap.quit']
    grid_keys = rcParams['keymap.grid']
    toggle_yscale_keys = rcParams['keymap.yscale']
    toggle_xscale_keys = rcParams['keymap.xscale']
    # NOTE: ``all`` shadows the builtin; kept as-is (documentation-only
    # change).
    all = rcParams['keymap.all_axes']

    # toggle fullscreen mode (default key 'f')
    if event.key in fullscreen_keys:
        try:
            canvas.manager.full_screen_toggle()
        except AttributeError:
            pass

    # quit the figure (default key 'ctrl+w')
    if event.key in quit_keys:
        Gcf.destroy_fig(canvas.figure)

    if toolbar is not None:
        # home or reset mnemonic (default key 'h', 'home' and 'r')
        if event.key in home_keys:
            toolbar.home()
        # forward / backward keys to enable left handed quick navigation
        # (default key for backward: 'left', 'backspace' and 'c')
        elif event.key in back_keys:
            toolbar.back()
        # (default key for forward: 'right' and 'v')
        elif event.key in forward_keys:
            toolbar.forward()
        # pan mnemonic (default key 'p')
        elif event.key in pan_keys:
            toolbar.pan()
            toolbar._set_cursor(event)
        # zoom mnemonic (default key 'o')
        elif event.key in zoom_keys:
            toolbar.zoom()
            toolbar._set_cursor(event)
        # saving current figure (default key 's')
        elif event.key in save_keys:
            toolbar.save_figure()

    # The remaining bindings require the mouse to be over an axes.
    if event.inaxes is None:
        return

    # these bindings require the mouse to be over an axes to trigger
    # switching on/off a grid in current axes (default key 'g')
    if event.key in grid_keys:
        event.inaxes.grid()
        canvas.draw()
    # toggle scaling of y-axes between 'log and 'linear' (default key 'l')
    elif event.key in toggle_yscale_keys:
        ax = event.inaxes
        scale = ax.get_yscale()
        if scale == 'log':
            ax.set_yscale('linear')
            ax.figure.canvas.draw()
        elif scale == 'linear':
            ax.set_yscale('log')
            ax.figure.canvas.draw()
    # toggle scaling of x-axes between 'log and 'linear' (default key 'k')
    elif event.key in toggle_xscale_keys:
        ax = event.inaxes
        scalex = ax.get_xscale()
        if scalex == 'log':
            ax.set_xscale('linear')
            ax.figure.canvas.draw()
        elif scalex == 'linear':
            ax.set_xscale('log')
            ax.figure.canvas.draw()
    elif (event.key.isdigit() and event.key != '0') or event.key in all:
        # keys in list 'all' enables all axes (default key 'a'),
        # otherwise if key is a number only enable this particular axes
        # if it was the axes, where the event was raised
        if not (event.key in all):
            n = int(event.key) - 1
        for i, a in enumerate(canvas.figure.get_axes()):
            # consider axes, in which the event was raised
            # FIXME: Why only this axes?
            if event.x is not None and event.y is not None \
                    and a.in_axes(event):
                if event.key in all:
                    a.set_navigate(True)
                else:
                    a.set_navigate(i == n)
class NonGuiException(Exception):
    """Raised by ``FigureManagerBase.show`` on backends without a GUI
    window, so callers can optionally warn instead of crashing."""
class FigureManagerBase(object):
    """
    Helper class for pyplot mode, wraps everything up into a neat bundle

    Public attributes:

    *canvas*
        A :class:`FigureCanvasBase` instance

    *num*
        The figure number
    """
    def __init__(self, canvas, num):
        self.canvas = canvas
        canvas.manager = self  # store a pointer to parent
        self.num = num

        # Only hook up the default key handler for the classic toolbar;
        # the 'toolmanager' toolbar does its own key handling.
        if rcParams['toolbar'] != 'toolmanager':
            self.key_press_handler_id = self.canvas.mpl_connect(
                'key_press_event',
                self.key_press)
        else:
            self.key_press_handler_id = None
        # The bare string below is inert at runtime; it documents
        # ``key_press_handler_id`` and is kept verbatim.
        """
        The returned id from connecting the default key handler via
        :meth:`FigureCanvasBase.mpl_connnect`.

        To disable default key press handling::

            manager, canvas = figure.canvas.manager, figure.canvas
            canvas.mpl_disconnect(manager.key_press_handler_id)
        """

    def show(self):
        """
        For GUI backends, show the figure window and redraw.
        For non-GUI backends, raise an exception to be caught
        by :meth:`~matplotlib.figure.Figure.show`, for an
        optional warning.
        """
        raise NonGuiException()

    def destroy(self):
        # No-op here; GUI backends tear down their window.
        pass

    def full_screen_toggle(self):
        # No-op here; GUI backends toggle their window's state.
        pass

    def resize(self, w, h):
        """For gui backends, resize the window (in pixels)."""
        pass

    def key_press(self, event):
        """
        Implement the default mpl key bindings defined at
        :ref:`key-event-handling`
        """
        if rcParams['toolbar'] != 'toolmanager':
            key_press_handler(event, self.canvas, self.canvas.toolbar)

    def show_popup(self, msg):
        """
        Display message in a popup -- GUI only
        """
        pass

    def get_window_title(self):
        """
        Get the title text of the window containing the figure.
        Return None for non-GUI backends (e.g., a PS backend).
        """
        return 'image'

    def set_window_title(self, title):
        """
        Set the title text of the window containing the figure. Note that
        this has no effect for non-GUI backends (e.g., a PS backend).
        """
        pass
# Re-export the shared cursor enumeration from the tools module so the
# toolbar code below can refer to it simply as ``cursors``.
cursors = tools.cursors
class NavigationToolbar2(object):
    """
    Base class for the navigation cursor, version 2

    backends must implement a canvas that handles connections for
    'button_press_event' and 'button_release_event'. See
    :meth:`FigureCanvasBase.mpl_connect` for more information

    They must also define

    :meth:`save_figure`
        save the current figure

    :meth:`set_cursor`
        if you want the pointer icon to change

    :meth:`_init_toolbar`
        create your toolbar widget

    :meth:`draw_rubberband` (optional)
        draw the zoom to rect "rubberband" rectangle

    :meth:`press` (optional)
        whenever a mouse button is pressed, you'll be notified with
        the event

    :meth:`release` (optional)
        whenever a mouse button is released, you'll be notified with
        the event

    :meth:`dynamic_update` (optional)
        dynamically update the window while navigating

    :meth:`set_message` (optional)
        display message

    :meth:`set_history_buttons` (optional)
        you can change the history back / forward buttons to
        indicate disabled / enabled state.

    That's it, we'll do the rest!
    """

    # list of toolitems to add to the toolbar, format is:
    # (
    #   text, # the text of the button (often not visible to users)
    #   tooltip_text, # the tooltip shown on hover (where possible)
    #   image_file, # name of the image for the button (without the extension)
    #   name_of_method, # name of the method in NavigationToolbar2 to call
    # )
    toolitems = (
        ('Home', 'Reset original view', 'home', 'home'),
        ('Back', 'Back to previous view', 'back', 'back'),
        ('Forward', 'Forward to next view', 'forward', 'forward'),
        (None, None, None, None),
        ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
        ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
        (None, None, None, None),
        ('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
        ('Save', 'Save the figure', 'filesave', 'save_figure'),
    )

    def __init__(self, canvas):
        self.canvas = canvas
        canvas.toolbar = self
        # a dict from axes index to a list of view limits
        self._views = cbook.Stack()
        self._positions = cbook.Stack()  # stack of subplot positions
        self._xypress = None  # the location and axis info at the time
                              # of the press
        self._idPress = None
        self._idRelease = None
        self._active = None
        self._lastCursor = None
        self._init_toolbar()
        self._idDrag = self.canvas.mpl_connect(
            'motion_notify_event', self.mouse_move)

        self._ids_zoom = []
        self._zoom_mode = None

        self._button_pressed = None  # determined by the button pressed
                                     # at start

        self.mode = ''  # a mode string for the status bar
        self.set_history_buttons()

    def set_message(self, s):
        """Display a message on toolbar or in status bar"""
        pass

    def back(self, *args):
        """move back up the view lim stack"""
        self._views.back()
        self._positions.back()
        self.set_history_buttons()
        self._update_view()

    def dynamic_update(self):
        # Optional hook: backends may redraw during navigation.
        pass

    def draw_rubberband(self, event, x0, y0, x1, y1):
        """Draw a rectangle rubberband to indicate zoom limits"""
        pass

    def remove_rubberband(self):
        """Remove the rubberband"""
        pass

    def forward(self, *args):
        """Move forward in the view lim stack"""
        self._views.forward()
        self._positions.forward()
        self.set_history_buttons()
        self._update_view()

    def home(self, *args):
        """Restore the original view"""
        self._views.home()
        self._positions.home()
        self.set_history_buttons()
        self._update_view()

    def _init_toolbar(self):
        """
        This is where you actually build the GUI widgets (called by
        __init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
        ``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
        across backends (there are ppm versions in CVS also).

        You just need to set the callbacks

        home : self.home
        back : self.back
        forward : self.forward
        hand : self.pan
        zoom_to_rect : self.zoom
        filesave : self.save_figure

        You only need to define the last one - the others are in the base
        class implementation.
        """
        raise NotImplementedError

    def _set_cursor(self, event):
        # Update the pointer icon to match the active tool, changing it
        # only when it actually differs from the last cursor we set.
        if not event.inaxes or not self._active:
            if self._lastCursor != cursors.POINTER:
                self.set_cursor(cursors.POINTER)
                self._lastCursor = cursors.POINTER
        else:
            if self._active == 'ZOOM':
                if self._lastCursor != cursors.SELECT_REGION:
                    self.set_cursor(cursors.SELECT_REGION)
                    self._lastCursor = cursors.SELECT_REGION
            elif (self._active == 'PAN' and
                  self._lastCursor != cursors.MOVE):
                self.set_cursor(cursors.MOVE)
                self._lastCursor = cursors.MOVE

    def mouse_move(self, event):
        self._set_cursor(event)

        if event.inaxes and event.inaxes.get_navigate():

            try:
                s = event.inaxes.format_coord(event.xdata, event.ydata)
            except (ValueError, OverflowError):
                pass
            else:
                # NOTE(review): Artist.contains returns a (bool, detail)
                # tuple in some matplotlib versions — verify this filter
                # actually discriminates.
                artists = [a for a in event.inaxes.mouseover_set
                           if a.contains(event)]

                if artists:
                    # Report cursor data for the topmost (highest zorder)
                    # artist under the mouse, unless it is the axes patch.
                    a = max(enumerate(artists), key=lambda x: x[1].zorder)[1]
                    if a is not event.inaxes.patch:
                        data = a.get_cursor_data(event)
                        if data is not None:
                            s += ' [%s]' % a.format_cursor_data(data)

                if len(self.mode):
                    self.set_message('%s, %s' % (self.mode, s))
                else:
                    self.set_message(s)
        else:
            self.set_message(self.mode)

    def pan(self, *args):
        """Activate the pan/zoom tool. pan with left button, zoom with right"""
        # set the pointer icon and button press funcs to the
        # appropriate callbacks
        if self._active == 'PAN':
            self._active = None
        else:
            self._active = 'PAN'
        if self._idPress is not None:
            self._idPress = self.canvas.mpl_disconnect(self._idPress)
            self.mode = ''

        if self._idRelease is not None:
            self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
            self.mode = ''

        if self._active:
            self._idPress = self.canvas.mpl_connect(
                'button_press_event', self.press_pan)
            self._idRelease = self.canvas.mpl_connect(
                'button_release_event', self.release_pan)
            self.mode = 'pan/zoom'
            self.canvas.widgetlock(self)
        else:
            self.canvas.widgetlock.release(self)

        for a in self.canvas.figure.get_axes():
            a.set_navigate_mode(self._active)

        self.set_message(self.mode)

    def press(self, event):
        """Called whenver a mouse button is pressed."""
        pass

    def press_pan(self, event):
        """the press mouse button in pan/zoom mode callback"""

        if event.button == 1:
            self._button_pressed = 1
        elif event.button == 3:
            self._button_pressed = 3
        else:
            self._button_pressed = None
            return

        x, y = event.x, event.y

        # push the current view to define home if stack is empty
        if self._views.empty():
            self.push_current()

        self._xypress = []
        for i, a in enumerate(self.canvas.figure.get_axes()):
            if (x is not None and y is not None and a.in_axes(event) and
                    a.get_navigate() and a.can_pan()):
                a.start_pan(x, y, event.button)
                self._xypress.append((a, i))
                # While panning, route motion events to drag_pan instead
                # of the default mouse_move handler.
                self.canvas.mpl_disconnect(self._idDrag)
                self._idDrag = self.canvas.mpl_connect('motion_notify_event',
                                                       self.drag_pan)

        self.press(event)

    def press_zoom(self, event):
        """the press mouse button in zoom to rect mode callback"""
        # If we're already in the middle of a zoom, pressing another
        # button works to "cancel"
        if self._ids_zoom != []:
            for zoom_id in self._ids_zoom:
                self.canvas.mpl_disconnect(zoom_id)
            self.release(event)
            self.draw()
            self._xypress = None
            self._button_pressed = None
            self._ids_zoom = []
            return

        if event.button == 1:
            self._button_pressed = 1
        elif event.button == 3:
            self._button_pressed = 3
        else:
            self._button_pressed = None
            return

        x, y = event.x, event.y

        # push the current view to define home if stack is empty
        if self._views.empty():
            self.push_current()

        self._xypress = []
        for i, a in enumerate(self.canvas.figure.get_axes()):
            if (x is not None and y is not None and a.in_axes(event) and
                    a.get_navigate() and a.can_zoom()):
                self._xypress.append((x, y, a, i, a._get_view()))

        id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
        id2 = self.canvas.mpl_connect('key_press_event',
                                      self._switch_on_zoom_mode)
        id3 = self.canvas.mpl_connect('key_release_event',
                                      self._switch_off_zoom_mode)

        self._ids_zoom = id1, id2, id3
        self._zoom_mode = event.key

        self.press(event)

    def _switch_on_zoom_mode(self, event):
        # Holding 'x' or 'y' during a zoom constrains it to that axis.
        self._zoom_mode = event.key
        self.mouse_move(event)

    def _switch_off_zoom_mode(self, event):
        self._zoom_mode = None
        self.mouse_move(event)

    def push_current(self):
        """push the current view limits and position onto the stack"""
        views = []
        pos = []
        for a in self.canvas.figure.get_axes():
            views.append(a._get_view())
            # Store both the original and modified positions
            pos.append((
                a.get_position(True).frozen(),
                a.get_position().frozen()))
        self._views.push(views)
        self._positions.push(pos)
        self.set_history_buttons()

    def release(self, event):
        """this will be called whenever mouse button is released"""
        pass

    def release_pan(self, event):
        """the release mouse button callback in pan/zoom mode"""

        if self._button_pressed is None:
            return
        # Restore the default motion handler now that the pan is over.
        self.canvas.mpl_disconnect(self._idDrag)
        self._idDrag = self.canvas.mpl_connect(
            'motion_notify_event', self.mouse_move)
        for a, ind in self._xypress:
            a.end_pan()
        if not self._xypress:
            return
        self._xypress = []
        self._button_pressed = None
        self.push_current()
        self.release(event)
        self.draw()

    def drag_pan(self, event):
        """the drag callback in pan/zoom mode"""

        for a, ind in self._xypress:
            #safer to use the recorded button at the press than current button:
            #multiple button can get pressed during motion...
            a.drag_pan(self._button_pressed, event.key, event.x, event.y)
        self.dynamic_update()

    def drag_zoom(self, event):
        """the drag callback in zoom mode"""

        if self._xypress:
            x, y = event.x, event.y
            lastx, lasty, a, ind, view = self._xypress[0]

            # adjust x, last, y, last
            x1, y1, x2, y2 = a.bbox.extents
            x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
            y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)

            # Constrain the rubberband to one axis when in 'x'/'y' mode.
            if self._zoom_mode == "x":
                x1, y1, x2, y2 = a.bbox.extents
                y, lasty = y1, y2
            elif self._zoom_mode == "y":
                x1, y1, x2, y2 = a.bbox.extents
                x, lastx = x1, x2

            self.draw_rubberband(event, x, y, lastx, lasty)

    def release_zoom(self, event):
        """the release mouse button callback in zoom to rect mode"""
        for zoom_id in self._ids_zoom:
            self.canvas.mpl_disconnect(zoom_id)
        self._ids_zoom = []

        self.remove_rubberband()

        if not self._xypress:
            return

        last_a = []

        for cur_xypress in self._xypress:
            x, y = event.x, event.y
            lastx, lasty, a, ind, view = cur_xypress
            # ignore singular clicks - 5 pixels is a threshold
            # allows the user to "cancel" a zoom action
            # by zooming by less than 5 pixels
            if ((abs(x - lastx) < 5 and self._zoom_mode!="y") or
                    (abs(y - lasty) < 5 and self._zoom_mode!="x")):
                self._xypress = None
                self.release(event)
                self.draw()
                return

            # detect twinx,y axes and avoid double zooming
            twinx, twiny = False, False
            if last_a:
                for la in last_a:
                    if a.get_shared_x_axes().joined(a, la):
                        twinx = True
                    if a.get_shared_y_axes().joined(a, la):
                        twiny = True
            last_a.append(a)

            if self._button_pressed == 1:
                direction = 'in'
            elif self._button_pressed == 3:
                direction = 'out'
            else:
                continue

            a._set_view_from_bbox((lastx, lasty, x, y), direction,
                                  self._zoom_mode, twinx, twiny)

        self.draw()
        self._xypress = None
        self._button_pressed = None

        self._zoom_mode = None

        self.push_current()
        self.release(event)

    def draw(self):
        """Redraw the canvases, update the locators"""
        for a in self.canvas.figure.get_axes():
            xaxis = getattr(a, 'xaxis', None)
            yaxis = getattr(a, 'yaxis', None)
            locators = []
            if xaxis is not None:
                locators.append(xaxis.get_major_locator())
                locators.append(xaxis.get_minor_locator())
            if yaxis is not None:
                locators.append(yaxis.get_major_locator())
                locators.append(yaxis.get_minor_locator())

            for loc in locators:
                loc.refresh()
        self.canvas.draw_idle()

    def _update_view(self):
        """Update the viewlim and position from the view and
        position stack for each axes
        """

        views = self._views()
        if views is None:
            return
        pos = self._positions()
        if pos is None:
            return
        for i, a in enumerate(self.canvas.figure.get_axes()):
            a._set_view(views[i])
            # Restore both the original and modified positions
            a.set_position(pos[i][0], 'original')
            a.set_position(pos[i][1], 'active')

        self.canvas.draw_idle()

    def save_figure(self, *args):
        """Save the current figure"""
        raise NotImplementedError

    def set_cursor(self, cursor):
        """
        Set the current cursor to one of the :class:`Cursors`
        enums values
        """
        pass

    def update(self):
        """Reset the axes stack"""
        self._views.clear()
        self._positions.clear()
        self.set_history_buttons()

    def zoom(self, *args):
        """Activate zoom to rect mode"""
        if self._active == 'ZOOM':
            self._active = None
        else:
            self._active = 'ZOOM'

        if self._idPress is not None:
            self._idPress = self.canvas.mpl_disconnect(self._idPress)
            self.mode = ''

        if self._idRelease is not None:
            self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
            self.mode = ''

        if self._active:
            self._idPress = self.canvas.mpl_connect('button_press_event',
                                                    self.press_zoom)
            self._idRelease = self.canvas.mpl_connect('button_release_event',
                                                      self.release_zoom)
            self.mode = 'zoom rect'
            self.canvas.widgetlock(self)
        else:
            self.canvas.widgetlock.release(self)

        for a in self.canvas.figure.get_axes():
            a.set_navigate_mode(self._active)

        self.set_message(self.mode)

    def set_history_buttons(self):
        """Enable or disable back/forward button"""
        pass
class ToolContainerBase(object):
    """
    Base class for all tool containers, e.g. toolbars.

    Attributes
    ----------
    toolmanager : `ToolManager` object that holds the tools that
        this `ToolContainer` wants to communicate with.
    """

    def __init__(self, toolmanager):
        self.toolmanager = toolmanager
        # Keep the container in sync when tools are removed elsewhere.
        self.toolmanager.toolmanager_connect('tool_removed_event',
                                             self._remove_tool_cbk)

    def _tool_toggled_cbk(self, event):
        """
        Captures the 'tool_trigger_[name]'

        This only gets used for toggled tools
        """
        self.toggle_toolitem(event.tool.name, event.tool.toggled)

    def add_tool(self, tool, group, position=-1):
        """
        Adds a tool to this container

        Parameters
        ----------
        tool : tool_like
            The tool to add, see `ToolManager.get_tool`.
        group : str
            The name of the group to add this tool to.
        position : int (optional)
            The position within the group to place this tool.  Defaults to end.
        """
        tool = self.toolmanager.get_tool(tool)
        image = self._get_image_filename(tool.image)
        # A tool is "toggleable" when it exposes a ``toggled`` attribute.
        toggle = getattr(tool, 'toggled', None) is not None
        self.add_toolitem(tool.name, group, position,
                          image, tool.description, toggle)
        if toggle:
            self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
                                                 self._tool_toggled_cbk)

    def _remove_tool_cbk(self, event):
        """Captures the 'tool_removed_event' signal and removes the tool"""
        self.remove_toolitem(event.tool.name)

    def _get_image_filename(self, image):
        """Find the image based on its name"""
        # TODO: better search for images, they are not always in the
        # datapath
        basedir = os.path.join(rcParams['datapath'], 'images')
        if image is not None:
            fname = os.path.join(basedir, image)
        else:
            fname = None
        return fname

    def trigger_tool(self, name):
        """
        Trigger the tool

        Parameters
        ----------
        name : String
            Name(id) of the tool triggered from within the container
        """
        self.toolmanager.trigger_tool(name, sender=self)

    def add_toolitem(self, name, group, position, image, description, toggle):
        """
        Add a toolitem to the container

        This method must get implemented per backend

        The callback associated with the button click event,
        must be **EXACTLY** `self.trigger_tool(name)`

        Parameters
        ----------
        name : string
            Name of the tool to add, this gets used as the tool's ID and as the
            default label of the buttons
        group : String
            Name of the group that this tool belongs to
        position : Int
            Position of the tool within its group, if -1 it goes at the End
        image_file : String
            Filename of the image for the button or `None`
        description : String
            Description of the tool, used for the tooltips
        toggle : Bool
            * `True` : The button is a toggle (change the pressed/unpressed
              state between consecutive clicks)
            * `False` : The button is a normal button (returns to unpressed
              state after release)
        """
        raise NotImplementedError

    def toggle_toolitem(self, name, toggled):
        """
        Toggle the toolitem without firing event

        Parameters
        ----------
        name : String
            Id of the tool to toggle
        toggled : bool
            Whether to set this tool as toggled or not.
        """
        raise NotImplementedError

    def remove_toolitem(self, name):
        """
        Remove a toolitem from the `ToolContainer`

        This method must get implemented per backend

        Called when `ToolManager` emits a `tool_removed_event`

        Parameters
        ----------
        name : string
            Name of the tool to remove
        """
        raise NotImplementedError
class StatusbarBase(object):
    """Base class for the statusbar"""
    def __init__(self, toolmanager):
        self.toolmanager = toolmanager
        # Forward ToolManager messages to this statusbar.
        self.toolmanager.toolmanager_connect('tool_message_event',
                                             self._message_cbk)

    def _message_cbk(self, event):
        """Captures the 'tool_message_event' and set the message"""
        self.set_message(event.message)

    def set_message(self, s):
        """
        Display a message on toolbar or in status bar

        Parameters
        ----------
        s : str
            Message text
        """
        # Base class is a no-op; GUI backends render the message.
        pass
| mit |
dpshelio/sunpy | sunpy/instr/lyra.py | 2 | 31288 | import csv
# Standard library
import copy
import datetime
import os.path
import sqlite3
import urllib
import urllib.request  # explicit: `import urllib` alone does not bind the submodule
from warnings import warn

# Third-party
import numpy as np
import pandas
from astropy.io import fits
from astropy.time import Time

# Local (sunpy)
from sunpy.time import parse_time
from sunpy.util.net import check_download_file
from sunpy.util.config import get_and_create_download_dir
LYTAF_REMOTE_PATH = "http://proba2.oma.be/lyra/data/lytaf/"
__all__ = ['remove_lytaf_events_from_timeseries',
'get_lytaf_events',
'get_lytaf_event_types',
'download_lytaf_database',
'split_series_using_lytaf']
def remove_lytaf_events_from_timeseries(ts, artifacts=None,
                                        return_artifacts=False,
                                        lytaf_path=None,
                                        force_use_local_lytaf=False):
    """
    Remove periods of LYRA artifacts defined in LYTAF from a TimeSeries.

    Parameters
    ----------
    ts : `sunpy.timeseries.TimeSeries`
        Input time series from which artifact periods are removed.
    artifacts : `list` of `str`
        Artifact types to remove (see http://proba2.oma.be/data/TARDIS),
        e.g. ``["LAR"]`` for large angle rotations.  By default no
        artifacts are removed.
    return_artifacts : `bool`
        If True, also return a `dict` describing which artifacts were
        found, removed, not removed, and not found.  Default=False.
    lytaf_path : `str`
        Directory path where the LYRA annotation files are stored.
    force_use_local_lytaf : `bool`
        If True, never replace the current local LYTAF files with newer
        online versions, even if they do not cover the entire input time
        range.  Default=False.

    Returns
    -------
    ts_new : `sunpy.timeseries.TimeSeries`
        Copy of the input with the artifact periods removed.
    artifact_status : `dict`
        Only returned when ``return_artifacts`` is True.  Keys are
        "lytaf" (all events in range), "removed", "not_removed" and
        "not_found"; see `_remove_lytaf_events` for details.

    References
    ----------
    [1] http://proba2.oma.be/data/TARDIS

    Examples
    --------
    >>> import sunpy.timeseries as ts
    >>> import sunpy.data.sample  # doctest: +REMOTE_DATA
    >>> from sunpy.instr.lyra import remove_lytaf_events_from_timeseries
    >>> lyrats = ts.TimeSeries(sunpy.data.sample.LYRA_LEVEL3_TIMESERIES, source='LYRA')  # doctest: +REMOTE_DATA
    >>> ts_nolars = remove_lytaf_events_from_timeseries(lyrats, artifacts=["LAR"])  # doctest: +REMOTE_DATA
    """
    # Fall back to the default sunpy download directory.
    if not lytaf_path:
        lytaf_path = get_and_create_download_dir()
    # Strip the artifact periods from the index and every data column.
    columns = ts.data.columns
    clean_index, clean_columns, artifact_status = _remove_lytaf_events(
        ts.data.index,
        channels=[np.asanyarray(ts.data[name]) for name in columns],
        artifacts=artifacts, return_artifacts=True, lytaf_path=lytaf_path,
        force_use_local_lytaf=force_use_local_lytaf)
    # Build a deep copy of the input series holding only the clean samples.
    ts_new = copy.deepcopy(ts)
    ts_new.data = pandas.DataFrame(
        index=clean_index,
        data={name: clean_columns[i] for i, name in enumerate(columns)})
    if return_artifacts:
        return ts_new, artifact_status
    return ts_new
def _remove_lytaf_events(time, channels=None, artifacts=None,
                         return_artifacts=False, fitsfile=None,
                         csvfile=None, filecolumns=None,
                         lytaf_path=None, force_use_local_lytaf=False):
    """
    Remove periods of LYRA artifacts from a time series.

    Periods corresponding to the artifact types listed in ``artifacts``
    (as recorded in the LYRA annotation files) are deleted from the input
    time array and, if given, from each irradiance array in ``channels``.
    Element i of every channel array is assumed to correspond to element i
    of ``time``.

    Parameters
    ----------
    time : `numpy.ndarray` of `astropy.time.Time`
        Times of the time series samples.
    channels : `list` of `numpy.ndarray` convertible to float64, optional
        Irradiance arrays sampled at ``time``; each must have the same
        number of elements as ``time``.
    artifacts : `list` of `str`
        Artifact types to remove (see [1]), e.g. ``["LAR"]`` for large
        angle rotations.  Required; an empty value raises ValueError.
    return_artifacts : `bool`
        If True, also return a dict describing the artifacts found,
        removed, not removed, and not found.  Default=False.
    fitsfile : `str`, optional
        If given, the cleaned series is also written to this FITS file.
    csvfile : `str`, optional
        If given, the cleaned series is also written to this CSV file.
    filecolumns : `list` of `str`, optional
        Column names for any output file.  Defaults to
        ``["time", "channel0", ..., "channelN"]``.
    lytaf_path : `str`
        Directory path where the LYRA annotation files are stored.
    force_use_local_lytaf : `bool`
        If True, never replace local LYTAF files with newer online
        versions, even if they do not cover the input range.  Default=False.

    Returns
    -------
    clean_time : `numpy.ndarray` of `astropy.time.Time`
        Time array with artifact periods removed.
    clean_channels : `list` of `numpy.ndarray`
        Only returned when ``channels`` is given; the channel arrays with
        artifact periods removed.
    artifact_status : `dict`
        Only returned when ``return_artifacts`` is True, with keys
        "lytaf" (all events in range), "removed", "not_removed" and
        "not_found".

    References
    ----------
    [1] http://proba2.oma.be/data/TARDIS
    """
    # Check inputs
    if not lytaf_path:
        lytaf_path = get_and_create_download_dir()
    if channels and not isinstance(channels, list):
        raise TypeError("channels must be None or a list of numpy arrays "
                        "of dtype 'float64'.")
    if not artifacts:
        raise ValueError("User has supplied no artifacts to remove.")
    if isinstance(artifacts, str):
        artifacts = [artifacts]
    if not all(isinstance(artifact_type, str) for artifact_type in artifacts):
        raise TypeError("All elements in artifacts must be strings.")
    all_lytaf_event_types = get_lytaf_event_types(lytaf_path=lytaf_path,
                                                  print_event_types=False)
    for artifact in artifacts:
        if artifact not in all_lytaf_event_types:
            print(all_lytaf_event_types)
            raise ValueError("{0} is not a valid artifact type. See above.".format(artifact))
    # Define outputs
    clean_time = parse_time(time)
    clean_channels = copy.deepcopy(channels)
    artifacts_not_found = []
    # Get LYTAF file for given time range
    lytaf = get_lytaf_events(time[0], time[-1], lytaf_path=lytaf_path,
                             force_use_local_lytaf=force_use_local_lytaf)
    # Find events in lytaf which are to be removed from time series.
    artifact_indices = np.empty(0, dtype="int64")
    for artifact_type in artifacts:
        indices = np.where(lytaf["event_type"] == artifact_type)[0]
        # If none of a given type of artifact is found, record this
        # type in artifact_not_found list.
        if len(indices) == 0:
            artifacts_not_found.append(artifact_type)
        else:
            # Else, record the indices of the artifacts of this type
            artifact_indices = np.concatenate((artifact_indices, indices))
    artifact_indices.sort()
    # Remove relevant artifacts from timeseries. If none of the
    # artifacts the user wanted removed were found, raise a warning and
    # continue with code.
    if not len(artifact_indices):
        warn("None of user supplied artifacts were found.")
        artifacts_not_found = artifacts
    else:
        # Collect the indices of all samples that fall inside any artifact
        # period, then delete them from the time and channel arrays.
        bad_indices = np.empty(0, dtype="int64")
        all_indices = np.arange(len(time))
        for index in artifact_indices:
            bad_period = np.logical_and(time >= lytaf["begin_time"][index].datetime,
                                        time <= lytaf["end_time"][index].datetime)
            bad_indices = np.append(bad_indices, all_indices[bad_period])
        clean_time = np.delete(clean_time, bad_indices)
        if channels:
            for i, f in enumerate(clean_channels):
                clean_channels[i] = np.delete(f, bad_indices)
    # If return_artifacts kwarg is True, prepare a dict containing
    # information on what artifacts were found, removed, etc.
    if return_artifacts:
        artifact_status = {"lytaf": lytaf,
                           "removed": lytaf[artifact_indices],
                           "not_removed": np.delete(lytaf, artifact_indices),
                           "not_found": artifacts_not_found}
    # Output FITS file if fitsfile kwarg is set
    if fitsfile:
        # Create time array of time strings rather than Time objects
        # and verify filecolumns have been correctly input. If None,
        # generate generic filecolumns (see docstring of _prep_columns).
        string_time, filecolumns = _prep_columns(time, channels, filecolumns)
        # Prepare column objects.
        cols = [fits.Column(name=filecolumns[0], format="26A",
                            array=string_time)]
        if channels:
            for i, f in enumerate(channels):
                cols.append(fits.Column(name=filecolumns[i+1], format="D",
                                        array=f))
        coldefs = fits.ColDefs(cols)
        # fits.new_table() has been removed from astropy; the supported
        # constructor is BinTableHDU.from_columns().
        tbhdu = fits.BinTableHDU.from_columns(coldefs)
        hdu = fits.PrimaryHDU()
        tbhdulist = fits.HDUList([hdu, tbhdu])
        # Write data to fits file.
        tbhdulist.writeto(fitsfile)
    # Output csv file if csv kwarg is set.
    if csvfile:
        # Convert times to strings and validate/generate column names.
        string_time, filecolumns = _prep_columns(time, channels, filecolumns)
        # Open and write data to csv file.
        with open(csvfile, 'w') as openfile:
            csvwriter = csv.writer(openfile, delimiter=';')
            # Write header.
            csvwriter.writerow(filecolumns)
            # Write data rows: time string followed by each channel value.
            if not channels:
                for i in range(len(time)):
                    csvwriter.writerow(string_time[i])
            else:
                for i in range(len(time)):
                    row = [string_time[i]]
                    for f in channels:
                        row.append(f[i])
                    csvwriter.writerow(row)
    # Return values.
    if return_artifacts:
        if not channels:
            return clean_time, artifact_status
        else:
            return clean_time, clean_channels, artifact_status
    else:
        if not channels:
            return clean_time
        else:
            return clean_time, clean_channels
def get_lytaf_events(start_time, end_time, lytaf_path=None,
                     combine_files=("lyra", "manual", "ppt", "science"),
                     csvfile=None, force_use_local_lytaf=False):
    """
    Extract a combined LYTAF record for a given time range.

    Given a time range defined by start_time and end_time, this function
    extracts the relevant segments of each LYRA annotation database and
    combines them into a single, time-ordered record array.

    Parameters
    ----------
    start_time : `astropy.time.Time` or `str`
        Start time of period for which annotation file is required.
    end_time : `astropy.time.Time` or `str`
        End time of period for which annotation file is required.
    lytaf_path : `str`
        Directory path where the LYRA annotation files are stored.
    combine_files : `tuple` of `str`
        States which LYRA annotation files are to be combined.
        Default is all four, i.e. lyra, manual, ppt, science.
        See Notes section for an explanation of each.
    csvfile : `str`, optional
        If given, the combined annotation data is also written to this
        semicolon-delimited CSV file.
    force_use_local_lytaf : `bool`
        Ensures current local versions of the lytaf files are not replaced
        by up-to-date online versions even if the local files do not cover
        the entire input time range.  Default=False.

    Returns
    -------
    lytaf : `numpy.recarray`
        Containing the various parameters stored in the LYTAF files.

    Notes
    -----
    There are four LYRA annotation files which mark different types of
    events or artifacts in the data.  They are named annotation_suffix.db
    where suffix is lyra, manual, ppt, or science:
    annotation_lyra.db : possible data effects from normal LYRA operation.
    annotation_manual.db : unusual or manually logged events.
    annotation_ppt.db : effects due to pointing/positioning of PROBA2.
    annotation_science.db : scientifically interesting events, e.g. flares.

    References
    ----------
    Further documentation: http://proba2.oma.be/data/TARDIS

    Examples
    --------
    Get all events in the LYTAF files for January 2014

    >>> from sunpy.instr.lyra import get_lytaf_events
    >>> lytaf = get_lytaf_events('2014-01-01', '2014-02-01')  # doctest: +SKIP
    """
    # Check lytaf path
    if not lytaf_path:
        lytaf_path = get_and_create_download_dir()
    # Parse start_time and end_time
    start_time = parse_time(start_time)
    end_time = parse_time(end_time)
    # Check combine_files contains only recognised database suffixes.
    if not all(suffix in ["lyra", "manual", "ppt", "science"]
               for suffix in combine_files):
        raise ValueError("Elements in combine_files must be strings equalling "
                         "'lyra', 'manual', 'ppt', or 'science'.")
    # Remove any duplicates from combine_files input and fix the order.
    combine_files = sorted(set(combine_files))
    # Convert input times to UNIX timestamp format since this is the
    # time format in the annotation files.
    start_time_uts = (start_time - Time('1970-1-1')).sec
    end_time_uts = (end_time - Time('1970-1-1')).sec
    # Define numpy record array which will hold the information from
    # the annotation files.
    lytaf = np.empty((0,), dtype=[("insertion_time", object),
                                  ("begin_time", object),
                                  ("reference_time", object),
                                  ("end_time", object),
                                  ("event_type", object),
                                  ("event_definition", object)])
    # Access annotation files
    for suffix in combine_files:
        # Check database files are present
        dbname = "annotation_{0}.db".format(suffix)
        check_download_file(dbname, LYTAF_REMOTE_PATH, lytaf_path)
        # Open SQLITE3 annotation files
        connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
        # Create cursor to manipulate data in annotation file
        cursor = connection.cursor()
        # Determine the period spanned by the local database: start time
        # of the first event and end time of the last event.
        cursor.execute("select begin_time from event order by begin_time asc "
                       "limit 1;")
        db_first_begin_time = cursor.fetchone()[0]
        db_first_begin_time = datetime.datetime.fromtimestamp(db_first_begin_time)
        cursor.execute("select end_time from event order by end_time desc "
                       "limit 1;")
        db_last_end_time = cursor.fetchone()[0]
        db_last_end_time = datetime.datetime.fromtimestamp(db_last_end_time)
        # If lytaf does not include entire input time range...
        if not force_use_local_lytaf:
            if end_time > db_last_end_time or start_time < db_first_begin_time:
                # ...close lytaf file...
                cursor.close()
                connection.close()
                # ...Download latest lytaf file...
                check_download_file(dbname, LYTAF_REMOTE_PATH, lytaf_path,
                                    replace=True)
                # ...and open new version of lytaf database.
                connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
                cursor = connection.cursor()
        # Select and extract the events overlapping the requested range.
        # Use sqlite3 parameter binding ("?") instead of string formatting.
        cursor.execute("select insertion_time, begin_time, reference_time, "
                       "end_time, eventType_id from event where end_time >= ? "
                       "and begin_time <= ?",
                       (start_time_uts, end_time_uts))
        event_rows = cursor.fetchall()
        # Select and extract the event types from eventType table
        cursor.row_factory = sqlite3.Row
        cursor.execute("select * from eventType")
        eventType_rows = cursor.fetchall()
        eventType_id = []
        eventType_type = []
        eventType_definition = []
        for eventType_row in eventType_rows:
            eventType_id.append(eventType_row["id"])
            eventType_type.append(eventType_row["type"])
            eventType_definition.append(eventType_row["definition"])
        # Append each event to the record array, resolving its type id to
        # the corresponding type string and definition.
        for event_row in event_rows:
            id_index = eventType_id.index(event_row[4])
            lytaf = np.append(lytaf,
                              np.array((Time(datetime.datetime.utcfromtimestamp(event_row[0]),
                                             format='datetime'),
                                        Time(datetime.datetime.utcfromtimestamp(event_row[1]),
                                             format='datetime'),
                                        Time(datetime.datetime.utcfromtimestamp(event_row[2]),
                                             format='datetime'),
                                        Time(datetime.datetime.utcfromtimestamp(event_row[3]),
                                             format='datetime'),
                                        eventType_type[id_index],
                                        eventType_definition[id_index]), dtype=lytaf.dtype))
        # Close file
        cursor.close()
        connection.close()
    # Sort lytaf in ascending order of begin time
    np.recarray.sort(lytaf, order="begin_time")
    # If csvfile kwarg is set, write out lytaf to csv file
    if csvfile:
        # Open and write data to csv file.
        with open(csvfile, 'w') as openfile:
            csvwriter = csv.writer(openfile, delimiter=';')
            # Write header.
            csvwriter.writerow(lytaf.dtype.names)
            # Write data.
            for row in lytaf:
                new_row = []
                new_row.append(row[0].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[1].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[2].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[3].strftime("%Y-%m-%dT%H:%M:%S"))
                new_row.append(row[4])
                new_row.append(row[5])
                csvwriter.writerow(new_row)
    return lytaf
def get_lytaf_event_types(lytaf_path=None, print_event_types=True):
    """Prints the different event types in each of the LYTAF databases.

    Parameters
    ----------
    lytaf_path : `str`
        Path location where LYTAF files are stored.
        Default = Path stored in config file.
    print_event_types : `bool`
        If True, prints the artifacts in each lytaf database to screen.

    Returns
    -------
    all_event_types : `list`
        List of all event types in all lytaf databases.
    """
    # Set lytaf_path if not supplied by the user.
    if not lytaf_path:
        lytaf_path = get_and_create_download_dir()
    suffixes = ["lyra", "manual", "ppt", "science"]
    all_event_types = []
    # For each database file extract the event types and print them.
    if print_event_types:
        print("\nLYTAF Event Types\n-----------------\n")
    for suffix in suffixes:
        dbname = "annotation_{0}.db".format(suffix)
        # Check database file exists, else download it.
        check_download_file(dbname, LYTAF_REMOTE_PATH, lytaf_path)
        # Open SQLITE3 LYTAF files
        connection = sqlite3.connect(os.path.join(lytaf_path, dbname))
        # Create cursor to manipulate data in annotation file
        cursor = connection.cursor()
        cursor.execute("select type from eventType;")
        event_types = cursor.fetchall()
        all_event_types.append(event_types)
        if print_event_types:
            print("----------------\n{0} database\n----------------"
                  .format(suffix))
            for event_type in event_types:
                print(str(event_type[0]))
            print(" ")
    # Flatten the per-database lists of 1-tuples into a single flat list
    # of event type strings.
    all_event_types = [event_type[0] for event_types in all_event_types
                       for event_type in event_types]
    return all_event_types
def download_lytaf_database(lytaf_dir=''):
    """Download the latest PROBA2 pointing database from the PROBA2 Science Center.

    The file ``annotation_ppt.db`` is fetched from the PROBA2 server and
    written into ``lytaf_dir`` (current directory by default).

    Parameters
    ----------
    lytaf_dir : `str`
        Directory into which the database file is saved.
    """
    url = 'http://proba2.oma.be/lyra/data/lytaf/annotation_ppt.db'
    destination = os.path.join(lytaf_dir, 'annotation_ppt.db')
    # Requires `import urllib.request`; `import urllib` alone does not
    # guarantee that the `request` submodule is bound.
    urllib.request.urlretrieve(url, destination)
    return
def split_series_using_lytaf(timearray, data, lytaf):
    """
    Split a LYRA timeseries into intervals of good data.

    PROBA-2 analysis code for splitting up LYRA timeseries around locations
    where LARs (and other data events) are observed.

    Parameters
    ----------
    timearray : `numpy.ndarray`
        Times understood by the `sunpy.time.parse_time` function.
    data : `numpy.ndarray`
        Data values corresponding to the given time array.
    lytaf : `numpy.recarray`
        Events obtained from querying the LYTAF database using
        lyra.get_lytaf_events().

    Returns
    -------
    output : `list` of dictionaries
        Each dictionary contains a sub-series ('subtimes', 'subdata')
        corresponding to an interval of 'good data'.
    """
    n = len(timearray)
    # mask[i] == 1 marks a good sample; 0 marks a sample inside an event.
    mask = np.ones(n)
    el = len(lytaf)
    # make the input time array a list of Time objects
    time_array = [parse_time(tim) for tim in timearray]
    # scan through each entry retrieved from the LYTAF database
    for j in range(0, el):
        # want to mark all times with events as bad in the mask, i.e. = 0
        start_dt = lytaf['begin_time'][j]
        end_dt = lytaf['end_time'][j]
        # find the start and end indices for each event
        start_ind = np.searchsorted(time_array, start_dt)
        end_ind = np.searchsorted(time_array, end_dt)
        # append the mask to mark event as 'bad'
        mask[start_ind:end_ind] = 0
    # Discontinuities in the mask mark good/bad boundaries.
    diffmask = np.diff(mask)
    tmp_discontinuity = np.where(diffmask != 0.)
    # disc contains the indices of mask where there are discontinuities
    disc = tmp_discontinuity[0]
    if len(disc) == 0:
        print('No events found within time series interval. '
              'Returning original series.')
        return [{'subtimes': time_array, 'subdata': data}]
    # -1 in diffmask means went from good data to bad
    # +1 means went from bad data to good
    # want to get the data between a +1 and the next -1
    # if the first discontinuity is a -1 then the start of the series was good.
    if diffmask[disc[0]] == -1.0:
        # make sure we can always start from disc[0] below
        disc = np.insert(disc, 0, 0)
    split_series = []
    limit = len(disc)
    # now extract the good data regions and ignore the bad ones
    for h in range(0, limit, 2):
        if h == limit-1:
            # Last good region runs to the end of the series.  Slice with
            # [disc[h]:] — the previous [disc[h]:-1] wrongly dropped the
            # final sample of the series.
            subtimes = time_array[disc[h]:]
            subdata = data[disc[h]:]
            subseries = {'subtimes': subtimes, 'subdata': subdata}
            split_series.append(subseries)
        else:
            subtimes = time_array[disc[h]:disc[h+1]]
            subdata = data[disc[h]:disc[h+1]]
            subseries = {'subtimes': subtimes, 'subdata': subdata}
            split_series.append(subseries)
    return split_series
def _lytaf_event2string(integers):
if type(integers) == int:
integers = [integers]
# else:
# n=len(integers)
out = []
for i in integers:
if i == 1:
out.append('LAR')
if i == 2:
out.append('N/A')
if i == 3:
out.append('UV occult.')
if i == 4:
out.append('Vis. occult.')
if i == 5:
out.append('Offpoint')
if i == 6:
out.append('SAA')
if i == 7:
out.append('Auroral zone')
if i == 8:
out.append('Moon in LYRA')
if i == 9:
out.append('Moon in SWAP')
if i == 10:
out.append('Venus in LYRA')
if i == 11:
out.append('Venus in SWAP')
return out
def _prep_columns(time, channels=None, filecolumns=None):
    """
    Check and prepare data to be written out to a file.

    Firstly, converts the elements of ``time`` (assumed to be
    `astropy.time.Time`-compatible) to ISO time strings.  Secondly,
    validates ``filecolumns``: its length must equal one (for time) plus
    the number of arrays in ``channels``, otherwise a ValueError is
    raised.  If ``filecolumns`` is None, the names
    ``["time", "channel0", ..., "channelN"]`` are generated, where N is
    the number of channel arrays (0-indexed counting).

    Returns
    -------
    string_time : `numpy.ndarray` of `str`
        ISO-formatted time strings with nanosecond precision.
    filecolumns : `list` of `str`
        Validated or generated column names.
    """
    # Convert np.array or Time objects to ISO time strings.
    time = parse_time(time)
    time.precision = 9
    string_time = np.array(time.isot)
    # If column names were supplied...
    if filecolumns:
        # ...check all the elements are strings...
        if not all(isinstance(column, str) for column in filecolumns):
            raise TypeError("All elements in filecolumns must be strings.")
        # ...and that there is exactly one name per data array, i.e.
        # time plus each channel.  Otherwise raise a ValueError.
        ncol = 1 + len(channels) if channels else 1
        if len(filecolumns) != ncol:
            raise ValueError("Number of elements in filecolumns must be "
                             "equal to the number of input data arrays, "
                             "i.e. time + elements in channels.")
    # If no names were given, create the default list:
    # ["time", "channel0", ..., "channelN"].
    else:
        if channels:
            filecolumns = ["channel{0}".format(fluxnum)
                           for fluxnum in range(len(channels))]
            filecolumns.insert(0, "time")
        else:
            filecolumns = ["time"]
    return string_time, filecolumns
| bsd-2-clause |
michalsenkyr/spark | examples/src/main/python/sql/arrow.py | 16 | 5034 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Arrow in Spark.
Run with:
./bin/spark-submit examples/src/main/python/sql/arrow.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
def dataframe_with_arrow_example(spark):
    """Round-trip a Pandas DataFrame through Spark using Arrow-based transfers."""
    # $example on:dataframe_with_arrow$
    import numpy as np
    import pandas as pd

    # Enable Arrow-based columnar data transfers
    spark.conf.set("spark.sql.execution.arrow.enabled", "true")

    # Generate a Pandas DataFrame
    pdf = pd.DataFrame(np.random.rand(100, 3))

    # Create a Spark DataFrame from a Pandas DataFrame using Arrow
    df = spark.createDataFrame(pdf)

    # Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
    result_pdf = df.select("*").toPandas()
    # $example off:dataframe_with_arrow$
    print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe()))
def scalar_pandas_udf_example(spark):
    """Demonstrate a scalar pandas_udf that multiplies two columns element-wise."""
    # $example on:scalar_pandas_udf$
    import pandas as pd

    from pyspark.sql.functions import col, pandas_udf
    from pyspark.sql.types import LongType

    # Declare the function and create the UDF
    def multiply_func(a, b):
        return a * b

    multiply = pandas_udf(multiply_func, returnType=LongType())

    # The function for a pandas_udf should be able to execute with local Pandas data
    x = pd.Series([1, 2, 3])
    print(multiply_func(x, x))
    # 0    1
    # 1    4
    # 2    9
    # dtype: int64

    # Create a Spark DataFrame, 'spark' is an existing SparkSession
    df = spark.createDataFrame(pd.DataFrame(x, columns=["x"]))

    # Execute function as a Spark vectorized UDF
    df.select(multiply(col("x"), col("x"))).show()
    # +-------------------+
    # |multiply_func(x, x)|
    # +-------------------+
    # |                  1|
    # |                  4|
    # |                  9|
    # +-------------------+
    # $example off:scalar_pandas_udf$
def grouped_map_pandas_udf_example(spark):
    """Demonstrate a GROUPED_MAP pandas_udf that mean-centers each group."""
    # $example on:grouped_map_pandas_udf$
    from pyspark.sql.functions import pandas_udf, PandasUDFType

    df = spark.createDataFrame(
        [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ("id", "v"))

    @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)
    def subtract_mean(pdf):
        # pdf is a pandas.DataFrame
        v = pdf.v
        return pdf.assign(v=v - v.mean())

    df.groupby("id").apply(subtract_mean).show()
    # +---+----+
    # | id|   v|
    # +---+----+
    # |  1|-0.5|
    # |  1| 0.5|
    # |  2|-3.0|
    # |  2|-1.0|
    # |  2| 4.0|
    # +---+----+
    # $example off:grouped_map_pandas_udf$
def grouped_agg_pandas_udf_example(spark):
    """Demonstrate a GROUPED_AGG pandas_udf used with groupby and over a window."""
    # $example on:grouped_agg_pandas_udf$
    from pyspark.sql.functions import pandas_udf, PandasUDFType
    from pyspark.sql import Window

    df = spark.createDataFrame(
        [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ("id", "v"))

    @pandas_udf("double", PandasUDFType.GROUPED_AGG)
    def mean_udf(v):
        return v.mean()

    df.groupby("id").agg(mean_udf(df['v'])).show()
    # +---+-----------+
    # | id|mean_udf(v)|
    # +---+-----------+
    # |  1|        1.5|
    # |  2|        6.0|
    # +---+-----------+

    w = Window \
        .partitionBy('id') \
        .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
    df.withColumn('mean_v', mean_udf(df['v']).over(w)).show()
    # +---+----+------+
    # | id|   v|mean_v|
    # +---+----+------+
    # |  1| 1.0|   1.5|
    # |  1| 2.0|   1.5|
    # |  2| 3.0|   6.0|
    # |  2| 5.0|   6.0|
    # |  2|10.0|   6.0|
    # +---+----+------+
    # $example off:grouped_agg_pandas_udf$
if __name__ == "__main__":
    spark = SparkSession \
        .builder \
        .appName("Python Arrow-in-Spark example") \
        .getOrCreate()

    print("Running Pandas to/from conversion example")
    dataframe_with_arrow_example(spark)
    print("Running pandas_udf scalar example")
    scalar_pandas_udf_example(spark)
    print("Running pandas_udf grouped map example")
    grouped_map_pandas_udf_example(spark)
    # Bug fix: grouped_agg_pandas_udf_example was defined but never invoked.
    print("Running pandas_udf grouped agg example")
    grouped_agg_pandas_udf_example(spark)

    spark.stop()
| apache-2.0 |
hugobowne/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 87 | 2510 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
# Shuffle with a fixed seed for reproducibility, then hold out 10% for testing.
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]

###############################################################################
# Fit regression model
# 500 depth-4 trees with a slow learning rate and least-squares ('ls') loss.
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
          'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)

clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)

###############################################################################
# Plot training deviance

# compute test set deviance after each boosting iteration
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)

for i, y_pred in enumerate(clf.staged_predict(X_test)):
    test_score[i] = clf.loss_(y_test, y_pred)

plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
         label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
         label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')

###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
tobybreckon/bee-wi | examples/pathFollower.py | 1 | 17217 | # Copyright (c) 2014
# Joey Green, School of Engineering and Computer Sciences, Durham University, UK
# All versions of this software (both binary and source) must retain
# and display this copyright notice.
# License : GPL - http://www.gnu.org/copyleft/gpl.html
# ******************** PATH FOLLOWER MODULE *********************
#
# Author: Joey Green
# Date: 05/09/14
#
# Description: This module uses the Buggy module, and allows the Buggy to
# follow red signs containing arrows placed on the floor.
#
# ***************************************************************
import sys
import cv2
import cv2.cv as cv
import time
import numpy as np
from matplotlib import pyplot as plt
from scipy import ndimage
sys.path.append('../sdk')
from buggy import Buggy
# ********************** VARIABLES **********************
#
# Modify these to tailor to your environment.
#
DETECT_FLOOR, DETECT_WALL = True, False # Whether to detect signs on the floor and/or wall (at least one must be True!)
MAX_RECOGNISING_COUNT = 20 # The higher this is, the more accurate and slower it will be
MAX_TURN_STEPS = 1 # How many times the buggy continues turning after a sign
# The file location of the picture of the CROSS (X) to make the Buggy STOP
PATH_CROSS = '..\\doc\\symbols\\cross_simple.png'
# The number of arrow images (usually an odd number - left, right and forward)
NUMBER_OF_ARROWS = 13
# The path to the directory that contains the arrows for turning the Buggy (must be labelled from 00)
PATH_ARROW_FOLDER = '..\\doc\\symbols\\15_step\\'
if DETECT_FLOOR: # Image and Object Points to warp the camera perspective for detecting signs on the floor
PATH_IMAGE_POINTS = '..\\doc\\imagePoints\\imgPts.npy'
PATH_OBJECT_POINTS = '..\\doc\\imagePoints\\objPts.npy'
CHROMA_RED_THRESHOLD = 130 # The threshold to use in binary threshing of the Chroma Red Channel (default 135)
DISPLAY = True # Whether you want to display what the Buggy sees
# *******************************************************
# ******************** LOCAL FUNCTIONS ******************
def wait():
    """Sleep for 10 ms to pace the main loop and camera-feed polling."""
    # Fixed: removed a stray '5' expression statement that had been fused
    # onto the banner comment line (harmless no-op, but clearly a typo).
    time.sleep(0.01)
def mean(nums):
    """Return the arithmetic mean of *nums* rounded to the nearest int.

    An empty sequence yields 0 rather than raising ZeroDivisionError.
    """
    if not nums:
        return 0
    return int(round(float(sum(nums)) / len(nums)))
def follow_arrow_direction(best_arrow, moving_forward, wheel_direction, arrow_buffer):
    """Steer the buggy according to the best-matching arrow template.

    Arrow indices below NUMBER_OF_ARROWS//2 turn left, above it turn right,
    and the middle index means drive straight ahead.  The angle is derived
    from the index via ARROW_DEGREE_DIFF.

    NOTE(review): this reads the module-level globals ``best_result``,
    ``buggy``, ``NUMBER_OF_ARROWS`` and ``ARROW_DEGREE_DIFF`` without them
    being passed in -- ``best_result`` in particular is set in the main
    loop; confirm this coupling is intentional.
    NOTE(review): ``wheel_direction is not "neutral"`` compares string
    identity, not equality -- works only by CPython interning; should be
    ``!=``.
    """
    # Left
    if best_arrow < (NUMBER_OF_ARROWS//2):
        print '{0} Match: {1} degrees Left'.format(str(best_result)[:5], 90-(ARROW_DEGREE_DIFF*best_arrow))
        wheel_direction = "left"
        buggy.turnLeft(90-(ARROW_DEGREE_DIFF*best_arrow))
    # Right
    elif best_arrow > (NUMBER_OF_ARROWS//2):
        print '{0} Match: {1} degrees Right'.format(str(best_result)[:5], 90-(ARROW_DEGREE_DIFF*(NUMBER_OF_ARROWS-best_arrow-1)))
        wheel_direction = "right"
        buggy.turnRight(90-(ARROW_DEGREE_DIFF*(NUMBER_OF_ARROWS-best_arrow-1)))
    # Forward
    else:
        print '{0} Match: Straight Forward'.format(str(best_result)[:5])
        if wheel_direction is not "neutral":
            wheel_direction = "neutral"
            buggy.straightenWheels()
        # On the left/right paths confident_match presumably resolves to the
        # module-level global of the same name -- TODO confirm.
        confident_match = True if best_result > 0.2 else False # Will only move forward fast if confident
        moving_forward = True
    # Decision made: discard the accumulated arrow votes.
    del arrow_buffer[:]
    return moving_forward, wheel_direction, confident_match, arrow_buffer
# *****************************************************
# ******************* PROGRAM *************************
# Instantiate Buggy
buggy = Buggy()
# If connected
if buggy.isConnected:
# Set up signs for template matching
sign_cross = cv2.imread(PATH_CROSS, 0)
# Get image resolution from this
IMAGE_RESOLUTION_w, IMAGE_RESOLUTION_h = sign_cross.shape[::-1]
# Arrows
arrow_list = list()
ARROW_DEGREE_DIFF = 180//(NUMBER_OF_ARROWS-1)
for i in range(NUMBER_OF_ARROWS):
index_string = str(i)
if len(index_string) == 1:
index_string = '0' + index_string
arrow_list.append(ndimage.median_filter(cv2.imread(PATH_ARROW_FOLDER + index_string + '.png', 0), 5))
# Set up matrix and stuff
if DETECT_FLOOR:
imgPts = np.load(PATH_IMAGE_POINTS)
objPts = np.load(PATH_OBJECT_POINTS)
resolution = (640,480)
H, mask = cv2.findHomography(imgPts, objPts)
# Get initial crop image
crop_img = np.zeros((320,240,3), np.uint8)
# create a CLAHE object (Arguments are optional).
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
# Set some variables for movement and turning
moving_forward, wait_once, creep_once, confident_match = False, False, False, False
wheel_direction = "neutral"
arrow_buffer = list()
turn_counter = 0
# Stream the feed
buggy.streamFeed()
# Wait for feed to start...
wait()
# Loop
try:
if DISPLAY:
cv2.namedWindow('detected_circles',cv2.WINDOW_NORMAL)
cv2.namedWindow('cropped_image',cv2.WINDOW_NORMAL)
cv2.namedWindow('best_match',cv2.WINDOW_NORMAL)
if DETECT_WALL:
cv2.namedWindow('Chroma_red_normal',cv2.WINDOW_NORMAL)
if DETECT_FLOOR:
cv2.namedWindow('birds_eye',cv2.WINDOW_NORMAL)
cv2.namedWindow('Chroma_red_birds',cv2.WINDOW_NORMAL)
while True:
# Get current Image
current_image = buggy.getCurrentImage()
while current_image is None:
current_image = buggy.getCurrentImage()
display_image = np.copy(current_image)
try:
# Set circles to None
circles = None
circle_on_floor = False
# DETECT ON FLOOR (priority if both checked)
if DETECT_FLOOR:
# Get birds eye Perspective
birds_eye_Homography = cv2.warpPerspective(current_image, H, resolution, cv.CV_INTER_LINEAR | cv.CV_WARP_INVERSE_MAP | cv.CV_WARP_FILL_OUTLIERS)
birds_eye_copy = np.copy(birds_eye_Homography)
# YCrCb
YCrCb = cv2.cvtColor(birds_eye_Homography, cv2.COLOR_BGR2YCR_CB)
Chroma_red = YCrCb[:,:,1]
# Threshold it
_, Chroma_red = cv2.threshold(Chroma_red,CHROMA_RED_THRESHOLD,255,cv2.THRESH_BINARY)
# Blur it to remove noise
Chroma_red = ndimage.median_filter(Chroma_red, 9)
try:
# Circles
circles = cv2.HoughCircles(Chroma_red,cv.CV_HOUGH_GRADIENT,1,300,
param1=40,param2=20,minRadius=30,maxRadius=100)
circles = np.uint16(np.around(circles))
# Set circle found on floor
circle_on_floor = True
except:
pass
# If set to detect walls, and has not found any circles
if DETECT_WALL and (circles is None or not len(circles[0,:])):
# YCrCb
YCrCb2 = cv2.cvtColor(current_image, cv2.COLOR_BGR2YCR_CB)
Chroma_red_normal = YCrCb2[:,:,1]
# Threshold it
_, Chroma_red_normal = cv2.threshold(Chroma_red_normal,CHROMA_RED_THRESHOLD,255,cv2.THRESH_BINARY)
# Blur it to remove noise
Chroma_red_normal = ndimage.median_filter(Chroma_red_normal, 9)
try:
# Circles
circles = cv2.HoughCircles(Chroma_red_normal,cv.CV_HOUGH_GRADIENT,1,300,
param1=40,param2=20,minRadius=0,maxRadius=0)
circles = np.uint16(np.around(circles))
# Set circle found on wall
circle_on_floor = False
except:
pass
if len(circles[0,:]): # If detected a circle:
# Grab the circle as a variable
i = circles[0,0,:]
circle_x, circle_y = i[0], i[1]
y1 = 0 if i[1] < i[2] else i[1]-i[2]
x1 = 0 if i[0] < i[2] else i[0]-i[2] # TO AVOID OVERFLOW (using ushort_scalars)
y2, x2 = i[1]+i[2], i[0]+i[2] # These are the points of the cropped image
# Draw Circle and get cropped sign (depending if on wall or floor)
if circle_on_floor:
# draw the outer circle
cv2.circle(birds_eye_copy,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv2.circle(birds_eye_copy,(i[0],i[1]),2,(0,0,255),3)
# Get actualy crop image
crop_img_rgb = birds_eye_Homography[y1:y2, x1:x2]
else:
# draw the outer circle
cv2.circle(display_image,(i[0],i[1]),i[2],(0,255,0),2)
# draw the center of the circle
cv2.circle(display_image,(i[0],i[1]),2,(0,0,255),3)
# Get actualy crop image
crop_img_rgb = current_image[y1:y2, x1:x2]
crop_img_rgb = cv2.resize(crop_img_rgb, (IMAGE_RESOLUTION_w, IMAGE_RESOLUTION_h))
crop_img_rgb = cv2.cvtColor(crop_img_rgb,cv2.COLOR_BGR2GRAY)
_, crop_img_rgb = cv2.threshold(crop_img_rgb,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
crop_img_rgb = ndimage.median_filter(crop_img_rgb, 5)
# Threshold
cross_threshold = 0.7
# Cross Result
res_cross = cv2.matchTemplate(crop_img_rgb,sign_cross,cv2.TM_CCOEFF_NORMED)
# Find location
loc_cross = np.where( res_cross >= cross_threshold )
if loc_cross[1].any():
# Stop (if not already stationary)
if wheel_direction is not "neutral":
wheel_direction = "neutral"
buggy.straightenWheels()
if moving_forward:
print "Stopping!"
moving_forward = False
del arrow_buffer[:]
else:
# If y at high point, don't move (optimum position)
if not circle_on_floor or circle_y > 350:
# Try all the arrows
best_result = 0
best_arrow = None
for i in range(NUMBER_OF_ARROWS):
# Arrow Result
max_res_arrow = np.amax(cv2.matchTemplate(crop_img_rgb,arrow_list[i],cv2.TM_CCOEFF_NORMED))
if max_res_arrow > best_result:
best_result = max_res_arrow
best_arrow = i
# Add best value to arrow buffer
if best_arrow is not None:
arrow_buffer.append(best_arrow)
# If more than MAX, go with average
if len(arrow_buffer) >= MAX_RECOGNISING_COUNT:
print "Taking average arrow direction...", arrow_buffer
best_arrow = mean(arrow_buffer)
# Follow arrow
moving_forward, wheel_direction, confident_match, arrow_buffer = follow_arrow_direction(best_arrow, moving_forward, wheel_direction, arrow_buffer)
buggy.clearBuffer() # Moved, so clear the buffer
else:
if len(arrow_buffer) == 1:
print "Learning direction of arrow..."
wait_once = True
else:
# Move towards sign
print "Moving towards sign..."
if circle_x > 160:
buggy.turnRight((circle_x/320)*100)
else:
buggy.turnLeft((circle_x/160)*100)
creep_once, moving_forward = True, False
else:
# Move towards largest contour
# Countours (already threshed)
contours, hierarchy = cv2.findContours(Chroma_red, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Find the index of the largest contour
areas = [cv2.contourArea(c) for c in contours]
if areas:
max_index = np.argmax(areas)
cnt = contours[max_index]
# Straight Bounding Rectangle
x,y,w,h = cv2.boundingRect(cnt)
center_x = x + w/2
# Display bounding box on chroma red channel
cv2.rectangle(Chroma_red,(x,y),(x+w,y+h),(0,255,0),2)
# Move towards sign
print "Moving towards red object..."
if center_x > 160:
buggy.turnRight((center_x/320)*100)
else:
buggy.turnLeft((center_x/160)*100)
creep_once, moving_forward = True, False
else:
#No contours
print "Can't detect anything"
del arrow_buffer[:]
except:
pass
""" MOVING THE BUGGY (Don't delete!) """
if (moving_forward and not wait_once) or (creep_once):
for x in range(10):
wait()
if confident_match:
buggy.forward(1)
for x in range(10):
wait()
confident_match = False
else:
buggy.creep()
if creep_once:
creep_once = False
elif buggy.isStuck(mean_t=0.98, std_t=0.01):
print "I think I'm stuck! Randomised reverse..."
buggy.randomisedRecovery()
elif turn_counter >= MAX_TURN_STEPS:
# Set wheel direction to neutral
wheel_direction = "neutral"
buggy.straightenWheels()
turn_counter = 0
elif wheel_direction is not "neutral":
# Increment Turn Counter
turn_counter += 1
elif wait_once:
wait_once = False
""" DISPLAYING IMAGE """
if DISPLAY:
# Show
try:
cv2.imshow('detected_circles',display_image)
except:
pass
if DETECT_FLOOR:
try:
cv2.imshow('birds_eye',birds_eye_copy)
except:
pass
try:
cv2.imshow('Chroma_red_birds', Chroma_red)
except:
pass
if DETECT_WALL:
try:
cv2.imshow('Chroma_red_normal', Chroma_red_normal)
except:
pass
try:
cv2.imshow('cropped_image',crop_img_rgb)
except:
pass
try:
cv2.imshow('best_match',arrow_list[best_arrow])
except:
pass
k = cv2.waitKey(33)
if k==27: # Esc key to stop
cv2.destroyAllWindows()
break
# Wait
wait()
except KeyboardInterrupt:
print "Keyboard Interrupt Detected!"
finally:
# Stop the video feed
buggy.exitBuggy()
#exit()
| gpl-2.0 |
PAIR-code/recommendation-rudders | hyperbolic-rs/rudders/graph/analysis/plot_hyperbolicity.py | 1 | 1050 | import argparse
import logging
import numpy as np
import matplotlib.pyplot as plt
from utils import annotate_vline, remove_extensions
parser = argparse.ArgumentParser(description='Plot delta-hyperbolicities')
parser.add_argument(
'--input', type=str, required=True, help='The hyperbolicities file.')
args = parser.parse_args()
# Load the sampled delta-hyperbolicity values and their raw occurrence counts
# (the counts file lives next to the values file, named "counts" for "values").
values = np.load(args.input)
counts = np.load(args.input.replace('values', 'counts'))
# Normalise counts into relative frequencies.
counts = counts / np.sum(counts)
# Frequency-weighted mean and overall maximum hyperbolicity.
h_mean = np.sum(values * counts)
h_max = np.max(values)
# Plot the hyperbolicity histogram with mean/max markers.
plt.bar(values, counts, align='center', width=0.25, label='h (sampled)')
annotate_vline(h_mean, f'Mean: {h_mean:.2f}', color='tab:orange', lw=2)
annotate_vline(h_max, f'Max: {h_max:.2f}', left=False, color='r', lw=2)
plt.xlim([-0.5, h_max + 0.5])
plt.xlabel('Hyperbolicity')
plt.ylabel('Relative Frequency')
plt.title('dataset = {}'.format(remove_extensions(args.input)), y=1.08)
# Save the plot next to the input, swapping the data suffix for a .png.
plt.tight_layout()
plt.savefig(args.input.replace('hyp-values.npy', 'hyperbolicity.png'))
| apache-2.0 |
fengzhyuan/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 105 | 22788 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unkown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    """A callable metric with metric(x, x) != 0 must really be evaluated on
    the diagonal instead of the diagonal being assumed to be zero (which
    would only be valid for a strict metric)."""
    constant_metric = lambda x, y: 5
    dist = pairwise_distances([[1]], metric=constant_metric)
    assert_equal(dist[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
    """Callable version of pairwise.rbf_kernel (promotes inputs to 2-D)."""
    xa = np.atleast_2d(x)
    ya = np.atleast_2d(y)
    return rbf_kernel(xa, ya, **kwds)
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a value error is raised when the lengths of X and Y should not
# differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
    """Pairwise Euclidean distances agree for dense and sparse inputs."""
    X_dense = [[0]]
    Y_dense = [[1], [2]]
    for X, Y in ((X_dense, Y_dense),
                 (csr_matrix(X_dense), csr_matrix(Y_dense))):
        D = euclidean_distances(X, Y)
        assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
    """Paired Euclidean distances are the row-wise distances of matched rows."""
    left = [[0], [0]]
    right = [[1], [2]]
    assert_array_almost_equal(paired_euclidean_distances(left, right),
                              [1., 2.])
def test_paired_manhattan_distances():
    """Paired Manhattan distances are the row-wise L1 gaps of matched rows."""
    left = [[0], [0]]
    right = [[1], [2]]
    assert_array_almost_equal(paired_manhattan_distances(left, right),
                              [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
    """The diagonal of a linear Gram matrix holds each sample's squared norm."""
    data = np.random.RandomState(0).random_sample((5, 4))
    gram = linear_kernel(data, data)
    expected = [linalg.norm(row) ** 2 for row in data]
    # flat[::6] walks the main diagonal of the 5x5 Gram matrix.
    assert_array_almost_equal(gram.flat[::6], expected)
def test_rbf_kernel():
    """The diagonal of an RBF Gram matrix is all ones (zero self-distance)."""
    data = np.random.RandomState(0).random_sample((5, 4))
    gram = rbf_kernel(data, data)
    # flat[::6] walks the main diagonal of the 5x5 Gram matrix.
    assert_array_almost_equal(gram.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine is kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
    """Recursively convert a numpy array (any rank) into nested tuples.

    A 1-D array becomes a flat tuple of its elements; higher-rank arrays
    become tuples of tuplified sub-arrays.
    """
    if len(X.shape) > 1:
        return tuple(tuplify(sub) for sub in X)
    return tuple(X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
BMP-TECH/mavlink | pymavlink/tools/mavgraph.py | 8 | 9809 | #!/usr/bin/env python
'''
graph a MAVLink log file
Andrew Tridgell August 2011
'''
import sys, struct, time, os, datetime
import math, re
import matplotlib
from math import *
from pymavlink.mavextra import *
# Compatibility shim: on Python 2, make `input` behave like `raw_input`
# (return a string without evaluating it); on Python 3 raw_input is gone
# and the NameError leaves the builtin `input` untouched.
try:
    input = raw_input
except NameError:
    pass

# Background colours (RGB tuples, components in 0..1) used to shade the plot
# by flight mode.  Keyed first by autopilot stack (the --flightmode value),
# then by mode name; unknown modes fall back to edge_colour.
colourmap = {
    'apm' : {
        'MANUAL' : (1.0, 0, 0),
        'AUTO' : ( 0, 1.0, 0),
        'LOITER' : ( 0, 0, 1.0),
        'FBWA' : (1.0, 0.5, 0),
        'RTL' : ( 1, 0, 0.5),
        'STABILIZE' : (0.5, 1.0, 0),
        'LAND' : ( 0, 1.0, 0.5),
        'STEERING' : (0.5, 0, 1.0),
        'HOLD' : ( 0, 0.5, 1.0),
        'ALT_HOLD' : (1.0, 0.5, 0.5),
        'CIRCLE' : (0.5, 1.0, 0.5),
        'POSITION' : (1.0, 0.0, 1.0),
        'GUIDED' : (0.5, 0.5, 1.0),
        'ACRO' : (1.0, 1.0, 0),
        'CRUISE' : ( 0, 1.0, 1.0)
    },
    'px4' : {
        'MANUAL' : (1.0, 0, 0),
        'SEATBELT' : ( 0.5, 0.5, 0),
        'EASY' : ( 0, 1.0, 0),
        'AUTO' : ( 0, 0, 1.0),
        'UNKNOWN' : ( 1.0, 1.0, 1.0)
    }
}

# Edge colour for the shaded spans, also the fallback fill for unknown modes.
edge_colour = (0.1, 0.1, 0.1)

# Overall x-axis extent, accumulated across calls to plotit() (module globals
# so repeated plots over several files share one time range).
lowest_x = None
highest_x = None
def plotit(x, y, fields, colors=[]):
    '''plot a set of graphs using date for x axis'''
    # NOTE(review): the mutable default for `colors` is shared across calls;
    # it is only read here, never mutated, so it is harmless in practice.
    global lowest_x, highest_x
    pylab.ion()
    fig = pylab.figure(num=1, figsize=(12,6))
    ax1 = fig.gca()
    ax2 = None
    xrange = 0.0
    # Grow the shared x extent with the first/last sample of every field.
    for i in range(0, len(fields)):
        if len(x[i]) == 0: continue
        if lowest_x is None or x[i][0] < lowest_x:
            lowest_x = x[i][0]
        if highest_x is None or x[i][-1] > highest_x:
            highest_x = x[i][-1]
    if highest_x is None or lowest_x is None:
        # No field had any data at all.
        return
    xrange = highest_x - lowest_x
    # x values are matplotlib date numbers (in days); convert span to seconds.
    xrange *= 24 * 60 * 60
    formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
    interval = 1
    intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600,
                  900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ]
    # Choose the smallest tick interval that gives fewer than 15 major ticks.
    for interval in intervals:
        if xrange / interval < 15:
            break
    locator = matplotlib.dates.SecondLocator(interval=interval)
    if not args.xaxis:
        # Time-based x axis: install date tick locator/formatter.
        ax1.xaxis.set_major_locator(locator)
        ax1.xaxis.set_major_formatter(formatter)
    empty = True
    ax1_labels = []
    ax2_labels = []
    for i in range(0, len(fields)):
        if len(x[i]) == 0:
            print("Failed to find any values for field %s" % fields[i])
            continue
        # Use the caller-supplied colour when available, else default to red.
        if i < len(colors):
            color = colors[i]
        else:
            color = 'red'
        (tz, tzdst) = time.tzname
        # Fields flagged with a ":2" suffix are routed to a secondary y axis.
        if axes[i] == 2:
            if ax2 == None:
                ax2 = ax1.twinx()
            ax = ax2
            if not args.xaxis:
                ax2.xaxis.set_major_locator(locator)
                ax2.xaxis.set_major_formatter(formatter)
            label = fields[i]
            if label.endswith(":2"):
                # Drop the ":2" marker from the legend text.
                label = label[:-2]
            ax2_labels.append(label)
        else:
            ax1_labels.append(fields[i])
            ax = ax1
        if args.xaxis:
            # Custom x-axis expression: plain plot with '+' markers by default.
            if args.marker is not None:
                marker = args.marker
            else:
                marker = '+'
            if args.linestyle is not None:
                linestyle = args.linestyle
            else:
                linestyle = 'None'
            ax.plot(x[i], y[i], color=color, label=fields[i],
                    linestyle=linestyle, marker=marker)
        else:
            # Default: time on the x axis, drawn as a date plot (solid line).
            if args.marker is not None:
                marker = args.marker
            else:
                marker = 'None'
            if args.linestyle is not None:
                linestyle = args.linestyle
            else:
                linestyle = '-'
            ax.plot_date(x[i], y[i], color=color, label=fields[i],
                         linestyle=linestyle, marker=marker, tz=None)
        empty = False
    # Shade the background per flight mode: one span per mode change, and a
    # final span from the last change to the right edge of the plot.
    if args.flightmode is not None:
        for i in range(len(modes)-1):
            c = colourmap[args.flightmode].get(modes[i][1], edge_colour)
            ax1.axvspan(modes[i][0], modes[i+1][0], fc=c, ec=edge_colour, alpha=0.1)
        c = colourmap[args.flightmode].get(modes[-1][1], edge_colour)
        ax1.axvspan(modes[-1][0], ax1.get_xlim()[1], fc=c, ec=edge_colour, alpha=0.1)
    if ax1_labels != []:
        ax1.legend(ax1_labels,loc=args.legend)
    if ax2_labels != []:
        ax2.legend(ax2_labels,loc=args.legend2)
    if empty:
        print("No data to graph")
        return
# Command-line interface and per-field plotting state.
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--no-timestamps", dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--planner", action='store_true', help="use planner file format")
parser.add_argument("--condition", default=None, help="select packets by a condition")
parser.add_argument("--labels", default=None, help="comma separated field labels")
parser.add_argument("--legend", default='upper left', help="default legend position")
parser.add_argument("--legend2", default='upper right', help="default legend2 position")
parser.add_argument("--marker", default=None, help="point marker")
parser.add_argument("--linestyle", default=None, help="line style")
parser.add_argument("--xaxis", default=None, help="X axis expression")
parser.add_argument("--multi", action='store_true', help="multiple files with same colours")
parser.add_argument("--zero-time-base", action='store_true', help="use Z time base for DF logs")
parser.add_argument("--flightmode", default=None,
                    help="Choose the plot background according to the active flight mode of the specified type, e.g. --flightmode=apm for ArduPilot or --flightmode=px4 for PX4 stack logs. Cannot be specified with --xaxis.")
parser.add_argument("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_argument("--output", default=None, help="provide an output format")
parser.add_argument("--timeshift", type=float, default=0, help="shift time on first graph in seconds")
parser.add_argument("logs_fields", metavar="<LOG or FIELD>", nargs="+")
args = parser.parse_args()

from pymavlink import mavutil

# Mode shading needs the time axis, so it is incompatible with --xaxis.
if args.flightmode is not None and args.xaxis:
    print("Cannot request flightmode backgrounds with an x-axis expression")
    sys.exit(1)
if args.flightmode is not None and args.flightmode not in colourmap:
    print("Unknown flight controller '%s' in specification of --flightmode" % args.flightmode)
    sys.exit(1)
# When saving to a file, select the non-interactive Agg backend *before*
# pylab is imported.
if args.output is not None:
    matplotlib.use('Agg')
import pylab

# Positional arguments mix log files and field expressions: anything that
# exists on disk is treated as a log, everything else as a field expression.
filenames = []
fields = []
for f in args.logs_fields:
    if os.path.exists(f):
        filenames.append(f)
    else:
        fields.append(f)
msg_types = set()
multiplier = []
field_types = []
colors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey', 'yellow', 'brown', 'darkcyan', 'cornflowerblue', 'darkmagenta', 'deeppink', 'darkred']
# work out msg types we are interested in
x = []
y = []
modes = []
axes = []
first_only = []
# MAVLink message-type names appear as ALL_CAPS tokens inside each field
# expression (e.g. "GPS_RAW_INT.alt").
re_caps = re.compile('[A-Z_][A-Z0-9_]+')
for f in fields:
    caps = set(re.findall(re_caps, f))
    msg_types = msg_types.union(caps)
    field_types.append(caps)
    # Parallel per-field state: data series, target axis, first-file-only flag.
    y.append([])
    x.append([])
    axes.append(1)
    first_only.append(False)
def add_data(t, msg, vars, flightmode):
    '''Accumulate one log message into the per-field x/y series.'''
    mtype = msg.get_type()
    # Record flight-mode transitions when mode shading was requested.
    if args.flightmode is not None:
        if len(modes) == 0 or modes[-1][1] != flightmode:
            modes.append((t, flightmode))
    if mtype not in msg_types:
        return
    for idx, expression in enumerate(fields):
        if mtype not in field_types[idx]:
            continue
        # Strip the ":2" (secondary y axis) and ":1" (first file only)
        # suffixes, recording their effect in the parallel state lists.
        if expression.endswith(":2"):
            axes[idx] = 2
            expression = expression[:-2]
        if expression.endswith(":1"):
            first_only[idx] = True
            expression = expression[:-2]
        value = mavutil.evaluate_expression(expression, vars)
        if value is None:
            continue
        if args.xaxis is None:
            xvalue = t
        else:
            xvalue = mavutil.evaluate_expression(args.xaxis, vars)
            if xvalue is None:
                continue
        y[idx].append(value)
        x[idx].append(xvalue)
def process_file(filename, timeshift):
    '''Feed every matching message of one log file into add_data().'''
    print("Processing %s" % filename)
    mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps, zero_time_base=args.zero_time_base, dialect=args.dialect)
    vars = {}
    while True:
        msg = mlog.recv_match(args.condition)
        if msg is None:
            break
        # Convert the (optionally shifted) unix timestamp into a matplotlib
        # date number for the time axis.
        stamp = datetime.datetime.fromtimestamp(msg._timestamp + timeshift)
        tdays = matplotlib.dates.date2num(stamp)
        add_data(tdays, msg, mlog.messages, mlog.flightmode)
# Validate inputs, then process each file and plot its series.
if len(filenames) == 0:
    print("No files to process")
    sys.exit(1)

# --labels must supply one label per (file, field) combination.
if args.labels is not None:
    labels = args.labels.split(',')
    if len(labels) != len(fields)*len(filenames):
        print("Number of labels (%u) must match number of fields (%u)" % (
            len(labels), len(fields)*len(filenames)))
        sys.exit(1)
else:
    labels = None

# --timeshift applies to the first file only; later files are unshifted.
timeshift = args.timeshift
for fi in range(0, len(filenames)):
    f = filenames[fi]
    process_file(f, timeshift)
    timeshift = 0
    for i in range(0, len(x)):
        # Fields flagged ":1" are plotted from the first file only.
        if first_only[i] and fi != 0:
            x[i] = []
            y[i] = []
    if labels:
        lab = labels[fi*len(fields):(fi+1)*len(fields)]
    else:
        lab = fields[:]
    if args.multi:
        # Same palette for every file.
        col = colors[:]
    else:
        # Advance the palette per file so each file gets distinct colours.
        col = colors[fi*len(fields):]
    plotit(x, y, lab, colors=col)
    # Clear the series so the next file starts fresh.
    for i in range(0, len(x)):
        x[i] = []
        y[i] = []

if args.output is None:
    # Interactive mode: show the window and wait for the user.
    pylab.show()
    pylab.draw()
    input('press enter to exit....')
else:
    # Non-interactive: write the figure to the requested output file.
    pylab.legend(loc=2,prop={'size':8})
    pylab.savefig(args.output, bbox_inches='tight', dpi=200)
| lgpl-3.0 |
abhishekgahlot/scikit-learn | sklearn/ensemble/tests/test_base.py | 28 | 1334 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
    """Check BaseEnsemble methods."""
    ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
    iris = load_iris()
    ensemble.fit(iris.data, iris.target)
    # Rebuild the estimator list by hand through _make_estimator: three
    # appended estimators plus one built without appending.
    ensemble.estimators_ = []
    for _ in range(3):
        ensemble._make_estimator()
    ensemble._make_estimator(append=False)
    assert_equal(3, len(ensemble))
    assert_equal(3, len(ensemble.estimators_))
    assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
    """Check that instantiating a BaseEnsemble with n_estimators<=0 raises
    a ValueError."""
    iris = load_iris()
    ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
    expected = "n_estimators must be greater than zero, got 0."
    assert_raise_message(ValueError, expected,
                         ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
cimat/data-visualization-patterns | display-patterns/Discrete Quantities/Pruebas/A36Span_Chart_Pyqtgraph.py | 1 | 1080 | from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
from datos import data
import pandas as pd
d=data('mtcars')
subset1, subset2, subset3= d[d.cyl==4], d[d.cyl==6], d[d.cyl==8]
datos=pd.DataFrame ({'Max': [max(subset1.mpg), max(subset2.mpg), max(subset3.mpg)],
'Min': [min(subset1.mpg), min(subset2.mpg), min(subset3.mpg)],
'Span': [max(subset1.mpg)-min(subset1.mpg), max(subset2.mpg)-min(subset2.mpg), max(subset3.mpg)-min(subset3.mpg)]})
datos.index=[4,6,8]
bar_width = 0.8
win = pg.plot(title='Simple Bar Chart')
bg1 = pg.BarGraphItem(x=datos.index, height=datos.Max, width=bar_width, brush=(96,255,96), pen='k')
bg2 = pg.BarGraphItem(x=datos.index, height=datos.Min, width=bar_width, brush='k', pen='k' )
win.addItem(bg1)
win.addItem(bg2)
win.setTitle('Range of Milles per Gallon (mpg) by Cylindres (cyl) ')
win.setLabel('left', "Cylindres", )
win.setLabel('bottom', "Milles per Gallon")
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_() | cc0-1.0 |
fireball-QMD/progs | pyfb/geometry/dinamic.py | 1 | 5380 | from pyfb.geometry.step import step
from pyfb.geometry.atom import atom
import pandas as pd
import numpy as np
class dinamic:
    """A molecular-dynamics trajectory: an ordered list of `step` frames.

    Frames are loaded from XYZ-format files (natoms line, comment line,
    then natoms coordinate lines per frame).  Derived quantities computed
    by get() accumulate as columns in self.out and are printed by
    print_out().  NOTE(review): `print` deliberately shadows the builtin
    name inside this class; kept for backward compatibility with callers.
    """

    def __init__(self):
        # Ordered list of `step` frames making up the trajectory.
        self.step=[]
        # Reads the per-atom charges placed after the positions:
        #   x y z Qtot qs qp qd .....
        # Columns of derived data accumulated by get(), one list per query.
        self.out=[]

    def print_total_steps(self):
        """Print the number of frames currently loaded."""
        print(len(self.step))

    def append(self,step):
        """Append one `step` frame to the trajectory."""
        self.step.append(step)

    def print(self):
        """Print every frame (delegates to step.print)."""
        for i in self.step:
            i.print()

    def print_bas_format(self):
        """Print every frame in .bas format."""
        for i in self.step:
            i.print_bas_format()

    def print_charges(self):
        """Print the per-atom charges of every frame."""
        for i in self.step:
            i.print_charges()

    def print_2line(self):
        """Print the comment (second) line of every frame."""
        for i in self.step:
            print(i.line2)

    def print_name(self):
        """Print the name of every frame."""
        for i in self.step:
            print(i.name)

    def print_out(self):
        """Print the columns collected by get(): one row per frame,
        one fixed-width column per recorded quantity."""
        #print(self.out)
        for i in range(len(self.out[0])):
            a=""
            for j in range(len(self.out)):
                a=a+'{0:12.6f} '.format(self.out[j][i])
            print(a)

    def dintra_matrix(self):
        """Print, for each frame, the full interatomic distance matrix
        as a pandas DataFrame labelled by atomic number Z."""
        for bas in self.step:
            out=[]
            col=[]
            for iatom in bas.atom:
                col.append(iatom.Z)
                aux=[]
                for jatom in bas.atom:
                    aux.append(iatom.distancia(jatom))
                out.append(aux)
            c=np.array(out)
            df2 = pd.DataFrame(c,index=col,columns=col)
            print(df2)

    def get(self,info,col):
        """Evaluate one query over every frame; append the resulting column
        to self.out (mutating queries append nothing).

        Atom indices in `col` are 1-based.  Supported `info` values:
          -rescal      scale all coordinates by col[0] (mutates frames)
          -x/-y/-z     coordinate of atom col[0]
          -dx/-dy/-dz  shift that coordinate by col[1] (mutates frames)
          -X/-Y/-Z     running average of the coordinate over frames
          -d           distance between atoms col[0] and col[1]
          -D           running average of that distance
          -ang         angle col[0]-col[1]-col[2]
          -ANG         running average of that angle
        """
        count=0.0
        aux=[]
        salida=[]
        for i in self.step:
            if info == '-rescal':
                # Scale every coordinate of every atom by col[0].
                for j in i.atom:
                    j.r[0]=j.r[0]*float(col[0])
                    j.r[1]=j.r[1]*float(col[0])
                    j.r[2]=j.r[2]*float(col[0])
            if info == '-x':
                salida.append(i.atom[col[0]-1].r[0])
            if info == '-y':
                salida.append(i.atom[col[0]-1].r[1])
            if info == '-z':
                salida.append(i.atom[col[0]-1].r[2])
            if info == '-dx':
                i.atom[col[0]-1].r[0]=i.atom[col[0]-1].r[0]+float(col[1])
            if info == '-dy':
                i.atom[col[0]-1].r[1]=i.atom[col[0]-1].r[1]+float(col[1])
            if info == '-dz':
                i.atom[col[0]-1].r[2]=i.atom[col[0]-1].r[2]+float(col[1])
            # Running mean recurrence: mean_n = x/(n+1) + mean_{n-1}*n/(n+1).
            if info == '-X':
                if int(count)==0:
                    salida.append(float(i.atom[col[0]-1].r[0]))
                else:
                    salida.append(float(i.atom[col[0]-1].r[0])/(count+1)+float(salida[int(count-1)])*count/(count+1))
            if info == '-Y':
                if int(count)==0:
                    salida.append(float(i.atom[col[0]-1].r[1]))
                else:
                    salida.append(float(i.atom[col[0]-1].r[1])/(count+1)+float(salida[int(count-1)])*count/(count+1))
            if info == '-Z':
                if int(count)==0:
                    salida.append(float(i.atom[col[0]-1].r[2]))
                else:
                    salida.append(float(i.atom[col[0]-1].r[2])/(count+1)+float(salida[int(count-1)])*count/(count+1))
            if info == '-d':
                salida.append(float(i.atom[col[0]-1].distancia(i.atom[col[1]-1])))
            if info == '-D':
                if int(count)==0:
                    salida.append(float(i.atom[col[0]-1].distancia(i.atom[col[1]-1])))
                else:
                    salida.append(float(i.atom[col[0]-1].distancia(i.atom[col[1]-1]))/(count+1)+float(salida[int(count-1)])*count/(count+1))
            if info == '-ang':
                salida.append(float(i.atom[col[0]-1].ang(i.atom[col[1]-1],i.atom[col[2]-1])))
            if info == '-ANG':
                if int(count)==0:
                    salida.append(float(i.atom[col[0]-1].ang(i.atom[col[1]-1],i.atom[col[2]-1])))
                else:
                    salida.append(float(i.atom[col[0]-1].ang(i.atom[col[1]-1],i.atom[col[2]-1]))/(count+1)+float(salida[int(count-1)])*count/(count+1))
            count=count+1
        # print(salida)
        self.out.append(salida)

    def loadxyz(self,archivo,name=""):
        """Load every frame of an XYZ trajectory file.

        All frames are assumed to have the atom count given on the first
        line of the file.  Each frame gets `name` if given, otherwise
        "step = <n>".
        """
        natoms = 0
        istep = 0
        text=open(archivo).readlines()
        nmaxlines=len(text)
        i=0
        while i < nmaxlines:
            line=text[i].split()
            bas=step()
            istep=istep+1
            if name != "":
                bas.name=name
            else:
                bas.name="step = "+str(istep)
            if i == 0 :
                # Atom count is read once, from the very first line.
                natoms = int(line[0])
            i=i+1
            bas.line2=text[i]
            for j in range(natoms):
                i=i+1
                line=text[i].split()
                a=line[0]
                ra=[]
                ra.append(float(line[1]))
                ra.append(float(line[2]))
                ra.append(float(line[3]))
                bas.append(atom(a,ra))
            i=i+1 # step past the last atom line
            i=i+1 # onto the next frame's natoms line
            self.append(bas)

    def loadstep(self,archivo,istep):
        """Load a single frame (1-based index istep) from an XYZ file."""
        natoms = 0
        text=open(archivo).readlines()
        nmaxlines=len(text)
        i=0
        while i < nmaxlines:
            line=text[i].split()
            bas=step()
            bas.name="step = "+str(istep)
            if i == 0 :
                natoms = int(line[0])
            # Jump directly to the comment line of the requested frame
            # (each frame occupies natoms+2 lines).
            i=(istep-1)*(natoms+2)+1
            bas.line2=text[i]
            for j in range(natoms):
                i=i+1
                line=text[i].split()
                a=line[0]
                ra=[]
                ra.append(float(line[1]))
                ra.append(float(line[2]))
                ra.append(float(line[3]))
                bas.append(atom(a,ra))
            # Force the while loop to terminate after one frame.
            i=nmaxlines
            self.append(bas)

    def laststep(self,archivo,read_charges):
        """Load only the final frame; optionally read per-atom charges."""
        natoms = 0
        text = open(archivo).readlines()
        nmaxlines = len(text)
        natoms = int(text[0])
        bas=step()
        # The last frame's comment line sits natoms+1 lines from the end.
        i=nmaxlines-(natoms+1)
        bas.line2=text[i]
        for j in range(natoms):
            i=i+1
            line=text[i].split()
            a=line[0]
            ra=[]
            ra.append(float(line[1]))
            ra.append(float(line[2]))
            ra.append(float(line[3]))
            bas.append(atom(a,ra))
        if read_charges: # charges must be loaded after the step is read
            bas.loadcharges()
        self.append(bas)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.