repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
TaxIPP-Life/Til | til/data/archives/Patrimoine/test_matching.py | 2 | 6752 | # -*- coding:utf-8 -*-
'''
Created on 25 juil. 2013
@author: a.eidelman
'''
import pandas as pd
import pdb
import numpy as np
import time
# #TODO: J'aime bien l'idée de regarder sur les valeurs potentielles, de séléctionner la meilleure et de prendre quelqu'un dans cette case.
# # L'avantage c'est qu'au lieu de regarder sur tous les individus, on regarde sur les valeurs, on a potentiellement plusieurs gens par
# # case donc moins de case que de gens ce qui peut améliorer le temps de calcul.
# age_groups = table2['age'].unique()
# dip_groups = table2['diploma'].unique()
# group_combinations = [(ag, dip) for ag in age_groups for dip in dip_groups]
#
# def iter_key(age_grp, dip_grp, age, dip):
# age_av = sum(age_grp) / 2.0
# dip_av = sum(dip_grp) / 2.0
# return pow(age - age_av, 2) + pow(dip - dip_av, 2)
#
# def iterate_buckets(age, dip):
# combs = sorted(group_combinations)
# for c in combs:
# yield c
#
# def match_key(indiv1, indiv2):
# age1, dip1 = indiv1
# age2, dip2 = indiv2
# return pow(age1 - age2, 2) + pow(dip1 - dip2, 2)
#
# pdb.set_trace()
#
#
# # def get_best_match(matches, age, dip):
# # sorted_matches = sorted(key=match_key, zip(matches, [(age, dip)] * len(match)))
# # return sorted_matches[0]
#
# pdb.set_trace()
#
# for indiv in table1:
# print indiv
#
# for individual in table1:
# age, diploma = individual
# for age_bucket, dip_bucket in iterate_buckets(table2['age'], table2['diploma']):
# matches = age_bucket.intersection(dip_bucket)
# if matches:
# match = get_best_match(matches, age, diploma)
# all_matches.append((individual, match))
# remove_individual(age_groups, match)
# remove_individual(dip_groups, match)
# matching()
# import cProfile
# command = """matching()"""
# cProfile.runctx( command, globals(), locals(), filename="matching.profile1" )
#### temps de calcul en fonction de la base
def run_time(n):
    """Time a naive row-by-row matching between two random tables.

    For each row of ``table1``, the still-available row of ``table2``
    with the highest score is selected and then removed from the pool.
    NB: ``idxmax`` keeps the original behaviour of selecting the row
    with the *largest* score.

    Parameters
    ----------
    n : int
        Number of rows in each random table.

    Returns
    -------
    float
        Elapsed wall-clock time (seconds) of the matching loop.
    """
    table2 = pd.DataFrame(np.random.randint(0, 100, [n, 2]),
                          columns=['age', 'diploma'])
    table1 = pd.DataFrame(np.random.randint(0, 100, [n, 2]),
                          columns=['age', 'diploma'])
    match = pd.Series(0, index=table1.index)
    # Boolean mask of the rows of table2 that are still available.
    index2 = pd.Series(True, index=table2.index)
    k_max = min(len(table2), len(table1))
    debut = time.perf_counter()  # time.clock() was removed in Python 3.8
    for k in range(k_max):  # Python 3: xrange -> range
        temp = table1.iloc[k]
        # Same formula as the original eval()'d string, computed
        # directly for clarity and speed.
        score = ((table2['age'] - temp['age']) ** 2
                 + 5 * (table2['diploma'] - temp['diploma']))
        score = score[index2]
        idx2 = score.idxmax()
        match.iloc[k] = idx2
        index2[idx2] = False  # a matched row cannot be reused
    elapsed = time.perf_counter() - debut
    print('taille: ', n, ' ; temps de calcul: ', elapsed)
    return elapsed
def run_time_cell(n):
    """Time the matching when table2 is first aggregated by cell.

    table2 is grouped into cells of distinct (age, diploma) pairs so the
    score is computed once per cell instead of once per row; 'nb' tracks
    the remaining capacity of each cell.  NB: as in the original code,
    ``match`` stores the *cell* index, not a row index of table2.

    Parameters
    ----------
    n : int
        Number of rows in each random table (must be >= 1).

    Returns
    -------
    float
        Elapsed wall-clock time (seconds) of the matching loop.
    """
    table2 = pd.DataFrame(np.random.randint(0, 100, [n, 2]),
                          columns=['age', 'diploma'])
    table1 = pd.DataFrame(np.random.randint(0, 100, [n, 2]),
                          columns=['age', 'diploma'])
    match = pd.Series(0, index=table1.index)
    # One row per distinct (age, diploma) pair; 'nb' counts how many
    # rows of table2 fall in that cell.
    groups2 = table2.groupby(['age', 'diploma'])
    cell_values = pd.DataFrame(list(groups2.groups.keys()))  # py3: keys() is a view
    counts = pd.DataFrame(groups2.size())
    counts = counts.rename(columns={0: 'nb'})
    cell_values = cell_values.merge(counts, left_on=[0, 1], right_index=True)
    debut = time.perf_counter()  # time.clock() was removed in Python 3.8
    for k in range(len(table1)):
        row = table1.iloc[k]
        # Same formula as the original eval()'d string, computed directly.
        score = ((cell_values[0] - row['age']) ** 2
                 + 5 * (cell_values[1] - row['diploma']))
        idx2 = score.idxmax()
        match.iloc[k] = idx2
        # Consume one slot of the chosen cell; drop the cell once empty.
        cell_values.loc[idx2, 'nb'] -= 1
        if cell_values.loc[idx2, 'nb'] == 0:
            cell_values = cell_values.drop(idx2, axis=0)
    elapsed = time.perf_counter() - debut
    print('taille: ', n, ' ; temps de calcul: ', elapsed)
    return elapsed
def run_time_np(n):
    """Time the same matching implemented with plain numpy arrays.

    The original row number is appended as a third column of ``table2``
    so it survives the ``np.delete`` calls that shrink the pool.

    Parameters
    ----------
    n : int
        Number of rows in each random table.

    Returns
    -------
    float
        Elapsed wall-clock time (seconds) of the matching loop.
    """
    table2 = np.random.randint(0, 100, [n, 2])
    table1 = np.random.randint(0, 100, [n, 2])
    # Keep the original row ids in column 2.
    row_ids = np.array([np.arange(n)])
    table2 = np.concatenate((table2, row_ids.T), axis=1)
    match = np.empty(n, dtype=int)
    k_max = min(len(table2), len(table1))
    debut = time.perf_counter()  # time.clock() was removed in Python 3.8
    for k in range(k_max):  # Python 3: xrange -> range
        temp = table1[k]
        # Same formula as the original eval()'d string.
        score = (table2[:, 0] - temp[0]) ** 2 + 5 * (table2[:, 1] - temp[1])
        idx = score.argmax()
        match[k] = table2[idx, 2]
        # Remove the matched row from the pool.
        table2 = np.delete(table2, idx, 0)
    elapsed = time.perf_counter() - debut
    print('taille: ', n, ' ; temps de calcul: ', elapsed)
    return elapsed
def run_time_np_cell(n):
    """Time the cell-aggregated matching with the inner loop on numpy.

    Cells are built with pandas (distinct (age, diploma) pairs plus a
    'nb' count and an 'idx' id), then converted to a numpy array so the
    loop works on columns [age, diploma, nb, idx] by position.

    Fixes over the original: removed the leftover ``pdb.set_trace()``
    that halted execution, the unused locals, and the Python-2-only
    constructs (``xrange``, ``print`` statement, ``time.clock``).

    Parameters
    ----------
    n : int
        Number of rows in each random table (must be >= 1).

    Returns
    -------
    float
        Elapsed wall-clock time (seconds) of the matching loop.
    """
    table2 = pd.DataFrame(np.random.randint(0, 100, [n, 2]),
                          columns=['age', 'diploma'])
    table1 = pd.DataFrame(np.random.randint(0, 100, [n, 2]),
                          columns=['age', 'diploma'])
    groups2 = table2.groupby(['age', 'diploma'])
    cell_values = pd.DataFrame(list(groups2.groups.keys()))  # py3: keys() is a view
    counts = pd.DataFrame(groups2.size())
    counts = counts.rename(columns={0: 'nb'})
    cell_values = cell_values.merge(counts, left_on=[0, 1], right_index=True)
    cell_values['idx'] = range(len(cell_values))
    # Switch to raw numpy: columns are [age, diploma, nb, idx].
    table1 = np.array(table1)
    cell_values = np.array(cell_values)
    match = np.empty(n, dtype=int)
    debut = time.perf_counter()  # time.clock() was removed in Python 3.8
    for k in range(len(table1)):
        row = table1[k]
        # Same formula as the original eval()'d string.
        score = (cell_values[:, 0] - row[0]) ** 2 + 5 * (cell_values[:, 1] - row[1])
        idx = score.argmax()
        match[k] = cell_values[idx, 3]
        # Consume one slot of the chosen cell; drop the cell once empty.
        cell_values[idx, 2] -= 1
        if cell_values[idx, 2] == 0:
            cell_values = np.delete(cell_values, idx, 0)
    elapsed = time.perf_counter() - debut
    print('taille: ', n, ' ; temps de calcul: ', elapsed)
    return elapsed
# Benchmark driver: time the numpy cell-based matching on several sizes.
temps = {}
sizes = [500000, 1500000, 2000000, 1000000, 2500000]
# Alternative size grids tried previously, kept for reference:
# [1500000,2000000,1000000,2500000]
# [20000,25000,30000,35000,40000,45000,50000,75000,100000]
# [1000,3000,5000,7000,8000,10000,12500,15000]
# 20000,25000,30000,35000,40000,45000,50000,75000,100000
if __name__ == '__main__':
    # Guarding the loop keeps the module importable without triggering
    # hours of benchmark computation as an import side effect.
    for size in sizes:
        temps[str(size)] = run_time_np_cell(size)
#
# for size in sizes:
#     temps[str(size)] = run_time(size)
#
| gpl-3.0 |
Srisai85/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
    """Turn X into a sequence or ndarray, avoiding a copy if possible."""
    # A single mapping is one sample: wrap it in a one-element list.
    return [X] if isinstance(X, Mapping) else tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
    """Transforms lists of feature-value mappings to vectors.

    This transformer turns lists of mappings (dict-like objects) of feature
    names to feature values into Numpy arrays or scipy.sparse matrices for use
    with scikit-learn estimators.

    When feature values are strings, this transformer will do a binary one-hot
    (aka one-of-K) coding: one boolean-valued feature is constructed for each
    of the possible string values that the feature can take on. For instance,
    a feature "f" that can take on the values "ham" and "spam" will become two
    features in the output, one signifying "f=ham", the other "f=spam".

    Features that do not occur in a sample (mapping) will have a zero value
    in the resulting array/matrix.

    Read more in the :ref:`User Guide <dict_feature_extraction>`.

    Parameters
    ----------
    dtype : callable, optional
        The type of feature values. Passed to Numpy array/scipy.sparse matrix
        constructors as the dtype argument.
    separator: string, optional
        Separator string used when constructing new features for one-hot
        coding.
    sparse: boolean, optional.
        Whether transform should produce scipy.sparse matrices.
        True by default.
    sort: boolean, optional.
        Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
        True by default.

    Attributes
    ----------
    vocabulary_ : dict
        A dictionary mapping feature names to feature indices.
    feature_names_ : list
        A list of length n_features containing the feature names (e.g., "f=ham"
        and "f=spam").

    Examples
    --------
    >>> from sklearn.feature_extraction import DictVectorizer
    >>> v = DictVectorizer(sparse=False)
    >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
    >>> X = v.fit_transform(D)
    >>> X
    array([[ 2.,  0.,  1.],
           [ 0.,  1.,  3.]])
    >>> v.inverse_transform(X) == \
        [{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
    True
    >>> v.transform({'foo': 4, 'unseen_feature': 3})
    array([[ 0.,  0.,  4.]])

    See also
    --------
    FeatureHasher : performs vectorization using only a hash function.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
      encoded as columns of integers.
    """

    def __init__(self, dtype=np.float64, separator="=", sparse=True,
                 sort=True):
        self.dtype = dtype
        self.separator = separator
        self.sparse = sparse
        self.sort = sort

    def fit(self, X, y=None):
        """Learn a list of feature name -> indices mappings.

        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)

        Returns
        -------
        self
        """
        feature_names = []
        vocab = {}

        for x in X:
            for f, v in six.iteritems(x):
                # String values are one-hot encoded: the feature name
                # becomes "<name><separator><value>".
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                if f not in vocab:
                    feature_names.append(f)
                    vocab[f] = len(vocab)

        if self.sort:
            feature_names.sort()
            # Rebuild the vocabulary so indices follow the sorted order.
            vocab = dict((f, i) for i, f in enumerate(feature_names))

        self.feature_names_ = feature_names
        self.vocabulary_ = vocab

        return self

    def _transform(self, X, fitting):
        # Sanity check: Python's array has no way of explicitly requesting the
        # signed 32-bit integers that scipy.sparse needs, so we use the next
        # best thing: typecode "i" (int). However, if that gives larger or
        # smaller integers than 32-bit ones, np.frombuffer screws up.
        assert array("i").itemsize == 4, (
            "sizeof(int) != 4 on your platform; please report this at"
            " https://github.com/scikit-learn/scikit-learn/issues and"
            " include the output from platform.platform() in your bug report")

        dtype = self.dtype
        if fitting:
            feature_names = []
            vocab = {}
        else:
            feature_names = self.feature_names_
            vocab = self.vocabulary_

        # Process everything as sparse regardless of setting
        X = [X] if isinstance(X, Mapping) else X

        indices = array("i")
        indptr = array("i", [0])
        # XXX we could change values to an array.array as well, but it
        # would require (heuristic) conversion of dtype to typecode...
        values = []

        # collect all the possible feature names and build sparse matrix at
        # same time
        for x in X:
            for f, v in six.iteritems(x):
                if isinstance(v, six.string_types):
                    f = "%s%s%s" % (f, self.separator, v)
                    v = 1
                if f in vocab:
                    indices.append(vocab[f])
                    values.append(dtype(v))
                else:
                    # Unknown feature names are only added while fitting;
                    # during transform they are silently skipped.
                    if fitting:
                        feature_names.append(f)
                        vocab[f] = len(vocab)
                        indices.append(vocab[f])
                        values.append(dtype(v))

            # One CSR row per sample: record where this row ends.
            indptr.append(len(indices))

        if len(indptr) == 1:
            raise ValueError("Sample sequence X is empty.")

        indices = frombuffer_empty(indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        shape = (len(indptr) - 1, len(vocab))

        result_matrix = sp.csr_matrix((values, indices, indptr),
                                      shape=shape, dtype=dtype)

        # Sort everything if asked
        if fitting and self.sort:
            feature_names.sort()
            # Remap column indices so they match the sorted feature names.
            map_index = np.empty(len(feature_names), dtype=np.int32)
            for new_val, f in enumerate(feature_names):
                map_index[new_val] = vocab[f]
                vocab[f] = new_val
            result_matrix = result_matrix[:, map_index]

        if self.sparse:
            result_matrix.sort_indices()
        else:
            result_matrix = result_matrix.toarray()

        if fitting:
            self.feature_names_ = feature_names
            self.vocabulary_ = vocab

        return result_matrix

    def fit_transform(self, X, y=None):
        """Learn a list of feature name -> indices mappings and transform X.

        Like fit(X) followed by transform(X), but does not require
        materializing X in memory.

        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)

        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        return self._transform(X, fitting=True)

    def inverse_transform(self, X, dict_type=dict):
        """Transform array or sparse matrix X back to feature mappings.

        X must have been produced by this DictVectorizer's transform or
        fit_transform method; it may only have passed through transformers
        that preserve the number of features and their order.

        In the case of one-hot/one-of-K coding, the constructed feature
        names and values are returned rather than the original ones.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Sample matrix.
        dict_type : callable, optional
            Constructor for feature mappings. Must conform to the
            collections.Mapping API.

        Returns
        -------
        D : list of dict_type objects, length = n_samples
            Feature mappings for the samples in X.
        """
        # COO matrix is not subscriptable
        X = check_array(X, accept_sparse=['csr', 'csc'])
        n_samples = X.shape[0]

        names = self.feature_names_
        dicts = [dict_type() for _ in xrange(n_samples)]

        if sp.issparse(X):
            # Only the explicitly stored entries are converted back.
            for i, j in zip(*X.nonzero()):
                dicts[i][names[j]] = X[i, j]
        else:
            # Dense path: skip zero entries so the mappings stay compact.
            for i, d in enumerate(dicts):
                for j, v in enumerate(X[i, :]):
                    if v != 0:
                        d[names[j]] = X[i, j]

        return dicts

    def transform(self, X, y=None):
        """Transform feature->value dicts to array or sparse matrix.

        Named features not encountered during fit or fit_transform will be
        silently ignored.

        Parameters
        ----------
        X : Mapping or iterable over Mappings, length = n_samples
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        y : (ignored)

        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        if self.sparse:
            return self._transform(X, fitting=False)

        else:
            dtype = self.dtype
            vocab = self.vocabulary_

            X = _tosequence(X)

            Xa = np.zeros((len(X), len(vocab)), dtype=dtype)

            for i, x in enumerate(X):
                for f, v in six.iteritems(x):
                    if isinstance(v, six.string_types):
                        f = "%s%s%s" % (f, self.separator, v)
                        v = 1
                    try:
                        Xa[i, vocab[f]] = dtype(v)
                    except KeyError:
                        # Feature name unseen at fit time: ignore it.
                        pass

            return Xa

    def get_feature_names(self):
        """Returns a list of feature names, ordered by their indices.

        If one-of-K coding is applied to categorical features, this will
        include the constructed feature names but not the original ones.
        """
        return self.feature_names_

    def restrict(self, support, indices=False):
        """Restrict the features to those in support using feature selection.

        This function modifies the estimator in-place.

        Parameters
        ----------
        support : array-like
            Boolean mask or list of indices (as returned by the get_support
            member of feature selectors).
        indices : boolean, optional
            Whether support is a list of indices.

        Returns
        -------
        self

        Examples
        --------
        >>> from sklearn.feature_extraction import DictVectorizer
        >>> from sklearn.feature_selection import SelectKBest, chi2
        >>> v = DictVectorizer()
        >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
        >>> X = v.fit_transform(D)
        >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
        >>> v.get_feature_names()
        ['bar', 'baz', 'foo']
        >>> v.restrict(support.get_support())  # doctest: +ELLIPSIS
        DictVectorizer(dtype=..., separator='=', sort=True,
                sparse=True)
        >>> v.get_feature_names()
        ['bar', 'foo']
        """
        if not indices:
            # Convert a boolean mask into index positions.
            support = np.where(support)[0]

        names = self.feature_names_
        new_vocab = {}
        for i in support:
            new_vocab[names[i]] = len(new_vocab)

        self.vocabulary_ = new_vocab
        self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
                                                    key=itemgetter(1))]

        return self
| bsd-3-clause |
anntzer/scikit-learn | sklearn/datasets/tests/test_openml.py | 1 | 52574 | """Test the openml loader.
"""
import gzip
import warnings
import json
import os
import re
from io import BytesIO
import numpy as np
import scipy.sparse
import sklearn
import pytest
from sklearn import config_context
from sklearn.datasets import fetch_openml
from sklearn.datasets._openml import (_open_openml_url,
_arff,
_DATA_FILE,
_convert_arff_data,
_convert_arff_data_dataframe,
_get_data_description_by_id,
_get_local_path,
_retry_with_clean_cache,
_feature_to_dtype)
from sklearn.utils._testing import (assert_warns_message,
assert_raise_message)
from sklearn.utils import is_scalar_nan
from sklearn.utils._testing import assert_allclose, assert_array_equal
from urllib.error import HTTPError
from sklearn.datasets.tests.test_common import check_return_X_y
from sklearn.externals._arff import ArffContainerType
from functools import partial
from sklearn.utils._testing import fails_if_pypy
# Directory containing this test file; fixture paths are built from it.
currdir = os.path.dirname(os.path.abspath(__file__))

# if True, urlopen will be monkey patched to only use local files
test_offline = True
def _test_features_list(data_id):
    """Compare fetch_openml's decoded columns against the raw ARFF file."""
    # XXX Test is intended to verify/ensure correct decoding behavior
    # Not usable with sparse data or datasets that have columns marked as
    # {row_identifier, ignore}
    def decode_column(data_bunch, col_idx):
        col_name = data_bunch.feature_names[col_idx]
        if col_name in data_bunch.categories:
            # XXX: This would be faster with np.take, although it does not
            # handle missing values fast (also not with mode='wrap')
            cat = data_bunch.categories[col_name]
            # Missing values (NaN) decode to None; other entries are
            # indices into the category list.
            result = [None if is_scalar_nan(idx) else cat[int(idx)]
                      for idx in data_bunch.data[:, col_idx]]
            return np.array(result, dtype='O')
        else:
            # non-nominal attribute
            return data_bunch.data[:, col_idx]

    data_bunch = fetch_openml(data_id=data_id, cache=False,
                              target_column=None, as_frame=False)

    # also obtain decoded arff
    data_description = _get_data_description_by_id(data_id, None)
    sparse = data_description['format'].lower() == 'sparse_arff'
    if sparse is True:
        raise ValueError('This test is not intended for sparse data, to keep '
                         'code relatively simple')
    url = _DATA_FILE.format(data_description['file_id'])
    with _open_openml_url(url, data_home=None) as f:
        data_arff = _arff.load((line.decode('utf-8') for line in f),
                               return_type=(_arff.COO if sparse
                                            else _arff.DENSE_GEN),
                               encode_nominal=False)

    data_downloaded = np.array(list(data_arff['data']), dtype='O')

    for i in range(len(data_bunch.feature_names)):
        # XXX: Test per column, as this makes it easier to avoid problems with
        # missing values
        np.testing.assert_array_equal(data_downloaded[:, i],
                                      decode_column(data_bunch, i))
def _fetch_dataset_from_openml(data_id, data_name, data_version,
                               target_column,
                               expected_observations, expected_features,
                               expected_missing,
                               expected_data_dtype, expected_target_dtype,
                               expect_sparse, compare_default_target):
    """Fetch a dataset by name+version, by name only and by id, and check
    the resulting bunch against the expected shapes, dtypes and names."""
    # fetches a dataset in three various ways from OpenML, using the
    # fetch_openml function, and does various checks on the validity of the
    # result. Note that this function can be mocked (by invoking
    # _monkey_patch_webbased_functions before invoking this function)
    data_by_name_id = fetch_openml(name=data_name, version=data_version,
                                   cache=False, as_frame=False)
    assert int(data_by_name_id.details['id']) == data_id

    # Please note that cache=False is crucial, as the monkey patched files are
    # not consistent with reality
    with warnings.catch_warnings():
        # See discussion in PR #19373
        # Catching UserWarnings about multiple versions of dataset
        warnings.simplefilter("ignore", category=UserWarning)
        fetch_openml(name=data_name, cache=False, as_frame=False)
    # without specifying the version, there is no guarantee that the data id
    # will be the same

    # fetch with dataset id
    data_by_id = fetch_openml(data_id=data_id, cache=False,
                              target_column=target_column, as_frame=False)
    assert data_by_id.details['name'] == data_name
    assert data_by_id.data.shape == (expected_observations, expected_features)
    if isinstance(target_column, str):
        # single target, so target is vector
        assert data_by_id.target.shape == (expected_observations, )
        assert data_by_id.target_names == [target_column]
    elif isinstance(target_column, list):
        # multi target, so target is array
        assert data_by_id.target.shape == (expected_observations,
                                           len(target_column))
        assert data_by_id.target_names == target_column
    assert data_by_id.data.dtype == expected_data_dtype
    assert data_by_id.target.dtype == expected_target_dtype
    assert len(data_by_id.feature_names) == expected_features
    for feature in data_by_id.feature_names:
        assert isinstance(feature, str)

    # TODO: pass in a list of expected nominal features
    for feature, categories in data_by_id.categories.items():
        feature_idx = data_by_id.feature_names.index(feature)
        values = np.unique(data_by_id.data[:, feature_idx])
        # NaN entries are skipped: categorical codes must stay within the
        # declared category range.
        values = values[np.isfinite(values)]
        assert set(values) <= set(range(len(categories)))

    if compare_default_target:
        # check whether the data by id and data by id target are equal
        data_by_id_default = fetch_openml(data_id=data_id, cache=False,
                                          as_frame=False)
        np.testing.assert_allclose(data_by_id.data, data_by_id_default.data)
        if data_by_id.target.dtype == np.float64:
            np.testing.assert_allclose(data_by_id.target,
                                       data_by_id_default.target)
        else:
            assert np.array_equal(data_by_id.target, data_by_id_default.target)

    if expect_sparse:
        assert isinstance(data_by_id.data, scipy.sparse.csr_matrix)
    else:
        assert isinstance(data_by_id.data, np.ndarray)
        # np.isnan doesn't work on CSR matrix
        assert (np.count_nonzero(np.isnan(data_by_id.data)) ==
                expected_missing)

    # test return_X_y option
    fetch_func = partial(fetch_openml, data_id=data_id, cache=False,
                         target_column=target_column, as_frame=False)
    check_return_X_y(data_by_id, fetch_func)
    return data_by_id
class _MockHTTPResponse:
def __init__(self, data, is_gzip):
self.data = data
self.is_gzip = is_gzip
def read(self, amt=-1):
return self.data.read(amt)
def close(self):
self.data.close()
def info(self):
if self.is_gzip:
return {'Content-Encoding': 'gzip'}
return {}
def __iter__(self):
return iter(self.data)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return False
def _monkey_patch_webbased_functions(context,
                                     data_id,
                                     gzip_response):
    """Monkey-patch ``sklearn.datasets._openml.urlopen`` so every OpenML
    URL is answered from the local gzipped fixture files for ``data_id``
    instead of the network (only when the module-level ``test_offline``
    flag is True)."""
    # monkey patches the urlopen function. Important note: Do NOT use this
    # in combination with a regular cache directory, as the files that are
    # stored as cache should not be mixed up with real openml datasets
    url_prefix_data_description = "https://openml.org/api/v1/json/data/"
    url_prefix_data_features = "https://openml.org/api/v1/json/data/features/"
    url_prefix_download_data = "https://openml.org/data/v1/"
    url_prefix_data_list = "https://openml.org/api/v1/json/data/list/"

    path_suffix = '.gz'
    read_fn = gzip.open

    def _file_name(url, suffix):
        # Fixture files are named after the URL, with every non-word
        # character replaced by '-'.
        return (re.sub(r'\W', '-', url[len("https://openml.org/"):])
                + suffix + path_suffix)

    def _mock_urlopen_data_description(url, has_gzip_header):
        assert url.startswith(url_prefix_data_description)

        path = os.path.join(currdir, 'data', 'openml', str(data_id),
                            _file_name(url, '.json'))

        if has_gzip_header and gzip_response:
            # Serve the raw gzipped bytes; the caller decompresses.
            with open(path, 'rb') as f:
                fp = BytesIO(f.read())
            return _MockHTTPResponse(fp, True)
        else:
            # Decompress here and serve plain bytes.
            with read_fn(path, 'rb') as f:
                fp = BytesIO(f.read())
            return _MockHTTPResponse(fp, False)

    def _mock_urlopen_data_features(url, has_gzip_header):
        assert url.startswith(url_prefix_data_features)
        path = os.path.join(currdir, 'data', 'openml', str(data_id),
                            _file_name(url, '.json'))
        if has_gzip_header and gzip_response:
            with open(path, 'rb') as f:
                fp = BytesIO(f.read())
            return _MockHTTPResponse(fp, True)
        else:
            with read_fn(path, 'rb') as f:
                fp = BytesIO(f.read())
            return _MockHTTPResponse(fp, False)

    def _mock_urlopen_download_data(url, has_gzip_header):
        assert (url.startswith(url_prefix_download_data))

        path = os.path.join(currdir, 'data', 'openml', str(data_id),
                            _file_name(url, '.arff'))

        if has_gzip_header and gzip_response:
            with open(path, 'rb') as f:
                fp = BytesIO(f.read())
            return _MockHTTPResponse(fp, True)
        else:
            with read_fn(path, 'rb') as f:
                fp = BytesIO(f.read())
            return _MockHTTPResponse(fp, False)

    def _mock_urlopen_data_list(url, has_gzip_header):
        assert url.startswith(url_prefix_data_list)

        json_file_path = os.path.join(currdir, 'data', 'openml',
                                      str(data_id), _file_name(url, '.json'))
        # load the file itself, to simulate a http error
        json_data = json.loads(read_fn(json_file_path, 'rb').
                               read().decode('utf-8'))
        if 'error' in json_data:
            raise HTTPError(url=None, code=412,
                            msg='Simulated mock error',
                            hdrs=None, fp=None)

        if has_gzip_header:
            with open(json_file_path, 'rb') as f:
                fp = BytesIO(f.read())
            return _MockHTTPResponse(fp, True)
        else:
            with read_fn(json_file_path, 'rb') as f:
                fp = BytesIO(f.read())
            return _MockHTTPResponse(fp, False)

    def _mock_urlopen(request):
        # Dispatch on the URL prefix to the matching mock handler.
        url = request.get_full_url()
        has_gzip_header = request.get_header('Accept-encoding') == "gzip"
        if url.startswith(url_prefix_data_list):
            return _mock_urlopen_data_list(url, has_gzip_header)
        elif url.startswith(url_prefix_data_features):
            return _mock_urlopen_data_features(url, has_gzip_header)
        elif url.startswith(url_prefix_download_data):
            return _mock_urlopen_download_data(url, has_gzip_header)
        elif url.startswith(url_prefix_data_description):
            return _mock_urlopen_data_description(url, has_gzip_header)
        else:
            raise ValueError('Unknown mocking URL pattern: %s' % url)

    # XXX: Global variable
    if test_offline:
        context.setattr(sklearn.datasets._openml, 'urlopen', _mock_urlopen)
@pytest.mark.parametrize('feature, expected_dtype', [
    ({'data_type': 'string', 'number_of_missing_values': '0'}, object),
    ({'data_type': 'string', 'number_of_missing_values': '1'}, object),
    ({'data_type': 'numeric', 'number_of_missing_values': '0'}, np.float64),
    ({'data_type': 'numeric', 'number_of_missing_values': '1'}, np.float64),
    ({'data_type': 'real', 'number_of_missing_values': '0'}, np.float64),
    ({'data_type': 'real', 'number_of_missing_values': '1'}, np.float64),
    ({'data_type': 'integer', 'number_of_missing_values': '0'}, np.int64),
    ({'data_type': 'integer', 'number_of_missing_values': '1'}, np.float64),
    ({'data_type': 'nominal', 'number_of_missing_values': '0'}, 'category'),
    ({'data_type': 'nominal', 'number_of_missing_values': '1'}, 'category'),
])
def test_feature_to_dtype(feature, expected_dtype):
    # Integer columns with missing values are expected to map to float64
    # (NaN is not representable in int64); nominal maps to pandas
    # 'category'.
    assert _feature_to_dtype(feature) == expected_dtype
@pytest.mark.parametrize('feature', [
    {'data_type': 'datatime', 'number_of_missing_values': '0'}
])
def test_feature_to_dtype_error(feature):
    # An unknown data_type must raise a ValueError naming the feature.
    msg = 'Unsupported feature: {}'.format(feature)
    with pytest.raises(ValueError, match=msg):
        _feature_to_dtype(feature)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_pandas(monkeypatch):
    """Check shapes, dtypes and names of the iris dataset as dataframes."""
    # classification dataset with numeric only columns
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 61
    data_shape = (150, 4)
    target_shape = (150, )
    frame_shape = (150, 5)

    target_dtype = CategoricalDtype(['Iris-setosa', 'Iris-versicolor',
                                     'Iris-virginica'])
    data_dtypes = [np.float64] * 4
    data_names = ['sepallength', 'sepalwidth', 'petallength', 'petalwidth']
    target_name = 'class'

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert np.all(data.dtypes == data_dtypes)
    assert data.shape == data_shape
    assert np.all(data.columns == data_names)
    assert np.all(bunch.feature_names == data_names)
    assert bunch.target_names == [target_name]

    assert isinstance(target, pd.Series)
    assert target.dtype == target_dtype
    assert target.shape == target_shape
    assert target.name == target_name
    assert target.index.is_unique

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    assert np.all(frame.dtypes == data_dtypes + [target_dtype])
    assert frame.index.is_unique
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_pandas_equal_to_no_frame(monkeypatch):
    """as_frame=True must expose the same underlying values as
    as_frame=False."""
    pytest.importorskip('pandas')
    data_id = 61

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    as_frame = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    as_array = fetch_openml(data_id=data_id, as_frame=False, cache=False)

    assert_allclose(as_array.data, as_frame.data)
    assert_array_equal(as_array.target, as_frame.target)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_iris_multitarget_pandas(monkeypatch):
    """Check iris fetched with two numeric target columns (multi-target)."""
    # classification dataset with numeric only columns
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 61
    data_shape = (150, 3)
    target_shape = (150, 2)
    frame_shape = (150, 5)
    target_column = ['petalwidth', 'petallength']

    cat_dtype = CategoricalDtype(['Iris-setosa', 'Iris-versicolor',
                                  'Iris-virginica'])
    data_dtypes = [np.float64, np.float64] + [cat_dtype]
    data_names = ['sepallength', 'sepalwidth', 'class']
    target_dtypes = [np.float64, np.float64]
    target_names = ['petalwidth', 'petallength']

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False,
                         target_column=target_column)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert np.all(data.dtypes == data_dtypes)
    assert data.shape == data_shape
    assert np.all(data.columns == data_names)
    assert np.all(bunch.feature_names == data_names)
    assert bunch.target_names == target_names

    # Multiple targets: target is a DataFrame rather than a Series.
    assert isinstance(target, pd.DataFrame)
    assert np.all(target.dtypes == target_dtypes)
    assert target.shape == target_shape
    assert np.all(target.columns == target_names)

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
    assert np.all(frame.dtypes == [np.float64] * 4 + [cat_dtype])
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_anneal_pandas(monkeypatch):
    """Check the anneal dataset: mix of categorical and float columns."""
    # classification dataset with numeric and categorical columns
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 2
    target_column = 'class'
    data_shape = (11, 38)
    target_shape = (11,)
    frame_shape = (11, 39)
    expected_data_categories = 32
    expected_data_floats = 6

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)

    bunch = fetch_openml(data_id=data_id, as_frame=True,
                         target_column=target_column, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    # Count how many columns came back categorical vs floating point.
    n_categories = len([dtype for dtype in data.dtypes
                        if isinstance(dtype, CategoricalDtype)])
    n_floats = len([dtype for dtype in data.dtypes if dtype.kind == 'f'])
    assert expected_data_categories == n_categories
    assert expected_data_floats == n_floats

    assert isinstance(target, pd.Series)
    assert target.shape == target_shape
    assert isinstance(target.dtype, CategoricalDtype)

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_cpu_pandas(monkeypatch):
    """Check the cpu dataset: regression with one categorical column."""
    # regression dataset with numeric and categorical columns
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 561
    data_shape = (209, 7)
    target_shape = (209, )
    frame_shape = (209, 8)

    cat_dtype = CategoricalDtype(['adviser', 'amdahl', 'apollo', 'basf',
                                  'bti', 'burroughs', 'c.r.d', 'cdc',
                                  'cambex', 'dec', 'dg', 'formation',
                                  'four-phase', 'gould', 'hp', 'harris',
                                  'honeywell', 'ibm', 'ipl', 'magnuson',
                                  'microdata', 'nas', 'ncr', 'nixdorf',
                                  'perkin-elmer', 'prime', 'siemens',
                                  'sperry', 'sratus', 'wang'])
    data_dtypes = [cat_dtype] + [np.float64] * 6
    feature_names = ['vendor', 'MYCT', 'MMIN', 'MMAX', 'CACH',
                     'CHMIN', 'CHMAX']
    target_name = 'class'

    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data = bunch.data
    target = bunch.target
    frame = bunch.frame

    assert isinstance(data, pd.DataFrame)
    assert data.shape == data_shape
    assert np.all(data.dtypes == data_dtypes)
    assert np.all(data.columns == feature_names)
    assert np.all(bunch.feature_names == feature_names)
    assert bunch.target_names == [target_name]

    # Regression: the target is a float Series, not categorical.
    assert isinstance(target, pd.Series)
    assert target.shape == target_shape
    assert target.dtype == np.float64
    assert target.name == target_name

    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == frame_shape
def test_fetch_openml_australian_pandas_error_sparse(monkeypatch):
    """as_frame=True must be rejected for sparse ARFF data (id=292)."""
    data_id = 292
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    with pytest.raises(ValueError,
                       match='Cannot return dataframe with sparse data'):
        fetch_openml(data_id=data_id, as_frame=True, cache=False)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_as_frame_auto(monkeypatch):
    """as_frame='auto' -> DataFrame for dense data, CSR matrix for sparse."""
    pd = pytest.importorskip('pandas')
    # iris (dense) dataset, version 1
    dense_id = 61
    _monkey_patch_webbased_functions(monkeypatch, dense_id, True)
    dense_bunch = fetch_openml(data_id=dense_id, as_frame='auto', cache=False)
    assert isinstance(dense_bunch.data, pd.DataFrame)
    # Australian (sparse) dataset, version 1
    sparse_id = 292
    _monkey_patch_webbased_functions(monkeypatch, sparse_id, True)
    sparse_bunch = fetch_openml(data_id=sparse_id, as_frame='auto',
                                cache=False)
    assert isinstance(sparse_bunch.data, scipy.sparse.csr_matrix)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_convert_arff_data_dataframe_warning_low_memory_pandas(monkeypatch):
    """A tiny working_memory budget must trigger a UserWarning."""
    pytest.importorskip('pandas')
    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    expected = 'Could not adhere to working_memory config.'
    with config_context(working_memory=1e-6):
        with pytest.warns(UserWarning, match=expected):
            fetch_openml(data_id=data_id, as_frame=True, cache=False)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_adultcensus_pandas_return_X_y(monkeypatch):
    """return_X_y=True with as_frame=True yields (DataFrame, Series)."""
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    X, y = fetch_openml(data_id=data_id, as_frame=True, cache=False,
                        return_X_y=True)
    assert isinstance(X, pd.DataFrame)
    assert X.shape == (10, 14)
    # count categorical vs float feature columns
    assert sum(isinstance(dt, CategoricalDtype) for dt in X.dtypes) == 8
    assert sum(dt.kind == 'f' for dt in X.dtypes) == 6
    assert isinstance(y, pd.Series)
    assert y.shape == (10,)
    assert y.name == 'class'
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_adultcensus_pandas(monkeypatch):
    """Adult census (id=1119): data/target/frame shapes and dtypes.

    Exercises the numeric row attribute handling (issue #12329).
    """
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data, target, frame = bunch.data, bunch.target, bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert data.shape == (10, 14)
    # count categorical vs float feature columns
    assert sum(isinstance(dt, CategoricalDtype) for dt in data.dtypes) == 8
    assert sum(dt.kind == 'f' for dt in data.dtypes) == 6
    assert isinstance(target, pd.Series)
    assert target.shape == (10,)
    assert target.name == 'class'
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == (10, 15)
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_miceprotein_pandas(monkeypatch):
    """MiceProtein (id=40966): row id and ignore attributes are dropped.

    The data_features json declares 82 attributes; the row id (1) and the
    ignore attributes (3) are removed, leaving 77 features plus the target
    in the frame.
    """
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 40966
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data, target, frame = bunch.data, bunch.target, bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert data.shape == (7, 77)
    assert np.all(data.dtypes == np.float64)
    assert isinstance(target, pd.Series)
    assert isinstance(target.dtype, CategoricalDtype)
    assert target.shape == (7,)
    assert target.name == 'class'
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == (7, 78)
    # only the target column is categorical; all features are floats
    assert sum(isinstance(dt, CategoricalDtype) for dt in frame.dtypes) == 1
    assert sum(dt.kind == 'f' for dt in frame.dtypes) == 77
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_emotions_pandas(monkeypatch):
    """Emotions (id=40589): natively multi-target classification frame."""
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 40589
    target_column = ['amazed.suprised', 'happy.pleased', 'relaxing.calm',
                     'quiet.still', 'sad.lonely', 'angry.aggresive']
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False,
                         target_column=target_column)
    data, target, frame = bunch.data, bunch.target, bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert data.shape == (13, 72)
    # multiple targets come back as a DataFrame
    assert isinstance(target, pd.DataFrame)
    assert target.shape == (13, 6)
    assert np.all(target.columns == target_column)
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == (13, 78)
    assert sum(isinstance(dt, CategoricalDtype) for dt in frame.dtypes) == 6
    assert sum(dt.kind == 'f' for dt in frame.dtypes) == 72
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
def test_fetch_openml_titanic_pandas(monkeypatch):
    """Titanic (id=40945): STRING attributes come back as object columns."""
    pd = pytest.importorskip('pandas')
    CategoricalDtype = pd.api.types.CategoricalDtype
    data_id = 40945
    column_dtypes = {
        'pclass': np.float64,
        'name': object,
        'sex': CategoricalDtype(['female', 'male']),
        'age': np.float64,
        'sibsp': np.float64,
        'parch': np.float64,
        'ticket': object,
        'fare': np.float64,
        'cabin': object,
        'embarked': CategoricalDtype(['C', 'Q', 'S']),
        'boat': object,
        'body': np.float64,
        'home.dest': object,
        'survived': CategoricalDtype(['0', '1'])
    }
    feature_names = ['pclass', 'name', 'sex', 'age', 'sibsp',
                     'parch', 'ticket', 'fare', 'cabin', 'embarked',
                     'boat', 'body', 'home.dest']
    # in the combined frame the target follows the first feature column
    frame_columns = ['pclass', 'survived'] + feature_names[1:]
    frame_dtypes = [column_dtypes[col] for col in frame_columns]
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    bunch = fetch_openml(data_id=data_id, as_frame=True, cache=False)
    data, target, frame = bunch.data, bunch.target, bunch.frame
    assert isinstance(data, pd.DataFrame)
    assert data.shape == (1309, 13)
    assert np.all(data.columns == feature_names)
    assert bunch.target_names == ['survived']
    assert isinstance(target, pd.Series)
    assert target.shape == (1309,)
    assert target.name == 'survived'
    assert target.dtype == column_dtypes['survived']
    assert isinstance(frame, pd.DataFrame)
    assert frame.shape == (1309, 14)
    assert np.all(frame.dtypes == frame_dtypes)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris(monkeypatch, gzip_response):
    """Fetching iris by name warns about multiple active versions."""
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    expected_msg = (
        "Multiple active versions of the dataset matching the name"
        " iris exist. Versions may be fundamentally different, "
        "returning version 1."
    )
    assert_warns_message(UserWarning, expected_msg, fetch_openml,
                         name='iris', as_frame=False)
def test_decode_iris(monkeypatch):
    """Feature list decoding for the iris dataset (id=61)."""
    iris_id = 61
    _monkey_patch_webbased_functions(monkeypatch, iris_id, False)
    _test_features_list(iris_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_iris_multitarget(monkeypatch, gzip_response):
    """Iris with two numeric target columns (multi-target regression)."""
    data_id = 61
    targets = ['sepallength', 'sepalwidth']
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, 'iris', 1, targets,
                               expected_observations=150,
                               expected_features=3,
                               expected_missing=0,
                               expected_data_dtype=np.float64,
                               expected_target_dtype=np.float64,
                               expect_sparse=False,
                               compare_default_target=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_anneal(monkeypatch, gzip_response):
    """Anneal (id=2): numeric + categorical columns, ndarray output."""
    data_id = 2
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Not all original instances included for space reasons
    _fetch_dataset_from_openml(data_id, 'anneal', 1, 'class',
                               expected_observations=11,
                               expected_features=38,
                               expected_missing=267,
                               expected_data_dtype=np.float64,
                               expected_target_dtype=object,
                               expect_sparse=False,
                               compare_default_target=True)
def test_decode_anneal(monkeypatch):
    """Feature list decoding for the anneal dataset (id=2)."""
    anneal_id = 2
    _monkey_patch_webbased_functions(monkeypatch, anneal_id, False)
    _test_features_list(anneal_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_anneal_multitarget(monkeypatch, gzip_response):
    """Anneal with three nominal target columns (multi-target)."""
    data_id = 2
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Not all original instances included for space reasons
    _fetch_dataset_from_openml(data_id, 'anneal', 1,
                               ['class', 'product-type', 'shape'],
                               expected_observations=11,
                               expected_features=36,
                               expected_missing=267,
                               expected_data_dtype=np.float64,
                               expected_target_dtype=object,
                               expect_sparse=False,
                               compare_default_target=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_cpu(monkeypatch, gzip_response):
    """CPU (id=561): regression with numeric and categorical columns."""
    data_id = 561
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, 'cpu', 1, 'class',
                               expected_observations=209,
                               expected_features=7,
                               expected_missing=0,
                               expected_data_dtype=np.float64,
                               expected_target_dtype=np.float64,
                               expect_sparse=False,
                               compare_default_target=True)
def test_decode_cpu(monkeypatch):
    """Feature list decoding for the cpu dataset (id=561)."""
    cpu_id = 561
    _monkey_patch_webbased_functions(monkeypatch, cpu_id, False)
    _test_features_list(cpu_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_australian(monkeypatch, gzip_response):
    """Australian (id=292): the only reasonably small sparse dataset.

    The dataset is inactive, so the deactivation warning must be caught;
    the mocking framework does not deactivate it in our tests.
    """
    data_id = 292
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Not all original instances included for space reasons
    assert_warns_message(
        UserWarning,
        "Version 1 of dataset Australian is inactive,",
        _fetch_dataset_from_openml,
        data_id=data_id,
        data_name='Australian',
        data_version=1,
        target_column='Y',
        expected_observations=85,
        expected_features=14,
        expected_missing=0,
        expect_sparse=True,
        expected_data_dtype=np.float64,
        expected_target_dtype=object,
        compare_default_target=False,  # numpy specific check
    )
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_adultcensus(monkeypatch, gzip_response):
    """Adult census (id=1119): numeric row attribute (issue #12329)."""
    data_id = 1119
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Not all original instances included for space reasons
    _fetch_dataset_from_openml(data_id, 'adult-census', 1, 'class',
                               expected_observations=10,
                               expected_features=14,
                               expected_missing=0,
                               expected_data_dtype=np.float64,
                               expected_target_dtype=object,
                               expect_sparse=False,
                               compare_default_target=True)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_miceprotein(monkeypatch, gzip_response):
    """MiceProtein (id=40966): row id / ignore attributes are dropped.

    The data_features json has 82 attributes; the row id (1) and ignore
    attributes (3) are removed, and the target is stored in data.target.
    """
    data_id = 40966
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Not all original instances included for space reasons
    _fetch_dataset_from_openml(data_id, 'MiceProtein', 4, 'class',
                               expected_observations=7,
                               expected_features=77,
                               expected_missing=7,
                               expected_data_dtype=np.float64,
                               expected_target_dtype=object,
                               expect_sparse=False,
                               compare_default_target=True)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_emotions(monkeypatch, gzip_response):
    """Emotions (id=40589): natively multi-target classification."""
    data_id = 40589
    targets = ['amazed.suprised', 'happy.pleased', 'relaxing.calm',
               'quiet.still', 'sad.lonely', 'angry.aggresive']
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    _fetch_dataset_from_openml(data_id, 'emotions', 3, targets,
                               expected_observations=13,
                               expected_features=72,
                               expected_missing=0,
                               expected_data_dtype=np.float64,
                               expected_target_dtype=object,
                               expect_sparse=False,
                               compare_default_target=True)
def test_decode_emotions(monkeypatch):
    """Feature list decoding for the emotions dataset (id=40589)."""
    emotions_id = 40589
    _monkey_patch_webbased_functions(monkeypatch, emotions_id, False)
    _test_features_list(emotions_id)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_open_openml_url_cache(monkeypatch, gzip_response, tmpdir):
    """A second _open_openml_url call is served from the on-disk cache."""
    data_id = 61
    _monkey_patch_webbased_functions(
        monkeypatch, data_id, gzip_response)
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    # first request downloads and fills the cache on disk
    first_response = _open_openml_url(openml_path, cache_directory)
    assert os.path.isfile(_get_local_path(openml_path, cache_directory))
    # second request must be answered from the cached file with same bytes
    second_response = _open_openml_url(openml_path, cache_directory)
    assert first_response.read() == second_response.read()
@pytest.mark.parametrize('gzip_response', [True, False])
@pytest.mark.parametrize('write_to_disk', [True, False])
def test_open_openml_url_unlinks_local_path(
        monkeypatch, gzip_response, tmpdir, write_to_disk):
    """A failed download must not leave a (partial) file in the cache."""
    data_id = 61
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    location = _get_local_path(openml_path, cache_directory)

    def _failing_urlopen(request):
        # optionally simulate a partially written download before failing
        if write_to_disk:
            with open(location, "w") as f:
                f.write("")
        raise ValueError("Invalid request")

    monkeypatch.setattr(sklearn.datasets._openml, 'urlopen',
                        _failing_urlopen)
    with pytest.raises(ValueError, match="Invalid request"):
        _open_openml_url(openml_path, cache_directory)
    # the stale cache entry must have been removed
    assert not os.path.exists(location)
def test_retry_with_clean_cache(tmpdir):
    """The decorator wipes a corrupt cache entry, warns, and retries."""
    data_id = 61
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    location = _get_local_path(openml_path, cache_directory)
    # pre-create an (empty, hence "corrupt") cache file
    os.makedirs(os.path.dirname(location))
    with open(location, 'w') as f:
        f.write("")

    @_retry_with_clean_cache(openml_path, cache_directory)
    def _load_data():
        # The first call will raise an error since location exists
        if os.path.exists(location):
            raise Exception("File exist!")
        return 1

    with pytest.warns(RuntimeWarning,
                      match="Invalid cache, redownloading file"):
        assert _load_data() == 1
def test_retry_with_clean_cache_http_error(tmpdir):
    """HTTP errors propagate unchanged through _retry_with_clean_cache."""
    data_id = 61
    openml_path = sklearn.datasets._openml._DATA_FILE.format(data_id)
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))

    @_retry_with_clean_cache(openml_path, cache_directory)
    def _load_data():
        raise HTTPError(url=None, code=412,
                        msg='Simulated mock error',
                        hdrs=None, fp=None)

    with pytest.raises(HTTPError, match="Simulated mock error"):
        _load_data()
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_cache(monkeypatch, gzip_response, tmpdir):
    """A second fetch is served from the cache without network access.

    After the first (mock-network) fetch fills the cache, urlopen is
    replaced by a mock that always raises, so any further network access
    fails the test loudly.
    """
    def _mock_urlopen_raise(request):
        # Fix: the two implicitly concatenated literals were missing a
        # separating space and rendered as "correct cachehandling".
        raise ValueError('This mechanism intends to test correct cache '
                         'handling. As such, urlopen should never be '
                         'accessed. URL: %s' % request.get_full_url())
    data_id = 2
    cache_directory = str(tmpdir.mkdir('scikit_learn_data'))
    _monkey_patch_webbased_functions(
        monkeypatch, data_id, gzip_response)
    # first call goes through the mocked web functions and fills the cache
    X_fetched, y_fetched = fetch_openml(data_id=data_id, cache=True,
                                        data_home=cache_directory,
                                        return_X_y=True, as_frame=False)
    monkeypatch.setattr(sklearn.datasets._openml, 'urlopen',
                        _mock_urlopen_raise)
    # second call must be answered entirely from the cache
    X_cached, y_cached = fetch_openml(data_id=data_id, cache=True,
                                      data_home=cache_directory,
                                      return_X_y=True, as_frame=False)
    np.testing.assert_array_equal(X_fetched, X_cached)
    np.testing.assert_array_equal(y_fetched, y_cached)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_notarget(monkeypatch, gzip_response):
    """target_column=None: all 5 iris columns in data, target is None."""
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    bunch = fetch_openml(data_id=data_id, target_column=None,
                         cache=False, as_frame=False)
    assert bunch.data.shape == (150, 5)
    assert bunch.target is None
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_inactive(monkeypatch, gzip_response):
    """Inactive datasets warn but are returned, by id or by name/version."""
    data_id = 40675
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # fetch inactive dataset by id
    glas2 = assert_warns_message(
        UserWarning, "Version 1 of dataset glass2 is inactive,", fetch_openml,
        data_id=data_id, cache=False, as_frame=False)
    assert glas2.data.shape == (163, 9)
    # fetch the same inactive dataset by name and version
    glas2_by_version = assert_warns_message(
        UserWarning, "Version 1 of dataset glass2 is inactive,", fetch_openml,
        data_id=None, name="glass2", version=1, cache=False, as_frame=False)
    assert int(glas2_by_version.details['id']) == data_id
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_nonexiting(monkeypatch, gzip_response):
    """Fetching glass2 by name fails: it has no active version."""
    data_id = 40675
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Note that we only want to search by name (not data id)
    assert_raise_message(ValueError, "No active dataset glass2 found",
                         fetch_openml, name='glass2', cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_raises_illegal_multitarget(monkeypatch, gzip_response):
    """Mixing a numeric and a nominal target column must raise."""
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # Note that we only want to search by name (not data id)
    assert_raise_message(ValueError,
                         "Can only handle homogeneous multi-target datasets,",
                         fetch_openml, data_id=data_id,
                         target_column=['sepalwidth', 'class'], cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_warn_ignore_attribute(monkeypatch, gzip_response):
    """Targeting a row-identifier or ignored column emits a UserWarning."""
    data_id = 40966
    row_id_msg = "target_column={} has flag is_row_identifier."
    ignore_msg = "target_column={} has flag is_ignore."
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    # single-column and multi-column target cases, same expected warnings
    cases = [
        (row_id_msg.format('MouseID'), 'MouseID'),
        (ignore_msg.format('Genotype'), 'Genotype'),
        (row_id_msg.format('MouseID'), ['MouseID', 'class']),
        (ignore_msg.format('Genotype'), ['Genotype', 'class']),
    ]
    for expected_msg, target in cases:
        assert_warns_message(UserWarning, expected_msg,
                             fetch_openml, data_id=data_id,
                             target_column=target,
                             cache=False, as_frame=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_string_attribute_without_dataframe(monkeypatch, gzip_response):
    """STRING attributes require as_frame=True; ndarray mode must raise."""
    data_id = 40945
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    expected_msg = ('STRING attributes are not supported for '
                    'array representation. Try as_frame=True')
    assert_raise_message(ValueError, expected_msg,
                         fetch_openml, data_id=data_id, cache=False,
                         as_frame=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_dataset_with_openml_error(monkeypatch, gzip_response):
    """Datasets flagged with an error on OpenML produce a UserWarning."""
    data_id = 1
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    expected_msg = ("OpenML registered a problem with the dataset. It might "
                    "be unusable. Error:")
    assert_warns_message(UserWarning, expected_msg, fetch_openml,
                         data_id=data_id, cache=False, as_frame=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_dataset_with_openml_warning(monkeypatch, gzip_response):
    """Datasets flagged with a warning on OpenML produce a UserWarning."""
    data_id = 3
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    expected_msg = ("OpenML raised a warning on the dataset. It might be "
                    "unusable. Warning:")
    assert_warns_message(UserWarning, expected_msg, fetch_openml,
                         data_id=data_id, cache=False, as_frame=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_illegal_column(monkeypatch, gzip_response):
    """Unknown target column names raise KeyError, alone or in a list."""
    data_id = 61
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    for target in ('undefined', ['undefined', 'class']):
        assert_raise_message(KeyError, "Could not find target_column=",
                             fetch_openml, data_id=data_id,
                             target_column=target, cache=False)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_raises_missing_values_target(monkeypatch, gzip_response):
    """A target column containing missing values must be rejected."""
    data_id = 2
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    assert_raise_message(ValueError, "Target column ",
                         fetch_openml, data_id=data_id,
                         target_column='family')
def test_fetch_openml_raises_illegal_argument():
    """Invalid combinations of data_id / name / version must raise."""
    # data_id may not be combined with name and/or version
    illegal_combinations = (
        {'data_id': -1, 'name': "name"},
        {'data_id': -1, 'name': None, 'version': "version"},
        {'data_id': -1, 'name': "name", 'version': "version"},
    )
    for kwargs in illegal_combinations:
        assert_raise_message(ValueError, "Dataset data_id=",
                             fetch_openml, **kwargs)
    # at least one of name / data_id is required
    assert_raise_message(ValueError, "Neither name nor data_id are provided. "
                         "Please provide name or data_id.", fetch_openml)
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_with_ignored_feature(monkeypatch, gzip_response):
    """Ignored features are excluded from the Bunch (issue #14340).

    The ZOO dataset (id=62) has 17 features of which one ('animal') is
    flagged as ignored, so only 16 must remain.
    """
    data_id = 62
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response)
    dataset = sklearn.datasets.fetch_openml(data_id=data_id, cache=False,
                                            as_frame=False)
    assert dataset is not None
    assert dataset['data'].shape == (101, 16)
    assert 'animal' not in dataset['feature_names']
# Known failure of PyPy for OpenML. See the following issue:
# https://github.com/scikit-learn/scikit-learn/issues/18906
@fails_if_pypy
@pytest.mark.parametrize('as_frame', [True, False])
def test_fetch_openml_verify_checksum(monkeypatch, as_frame, cache, tmpdir):
    """Corrupting one byte of the downloaded ARFF must raise ValueError.

    The raised error message must contain the file identifier so the user
    can locate the offending download.
    """
    if as_frame:
        pytest.importorskip('pandas')
    data_id = 2
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    # create a temporary modified arff file
    dataset_dir = os.path.join(currdir, 'data', 'openml', str(data_id))
    original_data_path = os.path.join(dataset_dir,
                                      'data-v1-download-1666876.arff.gz')
    corrupt_copy = os.path.join(tmpdir, "test_invalid_checksum.arff")
    with gzip.GzipFile(original_data_path, "rb") as orig_gzip, \
            gzip.GzipFile(corrupt_copy, "wb") as modified_gzip:
        data = bytearray(orig_gzip.read())
        data[len(data)-1] = 37  # overwrite the last byte to break the checksum
        modified_gzip.write(data)
    # Requests are already mocked by monkey_patch_webbased_functions.
    # We want to re-use that mock for all requests except file download,
    # hence creating a thin mock over the original mock
    mocked_openml_url = sklearn.datasets._openml.urlopen
    def swap_file_mock(request):
        url = request.get_full_url()
        if url.endswith('data/v1/download/1666876'):
            # serve the corrupted copy instead of the pristine file
            return _MockHTTPResponse(open(corrupt_copy, "rb"), is_gzip=True)
        else:
            return mocked_openml_url(request)
    monkeypatch.setattr(sklearn.datasets._openml, 'urlopen', swap_file_mock)
    # validate failed checksum
    with pytest.raises(ValueError) as exc:
        sklearn.datasets.fetch_openml(data_id=data_id, cache=False,
                                      as_frame=as_frame)
    # exception message should have file-path
    assert exc.match("1666876")
def test_convert_arff_data_type():
    """Generator/list mismatches in arff['data'] raise informative errors."""
    pytest.importorskip('pandas')

    def _container(payload):
        # minimal ARFF container around the given 'data' payload
        return {'data': payload, 'description': '', 'relation': '',
                'attributes': []}

    # a generator payload requires an explicit shape for ndarray conversion
    arff: ArffContainerType = _container(el for el in range(2))
    msg = r"shape must be provided when arr\['data'\] is a Generator"
    with pytest.raises(ValueError, match=msg):
        _convert_arff_data(arff, [0], [0], shape=None)

    # conversely, the DataFrame conversion requires a generator payload
    arff = _container(list(range(2)))
    msg = r"arff\['data'\] must be a generator when converting to pd.DataFrame"
    with pytest.raises(ValueError, match=msg):
        _convert_arff_data_dataframe(arff, ['a'], {})
def test_missing_values_pandas(monkeypatch):
    """Missing values in a categorical column survive the pandas conversion."""
    pytest.importorskip('pandas')
    data_id = 42585  # penguins dataset
    _monkey_patch_webbased_functions(monkeypatch, data_id, True)
    penguins = fetch_openml(data_id=data_id, cache=False, as_frame=True)
    sex_column = penguins.data['sex']
    # the categorical column must keep its NaN entries
    assert sex_column.isna().any()
    assert_array_equal(sex_column.dtype.categories, ['FEMALE', 'MALE', '_'])
| bsd-3-clause |
jayflo/scikit-learn | sklearn/svm/tests/test_svm.py | 116 | 31653 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
    """Check the fitted attributes of a linear SVC on the toy problem."""
    model = svm.SVC(kernel='linear').fit(X, Y)
    expected_attributes = [
        ('dual_coef_', [[-0.25, .25]]),
        ('support_', [1, 3]),
        ('support_vectors_', (X[1], X[3])),
        ('intercept_', [0.]),
    ]
    for attr, value in expected_attributes:
        assert_array_equal(getattr(model, attr), value)
    # the toy problem is separable, so training points are predicted exactly
    assert_array_equal(model.predict(X), Y)
def test_libsvm_iris():
    """Consistency of SVC and the low-level libsvm bindings on iris."""
    # the iris data was shuffled at module load so labels are not ordered
    for kernel in ('linear', 'rbf'):
        clf = svm.SVC(kernel=kernel).fit(iris.data, iris.target)
        assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
        assert_array_equal(clf.classes_, np.sort(clf.classes_))

    # check also the low-level API
    y_float = iris.target.astype(np.float64)
    model = svm.libsvm.fit(iris.data, y_float)
    pred = svm.libsvm.predict(iris.data, *model)
    assert_greater(np.mean(pred == iris.target), .95)

    model = svm.libsvm.fit(iris.data, y_float, kernel='linear')
    pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
    assert_greater(np.mean(pred == iris.target), .95)

    pred = svm.libsvm.cross_validation(iris.data, y_float, 5,
                                       kernel='linear', random_seed=0)
    assert_greater(np.mean(pred == iris.target), .95)

    # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
    # thread calling this wrapper calling `srand` concurrently).
    pred2 = svm.libsvm.cross_validation(iris.data, y_float, 5,
                                        kernel='linear', random_seed=0)
    assert_array_equal(pred, pred2)
def test_single_sample_1d():
    """SVC and LinearSVC must accept a single sample given as a 1-d array."""
    for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
        clf.fit(X, Y).predict(X[0])
def test_precomputed():
    # SVC with a precomputed kernel.
    # We test it with a toy dataset and with iris.
    clf = svm.SVC(kernel='precomputed')
    # Gram matrix for train data (square matrix)
    # (we use just a linear kernel)
    K = np.dot(X, np.array(X).T)
    clf.fit(K, Y)
    # Gram matrix for test data (rectangular matrix)
    KT = np.dot(T, np.array(X).T)
    pred = clf.predict(KT)
    # a Gram matrix of the wrong shape must be rejected at predict time
    assert_raises(ValueError, clf.predict, KT.T)
    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)
    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    KT = np.zeros_like(KT)
    for i in range(len(T)):
        for j in clf.support_:
            KT[i, j] = np.dot(T[i], X[j])
    pred = clf.predict(KT)
    assert_array_equal(pred, true_result)
    # same as before, but using a callable function instead of the kernel
    # matrix. kernel is just a linear kernel
    kfunc = lambda x, y: np.dot(x, y.T)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)
    # test a precomputed kernel with the iris dataset
    # and check parameters against a linear SVC
    clf = svm.SVC(kernel='precomputed')
    clf2 = svm.SVC(kernel='linear')
    K = np.dot(iris.data, iris.data.T)
    clf.fit(K, iris.target)
    clf2.fit(iris.data, iris.target)
    pred = clf.predict(K)
    assert_array_almost_equal(clf.support_, clf2.support_)
    assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
    assert_array_almost_equal(clf.intercept_, clf2.intercept_)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    K = np.zeros_like(K)
    for i in range(len(iris.data)):
        for j in clf.support_:
            K[i, j] = np.dot(iris.data[i], iris.data[j])
    pred = clf.predict(K)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(iris.data, iris.target)
    # BUG FIX: previously the stale `pred` from the precomputed-kernel
    # classifier was re-asserted here, so the callable-kernel classifier
    # was fitted but never actually exercised. Predict with the new model.
    pred = clf.predict(iris.data)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
    # An SVR whose custom kernel yields a malformed Gram matrix must
    # raise ValueError when predicting.
    X = [[0.0], [1.0]]
    y = [0.0, 0.5]
    # kernel producing a 1x1 matrix regardless of the input sizes
    bad_kernel = lambda x, y: np.array([[1.0]])
    reg = svm.SVR(kernel=bad_kernel)
    reg.fit(X, y)
    assert_raises(ValueError, reg.predict, X)
def test_oneclass():
    # Test OneClassSVM
    clf = svm.OneClassSVM()
    clf.fit(X)
    pred = clf.predict(T)
    # with default parameters all test points are flagged as outliers
    assert_array_almost_equal(pred, [-1, -1, -1])
    assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
    assert_array_almost_equal(clf.dual_coef_,
                              [[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
                              decimal=3)
    # coef_ is only defined for linear kernels; the default rbf kernel
    # must raise when it is accessed
    assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
    # check that decision_function_shape='ovr' gives
    # correct shape and is consistent with predict
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(iris.data, iris.target)
    dec = clf.decision_function(iris.data)
    # ovr: one column per class (iris has 3 classes)
    assert_equal(dec.shape, (len(iris.data), 3))
    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
    # with five classes:
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(X_train, y_train)
    dec = clf.decision_function(X_test)
    assert_equal(dec.shape, (len(X_test), 5))
    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape of decision_function_shape='ovo'
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    # ovo: one column per class pair, 5 choose 2 = 10
    assert_equal(dec.shape, (len(X_train), 10))
    # check deprecation warning
    clf.decision_function_shape = None
    msg = "change the shape of the decision function"
    dec = assert_warns_message(ChangedBehaviorWarning, msg,
                               clf.decision_function, X_train)
    assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
# used to work only when the labels where a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
    # Test that it gives proper exception on deficient input
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, clf.fit, X, Y)
    Y2 = Y[:-1]  # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X, Y2)
    # Test with arrays that are non-contiguous.
    for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
        Xf = np.asfortranarray(X)
        assert_false(Xf.flags['C_CONTIGUOUS'])
        yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
        # slicing a column makes y neither C- nor F-contiguous
        yf = yf[:, -1]
        assert_false(yf.flags['F_CONTIGUOUS'])
        assert_false(yf.flags['C_CONTIGUOUS'])
        clf.fit(Xf, yf)
        assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels: fit expects a square Gram matrix
    clf = svm.SVC(kernel='precomputed')
    assert_raises(ValueError, clf.fit, X, Y)
    # sample_weight bad dimensions
    clf = svm.SVC()
    assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
    # predict with sparse input when trained with dense
    clf = svm.SVC().fit(X, Y)
    assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
    # predict with a feature count that differs from training
    Xt = np.array(X).T
    clf.fit(np.dot(X, Xt), Y)
    assert_raises(ValueError, clf.predict, X)
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
    # A sparse Gram matrix is not supported with kernel='precomputed';
    # fit must raise a TypeError that mentions the problem.
    clf = svm.SVC(kernel='precomputed')
    sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
    try:
        clf.fit(sparse_gram, [0, 1])
    except TypeError as e:
        assert_in("Sparse precomputed", str(e))
    else:
        raise AssertionError("fit() unexpectedly accepted a sparse "
                             "precomputed Gram matrix")
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
    # The Crammer-Singer multi-class formulation must also work (and fit
    # well) on a plain binary problem, with and without an intercept.
    X, y = make_classification(n_classes=2, random_state=0)
    for intercept in (True, False):
        clf = svm.LinearSVC(fit_intercept=intercept,
                            multi_class="crammer_singer",
                            random_state=0)
        accuracy = clf.fit(X, y).score(X, y)
        assert_greater(accuracy, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
    # Check that fitting with verbose=1 does not crash while liblinear
    # writes its progress directly to the stdout file descriptor.
    import os
    stdout = os.dup(1)  # save original stdout
    read_fd, write_fd = os.pipe()
    os.dup2(write_fd, 1)  # redirect stdout into the pipe
    try:
        # actual call
        clf = svm.LinearSVC(verbose=1)
        clf.fit(X, Y)
    finally:
        # restore stdout even if fit() raises, so later test output is
        # not swallowed, and close the temporary descriptors that the
        # original version leaked.
        os.dup2(stdout, 1)
        os.close(stdout)
        os.close(read_fd)
        os.close(write_fd)
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
    # A callable kernel returning a Gram matrix of the wrong shape must
    # make fit() fail with ValueError.
    svc = svm.SVC(kernel=lambda x, y: x)
    assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
    # With max_iter=1 the optimizer cannot converge and a
    # ConvergenceWarning must be issued.
    a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
                random_state=0, max_iter=1)
    assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
    # Predicting with an unfitted estimator must raise an informative
    # error naming the estimator, before any input validation happens.
    X = "foo!"  # input validation not required when SVM not fitted
    clf = svm.SVC()
    assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)
    clf = svm.NuSVR()
    assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
    # Two identically-seeded fits must produce identical probability
    # estimates (the internal Platt-scaling CV must be deterministic).
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_1 = a.fit(X, Y).predict_proba(X)
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_2 = a.fit(X, Y).predict_proba(X)
    assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
vigilv/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    """Draw a fitted linear classifier's decision boundary as a 2-D line."""
    # get the separating hyperplane
    # w[0]*x + w[1]*y + intercept = 0  =>  y = a*x - intercept / w[1]
    w = clf.coef_[0]
    # NOTE(review): divides by zero if the boundary is vertical (w[1] == 0)
    a = -w[0] / w[1]
    xx = np.linspace(min_x - 5, max_x + 5)  # make sure the line is long enough
    yy = a * xx - (clf.intercept_[0]) / w[1]
    plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
    """Fit a one-vs-rest linear SVC on a 2-D projection of (X, Y) and draw
    the samples plus both class boundaries in subplot `subplot`.

    `transform` selects the projection: "pca" (unsupervised) or "cca"
    (supervised); any other value raises ValueError.
    """
    if transform == "pca":
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        raise ValueError
    # axis limits of the projected data, used for line length and framing
    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])
    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)
    plt.subplot(2, 2, subplot)
    plt.title(title)
    # indices of samples carrying each label (a sample may carry both)
    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    # all samples in gray, labeled ones circled per class
    plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
                facecolors='none', linewidths=2, label='Class 1')
    plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
                facecolors='none', linewidths=2, label='Class 2')
    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    plt.xticks(())
    plt.yticks(())
    plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    # only the second subplot carries axis labels and the legend
    if subplot == 2:
        plt.xlabel('First principal component')
        plt.ylabel('Second principal component')
        plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
# Top row: dataset that may contain samples with no label at all.
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
# Bottom row: same generator, but every sample has at least one label.
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
Unidata/MetPy | v1.0/_downloads/651190fdc2d21b9d54206699c3284920/surface_declarative.py | 6 | 2177 | # Copyright (c) 2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
=========================================
Surface Analysis using Declarative Syntax
=========================================
The MetPy declarative syntax allows for a simplified interface to creating common
meteorological analyses including surface observation plots.
"""
########################################
from datetime import datetime, timedelta
import cartopy.crs as ccrs
import pandas as pd
from metpy.cbook import get_test_data
import metpy.plots as mpplots
########################################
# **Getting the data**
#
# In this example, data is originally from the Iowa State ASOS archive
# (https://mesonet.agron.iastate.edu/request/download.phtml) downloaded through a separate
# Python script. The data are pre-processed to determine sky cover and weather symbols from
# text output.
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
infer_datetime_format=True, parse_dates=['valid'])
########################################
# **Plotting the data**
#
# Use the declarative plotting interface to plot surface observations over the state of
# Georgia.
# Plotting the Observations using a 15 minute time window for surface observations
obs = mpplots.PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 13)
obs.time_window = timedelta(minutes=15)
obs.level = None  # surface data: no vertical level filtering
obs.fields = ['tmpf', 'dwpf', 'emsl', 'cloud_cover', 'wxsym']
# station-model positions for each field above
obs.locations = ['NW', 'SW', 'NE', 'C', 'W']
obs.colors = ['red', 'green', 'black', 'black', 'blue']
# the lambda renders pressure as the conventional 3-digit station-model
# code — presumably emsl is in hPa; verify against the input data
obs.formats = [None, None, lambda v: format(10 * v, '.0f')[-3:], 'sky_cover',
               'current_weather']
obs.vector_field = ('uwind', 'vwind')
obs.reduce_points = 1  # thin overlapping stations for readability
# Add map features for the particular panel
panel = mpplots.MapPanel()
panel.layout = (1, 1, 1)
panel.area = 'ga'  # the state of Georgia
panel.projection = ccrs.PlateCarree()
panel.layers = ['coastline', 'borders', 'states']
panel.plots = [obs]
# Collecting panels for complete figure
pc = mpplots.PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
# Showing the results
pc.show()
| bsd-3-clause |
spectralDNS/shenfun | demo/laguerre_dirichlet_poisson1D.py | 1 | 1587 | r"""
Solve Poisson equation in 1D with homogeneous Dirichlet bcs on the domain [0, inf)
\nabla^2 u = f,
The equation to solve for a Laguerre basis is
(\nabla u, \nabla v) = -(f, v)
"""
import os
import sys
from sympy import symbols, sin, exp, lambdify
import numpy as np
from shenfun import inner, grad, TestFunction, TrialFunction, \
    Array, Function, FunctionSpace, dx
assert len(sys.argv) == 2, 'Call with one command-line argument'
# NOTE(review): int() already raises ValueError on a non-integer argument,
# so this isinstance check is always True — it is effectively a no-op.
assert isinstance(int(sys.argv[-1]), int)
# Use sympy to compute a rhs, given an analytical solution
x = symbols("x", real=True)
# manufactured solution: satisfies the homogeneous Dirichlet bc and decays
ue = sin(2*x)*exp(-x)
fe = ue.diff(x, 2)
# Size of discretization
N = int(sys.argv[-1])
SD = FunctionSpace(N, 'Laguerre', bc=(0, 0))
u = TrialFunction(SD)
v = TestFunction(SD)
# Get f on quad points
fj = Array(SD, buffer=fe)
# Compute right hand side of Poisson equation
f_hat = Function(SD)
f_hat = inner(v, -fj, output_array=f_hat)
# Get left hand side of Poisson equation
# (weak form: integrate by parts instead of using -div(grad(u)))
#A = inner(v, -div(grad(u)))
A = inner(grad(v), grad(u))
f_hat = A.solve(f_hat)
uj = f_hat.backward()   # solution on quadrature points
uh = uj.forward()       # back to spectral coefficients (for uh.eval below)
# Compare with analytical solution
ua = Array(SD, buffer=ue)
print("Error=%2.16e" %(np.linalg.norm(uj-ua)))
print("Error=%2.16e" %(np.sqrt(dx(uj-ua)**2)))
assert np.allclose(uj, ua, atol=1e-5)
# pointwise evaluation of the spectral solution at arbitrary points
point = np.array([0.1, 0.2])
p = SD.eval(point, f_hat)
assert np.allclose(p, lambdify(x, ue)(point), atol=1e-5)
# only plot when run interactively, not under pytest
if 'pytest' not in os.environ:
    import matplotlib.pyplot as plt
    xx = np.linspace(0, 16, 100)
    plt.plot(xx, lambdify(x, ue)(xx), 'r', xx, uh.eval(xx), 'bo', markersize=2)
    plt.show()
hehongliang/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 27 | 46439 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
            tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
  """Gather entries along `axis` with `indexer`, preserving axis order."""
  with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
    # Move the gather axis to the front so tf.gather acts on dimension 0.
    remaining = list(labeled_tensor.axes.remove(axis.name).values())
    front_axes = core.Axes([axis] + remaining)
    moved = core.transpose(labeled_tensor, front_axes.keys())
    picked = core.LabeledTensor(
        array_ops.gather(moved.tensor, indexer), front_axes)
    # Restore the caller's original axis order.
    return core.transpose(picked, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types,
                       tc.Union(slice, collections.Hashable, list)),
            tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
  """Slice out a subset of the tensor.

  Args:
    labeled_tensor: The input tensor.
    selection: A dictionary mapping an axis name to a scalar, slice or list of
      values to select. Currently supports two types of selections:
        (a) Any number of scalar and/or slice selections.
        (b) Exactly one list selection, without any scalars or slices.
    name: Optional op name.

  Returns:
    The selection as a `LabeledTensor`.

  Raises:
    ValueError: If the tensor doesn't have an axis in the selection or if
      that axis lacks labels.
    KeyError: If any labels in a selection are not found in the original axis.
    NotImplementedError: If you attempt to combine a list selection with
      scalar selection or another list selection.
  """
  with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    # Scalar and slice selections accumulate into `slices` (applied in one
    # pass); a list selection becomes an integer indexer for tf.gather.
    slices = {}
    indexers = {}
    for axis_name, value in selection.items():
      if axis_name not in labeled_tensor.axes:
        raise ValueError(
            'The tensor does not have an axis named %s. Its axes are: %r' %
            (axis_name, labeled_tensor.axes.keys()))
      axis = labeled_tensor.axes[axis_name]
      if axis.labels is None:
        raise ValueError(
            'The axis named %s does not have labels. The axis is: %r' %
            (axis_name, axis))
      if isinstance(value, slice):
        # TODO(shoyer): consider deprecating using slices in favor of lists
        if value.start is None:
          start = None
        else:
          start = axis.index(value.start)
        if value.stop is None:
          stop = None
        else:
          # For now, follow the pandas convention of making labeled slices
          # inclusive of both bounds.
          stop = axis.index(value.stop) + 1
        if value.step is not None:
          raise NotImplementedError('slicing with a step is not yet supported')
        slices[axis_name] = slice(start, stop)
      # Needs to be after checking for slices, since slice objects claim to be
      # instances of collections.Hashable but hash() on them fails.
      elif isinstance(value, collections.Hashable):
        slices[axis_name] = axis.index(value)
      elif isinstance(value, list):
        if indexers:
          raise NotImplementedError(
              'select does not yet support more than one list selection at '
              'the same time')
        indexer = [axis.index(v) for v in value]
        indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
      else:
        # If type checking is working properly, this shouldn't be possible.
        raise TypeError('cannot handle arbitrary types')
    if indexers and slices:
      raise NotImplementedError(
          'select does not yet support combined scalar and list selection')
    # For now, handle array selection separately, because tf.gather_nd does
    # not support gradients yet. Later, using gather_nd will let us combine
    # these paths.
    if indexers:
      (axis_name, indexer), = indexers.items()
      axis = core.Axis(axis_name, selection[axis_name])
      return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
    else:
      return core.slice_function(labeled_tensor, slices, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), string_types,
    tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
  """Concatenate tensors along a dimension.

  See tf.concat.

  Args:
    labeled_tensors: A list of input LabeledTensors.
    axis_name: The name of the axis along which to concatenate.
    name: Optional op name.

  Returns:
    The concatenated tensor.
    The coordinate labels for the concatenation dimension are also concatenated,
    if they are available for every tensor.

  Raises:
    ValueError: If fewer than one tensor inputs is provided, if the tensors
      have incompatible axes, or if `axis_name` isn't the name of an axis.
  """
  with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]
    if len(labeled_tensors) < 1:
      raise ValueError('concat expects at least 1 tensor, but received %s' %
                       labeled_tensors)
    # All tensors must have these axes.
    axes_0 = labeled_tensors[0].axes
    axis_names = list(axes_0.keys())
    if axis_name not in axis_names:
      raise ValueError('%s not in %s' % (axis_name, axis_names))
    # Every input must match the first tensor on all axes except the
    # concatenation axis.
    shared_axes = axes_0.remove(axis_name)
    tensors = [labeled_tensors[0].tensor]
    concat_axis_list = [axes_0[axis_name]]
    for labeled_tensor in labeled_tensors[1:]:
      current_shared_axes = labeled_tensor.axes.remove(axis_name)
      if current_shared_axes != shared_axes:
        # TODO(shoyer): add more specific checks about what went wrong,
        # including raising AxisOrderError when appropriate
        raise ValueError('Mismatched shared axes: the first tensor '
                         'had axes %r but this tensor has axes %r.' %
                         (shared_axes, current_shared_axes))
      # Accumulate the axis labels, if they're available.
      concat_axis_list.append(labeled_tensor.axes[axis_name])
      tensors.append(labeled_tensor.tensor)
    concat_axis = core.concat_axes(concat_axis_list)
    concat_dimension = axis_names.index(axis_name)
    concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
    # Splice the (possibly longer) concatenated axis back into position.
    values = list(axes_0.values())
    concat_axes = (values[:concat_dimension] + [concat_axis] +
                   values[concat_dimension + 1:])
    return core.LabeledTensor(concat_tensor, concat_axes)
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike),
    tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
  """Pack tensors along a new axis.

  See tf.pack.

  Args:
    labeled_tensors: The input tensors, which must have identical axes.
    new_axis: The name of the new axis, or a tuple containing the name
      and coordinate labels.
    axis_position: Optional integer position at which to insert the new axis.
    name: Optional op name.

  Returns:
    The packed tensors as a single LabeledTensor, with `new_axis` in the given
    `axis_position`.

  Raises:
    ValueError: If fewer than one input tensors is provided, or if the tensors
      don't have identical axes.
  """
  with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]
    if not labeled_tensors:
      raise ValueError('pack expects at least 1 tensors, but received %s' %
                       labeled_tensors)
    # All inputs must agree exactly with the first tensor's axes.
    reference_axes = labeled_tensors[0].axes
    for tensor in labeled_tensors:
      if tensor.axes != reference_axes:
        raise ValueError('Non-identical axes. Expected %s but got %s' %
                         (reference_axes, tensor.axes))
    stacked = array_ops.stack(
        [t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
    result_axes = list(reference_axes.values())
    result_axes.insert(axis_position, new_axis)
    return core.LabeledTensor(stacked, result_axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
  """Unpack the tensor.

  See tf.unpack.

  Args:
    labeled_tensor: The input tensor.
    axis_name: Optional name of axis to unpack. By default, the first axis is
      used.
    name: Optional op name.

  Returns:
    The list of unpacked LabeledTensors.

  Raises:
    ValueError: If `axis_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    all_names = list(labeled_tensor.axes.keys())
    # Default to unstacking along the first axis.
    target_name = all_names[0] if axis_name is None else axis_name
    if target_name not in all_names:
      raise ValueError('%s not in %s' % (target_name, all_names))
    dim = all_names.index(target_name)
    pieces = array_ops.unstack(labeled_tensor.tensor, axis=dim, name=scope)
    # Each slice keeps every axis except the unpacked one.
    kept_axes = [
        axis for pos, axis in enumerate(labeled_tensor.axes.values())
        if pos != dim
    ]
    return [core.LabeledTensor(piece, kept_axes) for piece in pieces]
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Collection(string_types),
            tc.Collection(tc.Union(string_types, core.AxisLike)),
            tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
  """Reshape specific axes of a LabeledTensor.

  Non-indicated axes remain in their original locations.

  Args:
    labeled_tensor: The input tensor.
    existing_axes: List of axis names found on the input tensor. These must
      appear sequentially in the list of axis names on the input. In other
      words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
    new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
      providing new axes with which to replace `existing_axes` in the reshaped
      result. At most one element of `new_axes` may be a string, indicating an
      axis with unknown size.
    name: Optional op name.

  Returns:
    The reshaped LabeledTensor.

  Raises:
    ValueError: If `existing_axes` are not all axes on the input, or if more
      than one of `new_axes` has unknown size.
    AxisOrderError: If `existing_axes` are not a slice of axis names on the
      input.
  """
  with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    original_axis_names = list(labeled_tensor.axes.keys())
    existing_axes = list(existing_axes)
    if not set(existing_axes) <= set(original_axis_names):
      raise ValueError('existing_axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_axes, original_axis_names))
    # [start, stop) is the contiguous span of axes being replaced.
    start = original_axis_names.index(existing_axes[0])
    stop = original_axis_names.index(existing_axes[-1]) + 1
    if existing_axes != original_axis_names[start:stop]:
      # We could support existing_axes that aren't a slice by using transpose,
      # but that could lead to unpredictable performance consequences because
      # transposes are not free in TensorFlow. If we did transpose
      # automatically, the user might never realize that their data is being
      # produced with the wrong order. (The later will occur with some frequency
      # because of how broadcasting automatically choose axis order.)
      # So for now we've taken the strict approach.
      raise core.AxisOrderError(
          'existing_axes %r are not a slice of axis names %r on the input '
          'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
          'axes on the input explicitly.' %
          (existing_axes, original_axis_names))
    if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
      raise ValueError(
          'at most one axis in new_axes can have unknown size. All other '
          'axes must have an indicated integer size or labels: %r' % new_axes)
    original_values = list(labeled_tensor.axes.values())
    # -1 tells tf.reshape to infer that dimension's size.
    axis_size = lambda axis: -1 if axis.size is None else axis.size
    shape = [axis_size(axis) for axis in original_values[:start]]
    for axis_ref in new_axes:
      if isinstance(axis_ref, string_types):
        shape.append(-1)
      else:
        axis = core.as_axis(axis_ref)
        shape.append(axis_size(axis))
    shape.extend(axis_size(axis) for axis in original_values[stop:])
    reshaped_tensor = array_ops.reshape(
        labeled_tensor.tensor, shape, name=scope)
    axes = original_values[:start] + list(new_axes) + original_values[stop:]
    return core.LabeledTensor(reshaped_tensor, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
            tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
  """Rename an axis of LabeledTensor.

  Args:
    labeled_tensor: The input tensor.
    existing_name: Name for an existing axis on the input.
    new_name: Desired replacement name.
    name: Optional op name.

  Returns:
    LabeledTensor with renamed axis.

  Raises:
    ValueError: If `existing_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
    # Convert first, for consistency with the other ops in this module: the
    # argument may be any LabeledTensorLike (e.g. a plain Tensor), which would
    # not have an `axes` attribute for the check below.
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_name, labeled_tensor.axes.keys()))
    new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
    # Delegate to reshape, which replaces the single axis in place.
    return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
            tc.Collection(core.LabeledTensorLike), bool,
            tc.Optional(string_types))
def _batch_helper(default_name,
                  batch_fn,
                  batch_size,
                  enqueue_many,
                  labeled_tensors,
                  allow_smaller_final_batch,
                  name=None):
  """Shared implementation for `batch` and `shuffle_batch`.

  Runs `batch_fn` on the raw tensors, then re-attaches axes with a leading
  'batch' axis of size `batch_size` (reusing the existing 'batch' axis when
  `enqueue_many` is true).

  Args:
    default_name: Default name for the op scope.
    batch_fn: Callable mapping (list of tf.Tensors, scope name) to the
      batched tf.Tensor(s).
    batch_size: The output batch size.
    enqueue_many: If true, inputs must already have a leading 'batch' axis.
    labeled_tensors: The input tensors.
    allow_smaller_final_batch: If true, the last batch may be smaller than
      `batch_size`, so the output batch axis gets unknown size.
    name: Optional op name.

  Returns:
    The batched LabeledTensors.

  Raises:
    ValueError: If `enqueue_many` is true and an input's first axis is not
      named 'batch'.
  """
  with ops.name_scope(name, default_name, labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]
    batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
    # TODO(shoyer): Remove this when they sanitize the TF API.
    if not isinstance(batch_ops, list):
      assert isinstance(batch_ops, ops.Tensor)
      batch_ops = [batch_ops]
    if allow_smaller_final_batch:
      # The final batch may be short, so the static batch size is unknown.
      batch_size = None
    @tc.returns(core.Axes)
    @tc.accepts(core.Axes)
    def output_axes(axes):
      # Build the output Axes for one input: 'batch' first, then the rest.
      if enqueue_many:
        if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
          raise ValueError(
              'When enqueue_many is True, input tensors must have an axis '
              'called "batch" as their first dimension, '
              'but axes were %s' % axes)
        culled_axes = axes.remove('batch')
        return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
      else:
        return core.Axes([('batch', batch_size)] + list(axes.values()))
    output_labeled_tensors = []
    for i, tensor in enumerate(batch_ops):
      axes = output_axes(labeled_tensors[i].axes)
      output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
    return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
    tc.Optional(string_types))
def batch(labeled_tensors,
          batch_size,
          num_threads=1,
          capacity=32,
          enqueue_many=False,
          allow_smaller_final_batch=False,
          name=None):
  """Rebatch a tensor.

  See tf.batch.

  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.batch.
    capacity: See tf.batch.
    enqueue_many: If true, the input tensors must contain a 'batch' axis as
      their first axis; if false, they must not. See tf.batch.
    allow_smaller_final_batch: See tf.batch.
    name: Optional op name.

  Returns:
    The rebatched tensors. If enqueue_many is false, the output tensors gain
    a new 'batch' axis as their first axis.

  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """
  def make_batch(tensors, scope):
    # Delegate queue-based batching of the raw tensors to tf.train.batch.
    return input.batch(
        tensors,
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=capacity,
        enqueue_many=enqueue_many,
        allow_smaller_final_batch=allow_smaller_final_batch,
        name=scope)
  return _batch_helper('lt_batch', make_batch, batch_size, enqueue_many,
                       labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
    tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
                  batch_size,
                  num_threads=1,
                  capacity=32,
                  enqueue_many=False,
                  min_after_dequeue=0,
                  seed=None,
                  allow_smaller_final_batch=False,
                  name=None):
  """Rebatch a tensor, with shuffling.

  See tf.batch.

  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.batch.
    capacity: See tf.batch.
    enqueue_many: If true, the input tensors must contain a 'batch' axis as
      their first axis; if false, they must not. See tf.batch.
    min_after_dequeue: Minimum number of elements in the queue after a dequeue,
      used to ensure mixing.
    seed: Optional random seed.
    allow_smaller_final_batch: See tf.batch.
    name: Optional op name.

  Returns:
    The rebatched tensors. If enqueue_many is false, the output tensors gain
    a new 'batch' axis as their first axis.

  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """
  def make_shuffle_batch(tensors, scope):
    # Delegate shuffling and batching of the raw tensors to
    # tf.train.shuffle_batch.
    return input.shuffle_batch(
        tensors,
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=capacity,
        enqueue_many=enqueue_many,
        min_after_dequeue=min_after_dequeue,
        seed=seed,
        allow_smaller_final_batch=allow_smaller_final_batch,
        name=scope)
  return _batch_helper('lt_shuffle_batch', make_shuffle_batch, batch_size,
                       enqueue_many, labeled_tensors,
                       allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types, int),
            tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
  """Randomly crops a tensor to a given size.

  See tf.random_crop.

  Args:
    labeled_tensor: The input tensor.
    shape_map: A dictionary mapping axis names to the size of the random crop
      for that dimension.
    seed: An optional random seed.
    name: An optional op name.

  Returns:
    A tensor of the same rank as `labeled_tensor`, cropped randomly in the
    selected dimensions.

  Raises:
    ValueError: If the shape map contains an axis name not in the input tensor.
  """
  with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    for axis_name in shape_map:
      if axis_name not in labeled_tensor.axes:
        raise ValueError('Selection axis %s not in axes %s' %
                         (axis_name, labeled_tensor.axes))
    crop_shape = []
    result_axes = []
    for axis in labeled_tensor.axes.values():
      if axis.name not in shape_map:
        # Axes not named in shape_map keep their full size and labels.
        crop_shape.append(len(axis))
        result_axes.append(axis)
      else:
        target_size = shape_map[axis.name]
        crop_shape.append(target_size)
        # We lose labels for the axes we crop, leaving just the size.
        result_axes.append((axis.name, target_size))
    crop_op = random_ops.random_crop(
        labeled_tensor.tensor, crop_shape, seed=seed, name=scope)
    return core.LabeledTensor(crop_op, result_axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
  """Map on the list of tensors unpacked from labeled_tensor.

  See tf.map_fn.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type LabeledTensor -> LabeledTensor.
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    A tensor that packs the results of applying fn to the list of tensors
    unpacked from labeled_tensor.
  """
  with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    # Map is along the first axis: unpack, apply, repack.
    unpack_lts = unpack(labeled_tensor)
    # TODO(ericmc): Fix this upstream.
    if labeled_tensor.dtype == dtypes.string:
      # We must construct the full graph here, because functional_ops.map_fn
      # doesn't work for string-valued tensors.
      # Constructing the full graph may be slow.
      map_lts = [fn(t) for t in unpack_lts]
      return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
    else:
      # Figure out what the axis labels should be, but use tf.map_fn to
      # construct the graph because it's efficient.
      # It may be slow to construct the full graph, so we infer the labels from
      # the first element.
      # TODO(ericmc): This builds a subgraph which then gets thrown away.
      # Find a more elegant solution.
      first_map_lt = fn(unpack_lts[0])
      final_axes = list(labeled_tensor.axes.values())[:1] + list(
          first_map_lt.axes.values())
      @tc.returns(ops.Tensor)
      @tc.accepts(ops.Tensor)
      def tf_fn(tensor):
        # Re-attach the non-mapped axes so `fn` sees a LabeledTensor.
        original_axes = list(labeled_tensor.axes.values())[1:]
        tensor_lt = core.LabeledTensor(tensor, original_axes)
        return fn(tensor_lt).tensor
      map_op = functional_ops.map_fn(
          tf_fn, labeled_tensor.tensor, dtype=first_map_lt.dtype)
      map_lt = core.LabeledTensor(map_op, final_axes)
      return core.identity(map_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
  """Left fold on the list of tensors unpacked from labeled_tensor.

  See tf.foldl.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
      Its arguments are (accumulated_value, next_value).
    labeled_tensor: The input tensor.
    initial_value: The initial value of the accumulator.
    name: Optional op name.

  Returns:
    The accumulated value.
  """
  with ops.name_scope(name, 'lt_foldl',
                      [labeled_tensor, initial_value]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    initial_value = core.convert_to_labeled_tensor(initial_value)
    @tc.returns(ops.Tensor)
    @tc.accepts(ops.Tensor, ops.Tensor)
    def tf_fn(accumulator, next_element):
      # Wrap the raw tensors back into LabeledTensors so `fn` sees axes; the
      # fold runs along the first axis of `labeled_tensor`.
      accumulator_lt = core.LabeledTensor(accumulator, initial_value.axes)
      next_element_lt = core.LabeledTensor(
          next_element, list(labeled_tensor.axes.values())[1:])
      return fn(accumulator_lt, next_element_lt).tensor
    foldl_op = functional_ops.foldl(
        tf_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
    foldl_lt = core.LabeledTensor(foldl_op, initial_value.axes)
    return core.identity(foldl_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
  """Remove size-1 dimensions.

  See tf.squeeze.

  Args:
    labeled_tensor: The input tensor.
    axis_names: The names of the dimensions to remove, or None to remove
      all size-1 dimensions.
    name: Optional op name.

  Returns:
    A tensor with the specified dimensions removed.

  Raises:
    ValueError: If the named axes are not in the tensor, or if they are
      not size-1.
  """
  with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    if axis_names is None:
      # Default: every axis of length one is squeezed.
      axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
    # Validate before touching the graph.
    for candidate in axis_names:
      if candidate not in labeled_tensor.axes:
        raise ValueError('axis %s is not in tensor axes %s' %
                         (candidate, labeled_tensor.axes))
      elif len(labeled_tensor.axes[candidate]) != 1:
        raise ValueError(
            'cannot squeeze axis with size greater than 1: (%s, %s)' %
            (candidate, labeled_tensor.axes[candidate]))
    dims_to_drop = []
    kept_axes = []
    for position, axis in enumerate(labeled_tensor.axes.values()):
      if axis.name in axis_names:
        dims_to_drop.append(position)
      else:
        kept_axes.append(axis)
    if dims_to_drop:
      result_op = array_ops.squeeze(
          labeled_tensor.tensor, dims_to_drop, name=scope)
    else:
      # Nothing to squeeze; emit an identity so the op still gets `name`.
      result_op = array_ops.identity(labeled_tensor.tensor, name=scope)
    return core.LabeledTensor(result_op, kept_axes)
# pylint: disable=invalid-name
# Type aliases for the `axes` argument of the reduction ops defined below:
# an axis to reduce is either a plain axis name (that dimension is squeezed
# away) or a (name, label) tuple (the dimension is kept, labeled with the
# given hashable value). `axes` may be one such item, a collection of them,
# or None.
ReduceAxis = tc.Union(string_types,
                      tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def matmul(a, b, name=None):
  """Matrix multiply two tensors with rank 1 or 2.

  If both tensors have rank 2, a matrix-matrix product is performed.
  If one tensor has rank 1 and the other has rank 2, then a matrix-vector
  product is performed.
  If both tensors have rank 1, then a vector dot-product is performed.
  (This behavior matches that of `numpy.dot`.)

  Both tensors must share exactly one dimension in common, which is the
  dimension the operation is summed along. The inputs will be automatically
  transposed if necessary as part of the matmul op.

  We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number shared dimensions (via an `axis`
  argument), but neither of these features has been implemented yet.

  Args:
    a: First LabeledTensor.
    b: Second LabeledTensor.
    name: Optional op name.

  Returns:
    LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in or order of appearance on the
    inputs.

  Raises:
    NotImplementedError: If inputs have rank >2 or share multiple axes.
    ValueError: If the inputs have rank 0 or do not share any axes.
  """
  with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
    a = core.convert_to_labeled_tensor(a)
    b = core.convert_to_labeled_tensor(b)
    if len(a.axes) > 2 or len(b.axes) > 2:
      # We could pass batched inputs to tf.matmul to make this work, but we
      # would also need to use tf.tile and/or tf.transpose. These are more
      # expensive than doing reshapes, so it's not clear if it's a good idea to
      # do this automatically.
      raise NotImplementedError(
          'matmul currently requires inputs with rank 2 or less, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
    if not a.axes or not b.axes:
      raise ValueError(
          'matmul currently requires inputs with at least rank 1, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
    # The single shared axis is the one summed over.
    shared_axes = set(a.axes) & set(b.axes)
    if len(shared_axes) > 1:
      raise NotImplementedError(
          'matmul does not yet support summing over multiple shared axes: %r. '
          'Use transpose and reshape to create a single shared axis to sum '
          'over.' % shared_axes)
    if not shared_axes:
      raise ValueError('there must have exactly one axis in common between '
                       'input to matmul: %r, %r' %
                       (a.axes.keys(), b.axes.keys()))
    shared_axis, = shared_axes
    if a.axes[shared_axis] != b.axes[shared_axis]:
      raise ValueError('axis %r does not match on input arguments: %r vs %r' %
                       (shared_axis, a.axes[shared_axis].value,
                        b.axes[shared_axis].value))
    # Result axes: all non-shared axes, in order of appearance on the inputs.
    result_axes = []
    for axes in [a.axes, b.axes]:
      for axis in axes.values():
        if axis.name != shared_axis:
          result_axes.append(axis)
    axis_scope_order = core.get_axis_order()
    if axis_scope_order is not None:
      # If the enclosing axis_order_scope dictates the opposite output order,
      # swap the operands (and result axes) instead of transposing the result.
      result_axis_names = [axis.name for axis in result_axes]
      new_axis_names = [
          name for name in axis_scope_order if name in result_axis_names
      ]
      if new_axis_names != result_axis_names:
        # switch a and b
        b, a = a, b
        # result_axes is a list of length 1 or 2
        result_axes = result_axes[::-1]
    squeeze_dims = []
    if len(a.axes) == 1:
      # Promote the rank-1 operand to a one-row matrix; the extra dimension
      # is squeezed out of the result below.
      a_tensor = array_ops.reshape(a.tensor, (1, -1))
      squeeze_dims.append(0)
      transpose_a = False
    else:
      a_tensor = a.tensor
      transpose_a = list(a.axes.keys()).index(shared_axis) == 0
    if len(b.axes) == 1:
      # Promote the rank-1 operand to a one-column matrix, squeezed out below.
      b_tensor = array_ops.reshape(b.tensor, (-1, 1))
      squeeze_dims.append(1)
      transpose_b = False
    else:
      b_tensor = b.tensor
      transpose_b = list(b.axes.keys()).index(shared_axis) == 1
    result_op = math_ops.matmul(
        a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)
    if squeeze_dims:
      result_op = array_ops.squeeze(result_op, squeeze_dims)
    result_op = array_ops.identity(result_op, name=scope)
    return core.LabeledTensor(result_op, result_axes)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
  """Define a reduction op for labeled tensors.

  Args:
    op_name: string name of the TensorFlow op.
    reduce_fn: function to call to evaluate the op on a tf.Tensor.

  Returns:
    Function defining the given reduction op that acts on a LabeledTensor.
  """
  default_name = 'lt_%s' % op_name
  @tc.returns(core.LabeledTensor)
  @tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
  def op(labeled_tensor, axes=None, name=None):
    """Computes the given reduction across the given axes of a LabeledTensor.

    See `tf.{op_name}` for full details.

    Args:
      labeled_tensor: The input tensor.
      axes: A set of axes or None.
        If None, all axes will be reduced.
        Axes must all be strings, in which case those dimensions will be
        removed, or pairs of (name, None) or (name, label), in which case those
        dimensions will be kept.
      name: Optional op name.

    Returns:
      The reduced LabeledTensor.

    Raises:
      ValueError: if any of the axes to reduce over are not found on
        `labeled_tensor`.
    """
    with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
      labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
      if axes is None:
        axes = labeled_tensor.axes.keys()
      if isinstance(axes, (string_types, tuple)):
        # A single axis (name or (name, label) pair) was passed; wrap it.
        axes = [axes]
      reduction_axes = {}
      axes_to_squeeze = []
      for a in axes:
        if isinstance(a, string_types):
          # We squeeze out this axis.
          reduction_axes[a] = a
          axes_to_squeeze.append(a)
        else:
          # We keep this axis, with the user-provided labels.
          (axis_name, label) = a
          if label is not None:
            # The input was a single label, so make it a list so it can be
            # turned into an Axis.
            label = [label]
          reduction_axes[axis_name] = (axis_name, label)
      for axis_name in reduction_axes:
        if axis_name not in labeled_tensor.axes:
          raise ValueError('Axis %s not in axes %s' %
                           (axis_name, labeled_tensor.axes))
      intermediate_axes = []
      reduction_dimensions = []
      for i, axis in enumerate(labeled_tensor.axes.values()):
        if axis.name in reduction_axes:
          intermediate_axes.append(reduction_axes[axis.name])
          reduction_dimensions.append(i)
        else:
          intermediate_axes.append(axis)
      # Reduce with keepdims=True, then squeeze out the dimensions the caller
      # asked to drop entirely.
      reduce_op = reduce_fn(
          labeled_tensor.tensor, reduction_dimensions, keepdims=True)
      reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
      return squeeze(reduce_lt, axes_to_squeeze, name=scope)
  op.__doc__ = op.__doc__.format(op_name=op_name)
  op.__name__ = op_name
  return op
# Labeled-tensor reduction ops generated from their math_ops counterparts;
# each accepts `axes` as names or (name, label) pairs instead of integer
# dimension indices (see `define_reduce_op` above).
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
                                    math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Union(int, ops.Tensor)),
            tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
  """Constructs a tensor by tiling a given tensor.

  Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
  tensors would no longer be unique.)

  See tf.tile.

  Args:
    labeled_tensor: The input tensor.
    multiples: A mapping where the keys are axis names and the values are the
      integer number of times to tile along that axis. Only axes with a multiple
      different than 1 need be included.
    name: Optional op name.

  Returns:
    A tensor with the indicated axes tiled.

  Raises:
    ValueError: If the tiled axes are not axes in the input tensor, or if any
      axes in multiples have tick labels.
  """
  with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('tile axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (multiples.keys(), labeled_tensor.axes))
    labeled_axes = [
        name for name in multiples
        if labeled_tensor.axes[name].labels is not None
    ]
    if labeled_axes:
      raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
    # Axes not named in `multiples` default to a multiple of 1 (unchanged).
    multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
    tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
    # Unlabeled axes are reduced to just their name (the size is re-inferred
    # from the tiled tensor); labeled axes were checked above to be untiled
    # and keep their labels.
    new_axes = [
        axis.name if axis.labels is None else axis
        for axis in labeled_tensor.axes.values()
    ]
    return core.LabeledTensor(tile_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
            string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
  """Pads a tensor.

  See tf.pad.

  Args:
    labeled_tensor: The input tensor.
    paddings: A mapping where the keys are axis names and the values are
      tuples where the first element is the padding to insert at the beginning
      of the axis and the second is the padding to insert at the end of the
      axis.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
    name: Optional op name.

  Returns:
    A tensor with the indicated axes padded, optionally with those axes extended
    with the provided labels.

  Raises:
    ValueError: If the padded axes are not axes in the input tensor.
  """
  with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('pad axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (paddings.keys(), labeled_tensor.axes))
    padded_axes = []
    pad_widths = []
    for axis_name, axis in labeled_tensor.axes.items():
      if axis_name not in paddings:
        # Untouched axis: keep it as-is with zero padding.
        padded_axes.append(axis)
        pad_widths.append((0, 0))
      else:
        before, after = paddings[axis_name]
        leading = core.Axis(axis_name, before)
        trailing = core.Axis(axis_name, after)
        padded_axes.append(core.concat_axes([leading, axis, trailing]))
        pad_widths.append((len(leading), len(trailing)))
    pad_op = array_ops.pad(labeled_tensor.tensor,
                           pad_widths,
                           mode,
                           name=scope)
    return core.LabeledTensor(pad_op, padded_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Union(np.ndarray, list, tuple, core.Scalar),
    tc.Optional(dtypes.DType),
    tc.Optional(
        tc.Union(core.Axes, tc.Collection(
            tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
  """Creates a constant tensor.

  If `axes` includes any strings, shape is inferred from `value`. Otherwise,
  the sizes of the given `axes` are used to set `shape` for `tf.constant`.

  See tf.constant for more details.

  Args:
    value: The input tensor.
    dtype: The type of the returned tensor.
    axes: Optional Axes, list of strings or list of objects coercible to Axis
      objects. By default, axes are assumed to be an empty list (i.e., `value`
      is treated as a scalar).
    name: Optional op name.

  Returns:
    The labeled constant tensor.
  """
  with ops.name_scope(name, 'lt_constant', [value]) as scope:
    axes = [] if axes is None else axes
    if isinstance(axes, core.Axes):
      axes = axes.values()

    if any(isinstance(ax, string_types) for ax in axes):
      # At least one axis is given only by name, so let tf.constant infer
      # the shape from `value`.
      shape = None
    else:
      # All axes carry sizes, which together determine the shape.
      axes = [core.as_axis(a) for a in axes]
      shape = [a.size for a in axes]

    op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
  """Creates an identical tensor with all elements set to zero.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to zero.
  """
  with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # Axes (and labels) carry over unchanged; only the values become zero.
    zeros_op = array_ops.zeros_like(lt.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(zeros_op, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
  """Creates an identical tensor with all elements set to one.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to one.
  """
  with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # Axes (and labels) carry over unchanged; only the values become one.
    ones_op = array_ops.ones_like(lt.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(ones_op, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
  """Casts a labeled tensor to a new type.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    A labeled tensor with the new dtype.
  """
  with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # Only the dtype changes; the axes are preserved exactly.
    cast_op = math_ops.cast(lt.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(cast_op, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
  """Asserts a tensor doesn't contain NaNs or Infs.

  See tf.verify_tensor_all_finite.

  Args:
    labeled_tensor: The input tensor.
    message: Message to log on failure.
    name: Optional op name.

  Returns:
    The input tensor.
  """
  with ops.name_scope(name, 'lt_verify_tensor_all_finite',
                      [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # Wrap the underlying tensor in the runtime finiteness check; the result
    # is value-identical, so the axes are passed through untouched.
    checked = numerics.verify_tensor_all_finite(
        lt.tensor, msg=message, name=scope)
    return core.LabeledTensor(checked, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
  """Apply a boolean mask to a labeled tensor.

  Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
  The mask is applied to the first axis of `labeled_tensor`. Labels on the first
  axis are removed, because True indices in `mask` may not be known dynamically.

  Args:
    labeled_tensor: The input tensor.
    mask: A 1D boolean labeled tensor whose single axis must equal the first
      axis of `labeled_tensor`.
    name: Optional op name.

  Returns:
    The masked labeled tensor.

  Raises:
    NotImplementedError: if the mask has more than one dimension.
    ValueError: if the axis of the mask is not equal to the first axis of
      `labeled_tensor`.
  """
  with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    mask = core.convert_to_labeled_tensor(mask)
    if len(mask.axes) > 1:
      raise NotImplementedError(
          "LabeledTensor's boolean_mask currently only supports 1D masks")
    mask_axis = list(mask.axes.values())[0]
    lt_axis = list(labeled_tensor.axes.values())[0]
    if mask_axis != lt_axis:
      raise ValueError('the first axis of the labeled tensor and the mask '
                       'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
    op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
    # TODO(shoyer): attempt to infer labels for the masked values, by calling
    # tf.contrib.util.constant_value on the mask?
    # Keep only the axis *name* for the masked (first) axis: its size/labels
    # are no longer statically known.
    axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
  """Return elements from x or y depending on condition.

  See `tf.where` for more details. This function currently only implements the
  three argument version of where.

  Args:
    condition: LabeledTensor of type `bool`.
    x: LabeledTensor for values where condition is true.
    y: LabeledTensor for values where condition is false.
    name: Optional op name.

  Returns:
    The labeled tensor with values according to condition.

  Raises:
    ValueError: if `x` and `y` have different axes, or if the axes of `x` do not
      start with the axes of `condition`.
  """
  with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
    condition = core.convert_to_labeled_tensor(condition)
    x = core.convert_to_labeled_tensor(x)
    y = core.convert_to_labeled_tensor(y)

    # All three inputs must share exactly the same axes.
    if condition.axes != x.axes or x.axes != y.axes:
      raise ValueError('all inputs to `where` must have equal axes')

    op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
    return core.LabeledTensor(op, x.axes)
| apache-2.0 |
rishikksh20/scikit-learn | examples/neighbors/plot_classification.py | 58 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02  # step size in the mesh
# Create color maps: pale colors for the decision regions, saturated
# colors for the training points.
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# One figure per weighting scheme (uniform vote vs. distance-weighted vote).
for weights in ['uniform', 'distance']:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
    clf.fit(X, y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title("3-Class classification (k = %i, weights = '%s')"
              % (n_neighbors, weights))
# Show both figures at once, after the loop has built them.
plt.show()
| bsd-3-clause |
grollins/orts | run.py | 1 | 2840 | import numpy as np
import pandas as pd
from timer import Timer
def bubblesort(x):
    """Sort the list ``x`` in place (ascending) and return it.

    Optimized bubble sort: each pass remembers the index of the last swap,
    since everything beyond that point is already in order.
    """
    upper = len(x) - 1
    while True:
        last_swap = 0
        for idx in range(upper):
            if x[idx] > x[idx + 1]:
                x[idx], x[idx + 1] = x[idx + 1], x[idx]
                last_swap = idx
        if last_swap == 0:
            # No swap past index 0 in this pass: the list is sorted.
            break
        upper = last_swap
    return x
def quicksort(x, left=0, right=None):
    """Sort the list ``x`` in place (ascending) using quicksort and return it.

    Args:
        x: mutable sequence of mutually comparable items.
        left: index of the first element of the slice to sort.
        right: index of the last element (inclusive); defaults to len(x) - 1.

    Returns:
        The same list object, sorted.
    """
    if right is None:
        right = len(x) - 1
    l = left
    r = right
    if l <= r:
        # BUG FIX: use floor division. Under Python 3, (left+right)/2 is a
        # float, which raises TypeError when used as a list index.
        mid = x[(left + right) // 2]
        # Partition around the pivot value `mid`.
        while l <= r:
            while l <= right and x[l] < mid:
                l += 1
            while r > left and x[r] > mid:
                r -= 1
            if l <= r:
                x[l], x[r] = x[r], x[l]
                l += 1
                r -= 1
        # Recurse into the two partitions.
        if left < r:
            quicksort(x, left, r)
        if l < right:
            quicksort(x, l, right)
    return x
def stoogesort(x, i=0, j=None):
    """Sort ``x[i..j]`` in place (ascending) with stooge sort; return ``x``.

    ``j`` defaults to the last index of ``x``. Deliberately inefficient
    (O(n^2.7)); kept for benchmarking purposes.
    """
    if j is None:
        j = len(x) - 1
    # Ensure the endpoints are mutually ordered.
    if x[i] > x[j]:
        x[i], x[j] = x[j], x[i]
    if j - i > 1:
        third = (j - i + 1) // 3
        # Sort first two thirds, last two thirds, then first two thirds again.
        stoogesort(x, i, j - third)
        stoogesort(x, i + third, j)
        stoogesort(x, i, j - third)
    return x
def sift(x, start, count):
    """Sift the element at index ``start`` down to its max-heap position.

    Restores the heap property, in place, for the subtree rooted at
    ``start`` within the first ``count`` elements of ``x``.
    """
    root = start
    while (root * 2) + 1 < count:
        child = (root * 2) + 1
        # Pick the larger child when a right child exists.
        if child < (count - 1) and x[child] < x[child + 1]:
            child += 1
        if x[root] < x[child]:
            x[root], x[child] = x[child], x[root]
            root = child
        else:
            return


def heapsort(x):
    """Sort the list ``x`` in place (ascending) using heapsort.

    Returns ``None``, mirroring ``list.sort()``.
    """
    # BUG FIX: use floor division. Under Python 3, len(x)/2 is a float, so
    # ``start`` (and the indices computed from it in sift) would raise
    # TypeError when used as list indices.
    start = (len(x) // 2) - 1
    end = len(x) - 1
    # Build a max-heap bottom-up.
    while start >= 0:
        sift(x, start, len(x))
        start -= 1
    # Repeatedly move the current max to the end and re-heapify the prefix.
    while end > 0:
        x[end], x[0] = x[0], x[end]
        sift(x, 0, end)
        end -= 1
def compute_sorting_time(sort_fcn, data):
    """Time ``sort_fcn`` on each row of ``data``.

    Args:
        sort_fcn: callable that sorts a single 1-D sequence.
        data: 2-D array; each row is sorted (and timed) independently.

    Returns:
        List of elapsed times in milliseconds, one per row.
    """
    times = []
    # BUG FIX: `range` instead of the Python-2-only `xrange` (NameError on
    # Python 3; in Python 2 `range` is behaviorally equivalent here).
    for i in range(data.shape[0]):
        with Timer() as timer:
            sort_fcn(data[i, :])
        times.append(timer.msecs)
    return times
#-------
# MAIN
#-------
# Benchmark each sorting algorithm on random integer arrays of increasing
# width N, collect per-run timings into a tidy DataFrame, and pickle it.
# NOTE(review): this script uses Python 2 print statements; it will not run
# under Python 3 without modification.
N_list = [50, 100, 250, 500, 750, 1000]
sort_fcn_list = [('bubblesort', bubblesort),
                 ('quicksort', quicksort),
                 ('heapsort', heapsort)]
#('stoogesort', stoogesort)]
timing_list = []
for N in N_list:
    # 1000 independent rows of width N, values in [0, 1000).
    X = np.random.randint(0, 1000, size=(1000,N))
    for sort_fcn_name, sort_fcn in sort_fcn_list:
        print N, sort_fcn_name
        # Copy so every algorithm sorts the same unsorted input.
        Y = X.copy()
        t = compute_sorting_time(sort_fcn, Y)
        # Print the first sorted row as a sanity check.
        print Y[0,:]
        timing_df = pd.DataFrame({'N': N, 'alg': sort_fcn_name,
                                  'run': range(len(t)), 'time': t})
        timing_list.append(timing_df)
timing_tidy_df = (pd.concat(timing_list)
                  .reset_index())
# Mean and standard deviation of run time per (N, algorithm) pair.
timing_stats = (timing_tidy_df.groupby(['N', 'alg'])
                .agg({'time': [np.mean, np.std]}))
print timing_stats
timing_tidy_df.to_pickle('sort_timing.pkl')
| unlicense |
zorroblue/scikit-learn | sklearn/preprocessing/tests/test_label.py | 10 | 18657 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
    """Densify ``a``: sparse inputs are converted via their ``toarray``
    method; anything else is returned unchanged."""
    converter = getattr(a, "toarray", None)
    return converter() if converter is not None else a
def test_label_binarizer():
    # LabelBinarizer round-trips fit_transform / inverse_transform for the
    # one-class (dense and sparse), two-class and multi-class cases.
    # one-class case defaults to negative label
    # For dense case:
    inp = ["pos", "pos", "pos", "pos"]
    lb = LabelBinarizer(sparse_output=False)
    expected = np.array([[0, 0, 0, 0]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ["pos"])
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
    # For sparse case:
    lb = LabelBinarizer(sparse_output=True)
    got = lb.fit_transform(inp)
    assert_true(issparse(got))
    assert_array_equal(lb.classes_, ["pos"])
    assert_array_equal(expected, got.toarray())
    assert_array_equal(lb.inverse_transform(got.toarray()), inp)
    lb = LabelBinarizer(sparse_output=False)
    # two-class case
    inp = ["neg", "pos", "pos", "neg"]
    expected = np.array([[0, 1, 1, 0]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(lb.classes_, ["neg", "pos"])
    assert_array_equal(expected, got)
    to_invert = np.array([[1, 0],
                          [0, 1],
                          [0, 1],
                          [1, 0]])
    assert_array_equal(lb.inverse_transform(to_invert), inp)
    # multi-class case
    inp = ["spam", "ham", "eggs", "ham", "0"]
    expected = np.array([[0, 0, 0, 1],
                         [0, 0, 1, 0],
                         [0, 1, 0, 0],
                         [0, 0, 1, 0],
                         [1, 0, 0, 0]])
    got = lb.fit_transform(inp)
    # Classes are sorted lexicographically.
    assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
    # Labels unseen at fit time ('a', 'c', 'f') transform to all-zero rows.
    lb = LabelBinarizer()
    expected = np.array([[1, 0, 0],
                         [0, 1, 0],
                         [0, 0, 1]])
    got = lb.fit_transform(['b', 'd', 'e'])
    assert_array_equal(expected, got)
    expected = np.array([[0, 0, 0],
                         [1, 0, 0],
                         [0, 0, 0],
                         [0, 1, 0],
                         [0, 0, 1],
                         [0, 0, 0]])
    got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
    assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
    # Custom neg_label / pos_label values are used in the encoded output and
    # inverted correctly.
    lb = LabelBinarizer(neg_label=-2, pos_label=0)
    # two-class case with pos_label=0
    inp = np.array([0, 1, 1, 0])
    expected = np.array([[-2, 0, 0, -2]]).T
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
    lb = LabelBinarizer(neg_label=-2, pos_label=2)
    # multi-class case
    inp = np.array([3, 2, 1, 2, 0])
    expected = np.array([[-2, -2, -2, +2],
                         [-2, -2, +2, -2],
                         [-2, +2, -2, -2],
                         [-2, -2, +2, -2],
                         [+2, -2, -2, -2]])
    got = lb.fit_transform(inp)
    assert_array_equal(expected, got)
    assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
    # Check that invalid arguments yield ValueError
    one_class = np.array([0, 0, 0, 0])
    lb = LabelBinarizer().fit(one_class)
    # Multilabel input after fitting on single-label data is rejected.
    multi_label = [(2, 3), (0,), (0, 2)]
    assert_raises(ValueError, lb.transform, multi_label)
    lb = LabelBinarizer()
    assert_raises(ValueError, lb.transform, [])
    assert_raises(ValueError, lb.inverse_transform, [])
    # neg_label must be strictly smaller than pos_label, and sparse output
    # requires neg_label == 0.
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
    assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
    assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
                  sparse_output=True)
    # Fail on y_type
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2], threshold=0)
    # Sequence of seq type should raise ValueError
    y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
    assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
    # Fail on the number of classes
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
                  classes=[1, 2, 3], threshold=0)
    # Fail on the dimension of 'binary'
    assert_raises(ValueError, _inverse_binarize_thresholding,
                  y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
                  classes=[1, 2, 3], threshold=0)
    # Fail on multioutput data
    assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
    assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
                  [1, 2, 3])
def test_label_encoder():
    # Test LabelEncoder's transform and inverse_transform methods
    le = LabelEncoder()
    le.fit([1, 1, 4, 5, -1, 0])
    # Classes are stored sorted; transform maps to their positions.
    assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
    assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
                       [1, 2, 3, 3, 4, 0, 0])
    assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
                       [0, 1, 4, 4, 5, -1, -1])
    # Unseen label 6 is rejected.
    assert_raises(ValueError, le.transform, [0, 6])
    le.fit(["apple", "orange"])
    msg = "bad input shape"
    assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
    # Test fit_transform
    le = LabelEncoder()
    ret = le.fit_transform([1, 1, 4, 5, -1, 0])
    assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
    le = LabelEncoder()
    ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
    assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
    # Check that invalid arguments yield ValueError
    le = LabelEncoder()
    assert_raises(ValueError, le.transform, [])
    assert_raises(ValueError, le.inverse_transform, [])
    # Fail on unseen labels
    le = LabelEncoder()
    le.fit([1, 2, 3, -1, 1])
    msg = "contains previously unseen labels"
    assert_raise_message(ValueError, msg, le.inverse_transform, [-2])
    assert_raise_message(ValueError, msg, le.inverse_transform, [-2, -3, -4])
def test_sparse_output_multilabel_binarizer():
    # MultiLabelBinarizer produces the same indicator matrix for list, set
    # and iterator inputs, in both sparse and dense output modes.
    # test input as iterable of iterables
    inputs = [
        lambda: [(2, 3), (1,), (1, 2)],
        lambda: (set([2, 3]), set([1]), set([1, 2])),
        lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    inverse = inputs[0]()
    for sparse_output in [True, False]:
        for inp in inputs:
            # With fit_transform
            mlb = MultiLabelBinarizer(sparse_output=sparse_output)
            got = mlb.fit_transform(inp())
            assert_equal(issparse(got), sparse_output)
            if sparse_output:
                # verify CSR assumption that indices and indptr have same dtype
                assert_equal(got.indices.dtype, got.indptr.dtype)
                got = got.toarray()
            assert_array_equal(indicator_mat, got)
            assert_array_equal([1, 2, 3], mlb.classes_)
            assert_equal(mlb.inverse_transform(got), inverse)
            # With fit
            mlb = MultiLabelBinarizer(sparse_output=sparse_output)
            got = mlb.fit(inp()).transform(inp())
            assert_equal(issparse(got), sparse_output)
            if sparse_output:
                # verify CSR assumption that indices and indptr have same dtype
                assert_equal(got.indices.dtype, got.indptr.dtype)
                got = got.toarray()
            assert_array_equal(indicator_mat, got)
            assert_array_equal([1, 2, 3], mlb.classes_)
            assert_equal(mlb.inverse_transform(got), inverse)
    # Non-binary values cannot be inverted.
    assert_raises(ValueError, mlb.inverse_transform,
                  csr_matrix(np.array([[0, 1, 1],
                                       [2, 0, 0],
                                       [1, 1, 0]])))
def test_multilabel_binarizer():
    # Dense-output counterpart of the sparse test above: identical indicator
    # matrix for list, set and iterator inputs.
    # test input as iterable of iterables
    inputs = [
        lambda: [(2, 3), (1,), (1, 2)],
        lambda: (set([2, 3]), set([1]), set([1, 2])),
        lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    inverse = inputs[0]()
    for inp in inputs:
        # With fit_transform
        mlb = MultiLabelBinarizer()
        got = mlb.fit_transform(inp())
        assert_array_equal(indicator_mat, got)
        assert_array_equal([1, 2, 3], mlb.classes_)
        assert_equal(mlb.inverse_transform(got), inverse)
        # With fit
        mlb = MultiLabelBinarizer()
        got = mlb.fit(inp()).transform(inp())
        assert_array_equal(indicator_mat, got)
        assert_array_equal([1, 2, 3], mlb.classes_)
        assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
    # A sample with no labels maps to an all-zero row.
    mlb = MultiLabelBinarizer()
    y = [[1, 2], [1], []]
    Y = np.array([[1, 1],
                  [1, 0],
                  [0, 0]])
    assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
    # Transforming a label absent from the fitted classes raises KeyError.
    mlb = MultiLabelBinarizer()
    y = [[1, 2]]
    assert_raises(KeyError, mlb.fit(y).transform, [[0]])
    mlb = MultiLabelBinarizer(classes=[1, 2])
    assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
    # An explicit `classes` list fixes both column order and column set.
    inp = [(2, 3), (1,), (1, 2)]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 0, 1]])
    # fit_transform()
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit_transform(inp), indicator_mat)
    assert_array_equal(mlb.classes_, [1, 3, 2])
    # fit().transform()
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
    assert_array_equal(mlb.classes_, [1, 3, 2])
    # ensure works with extra class
    mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
    assert_array_equal(mlb.fit_transform(inp),
                       np.hstack(([[0], [0], [0]], indicator_mat)))
    assert_array_equal(mlb.classes_, [4, 1, 3, 2])
    # ensure fit is no-op as iterable is not consumed
    inp = iter(inp)
    mlb = MultiLabelBinarizer(classes=[1, 3, 2])
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
    # Ensure sequences of the same length are not interpreted as a 2-d array
    inp = [[1], [0], [2]]
    indicator_mat = np.array([[0, 1, 0],
                              [1, 0, 0],
                              [0, 0, 1]])
    # fit_transform()
    mlb = MultiLabelBinarizer()
    assert_array_equal(mlb.fit_transform(inp), indicator_mat)
    assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
    # fit().transform()
    mlb = MultiLabelBinarizer()
    assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
    assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
    # Labels may be strings or even tuples; only hashability is required.
    tuple_classes = np.empty(3, dtype=object)
    tuple_classes[:] = [(1,), (2,), (3,)]
    inputs = [
        ([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
        ([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
        ([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
    ]
    indicator_mat = np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 1, 0]])
    for inp, classes in inputs:
        # fit_transform()
        mlb = MultiLabelBinarizer()
        assert_array_equal(mlb.fit_transform(inp), indicator_mat)
        assert_array_equal(mlb.classes_, classes)
        assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
        # fit().transform()
        mlb = MultiLabelBinarizer()
        assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
        assert_array_equal(mlb.classes_, classes)
        assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
    mlb = MultiLabelBinarizer()
    # Unhashable labels (dicts) are rejected.
    assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
    # Duplicate labels within one sample collapse to a single indicator.
    inp = [(1, 1, 1, 0)]
    indicator_mat = np.array([[1, 1]])
    mlb = MultiLabelBinarizer()
    assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
    # inverse_transform only accepts binary matrices of the fitted width.
    inp = [(1, 1, 1, 0)]
    mlb = MultiLabelBinarizer()
    mlb.fit_transform(inp)
    # Not binary
    assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
    # The following binary cases are fine, however
    mlb.inverse_transform(np.array([[0, 0]]))
    mlb.inverse_transform(np.array([[1, 1]]))
    mlb.inverse_transform(np.array([[1, 0]]))
    # Wrong shape
    assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
    assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
    # Column order of the output follows the order given in `classes`.
    out = label_binarize([1, 6], classes=[1, 2, 4, 6])
    expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
    assert_array_equal(out, expected)
    # Modified class order
    out = label_binarize([1, 6], classes=[1, 6, 4, 2])
    expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
    assert_array_equal(out, expected)
    out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
    expected = np.array([[0, 0, 1, 0],
                         [0, 0, 0, 1],
                         [0, 1, 0, 0],
                         [1, 0, 0, 0]])
    assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
    # Shared checker: verifies label_binarize and LabelBinarizer produce
    # `expected` (dense and sparse), and that both invert back to `y`.
    for sparse_output in [True, False]:
        if ((pos_label == 0 or neg_label != 0) and sparse_output):
            # Sparse output requires neg_label == 0 and pos_label != 0.
            assert_raises(ValueError, label_binarize, y, classes,
                          neg_label=neg_label, pos_label=pos_label,
                          sparse_output=sparse_output)
            continue
        # check label_binarize
        binarized = label_binarize(y, classes, neg_label=neg_label,
                                   pos_label=pos_label,
                                   sparse_output=sparse_output)
        assert_array_equal(toarray(binarized), expected)
        assert_equal(issparse(binarized), sparse_output)
        # check inverse
        y_type = type_of_target(y)
        if y_type == "multiclass":
            inversed = _inverse_binarize_multiclass(binarized, classes=classes)
        else:
            # Threshold halfway between neg_label and pos_label.
            inversed = _inverse_binarize_thresholding(binarized,
                                                      output_type=y_type,
                                                      classes=classes,
                                                      threshold=((neg_label +
                                                                  pos_label) /
                                                                 2.))
        assert_array_equal(toarray(inversed), toarray(y))
        # Check label binarizer
        lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
                            sparse_output=sparse_output)
        binarized = lb.fit_transform(y)
        assert_array_equal(toarray(binarized), expected)
        assert_equal(issparse(binarized), sparse_output)
        inverse_output = lb.inverse_transform(binarized)
        assert_array_equal(toarray(inverse_output), toarray(y))
        assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
    # NOTE(review): nose-style `yield` generator test; plain pytest (>= 4)
    # no longer collects these.
    y = [0, 1, 0]
    classes = [0, 1]
    pos_label = 2
    neg_label = -1
    expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
    yield check_binarized_results, y, classes, pos_label, neg_label, expected
    # Binary case where sparse_output = True will not result in a ValueError
    y = [0, 1, 0]
    classes = [0, 1]
    pos_label = 3
    neg_label = 0
    expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
    yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
    # NOTE(review): nose-style `yield` generator test (see above).
    y = [0, 1, 2]
    classes = [0, 1, 2]
    pos_label = 2
    neg_label = 0
    expected = 2 * np.eye(3)
    yield check_binarized_results, y, classes, pos_label, neg_label, expected
    # Sparse output with a nonzero neg_label is invalid.
    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
    # Multilabel indicator input, dense and in every sparse format.
    # NOTE(review): nose-style `yield` generator test.
    y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
    classes = [0, 1, 2]
    pos_label = 2
    neg_label = 0
    expected = pos_label * y_ind
    y_sparse = [sparse_matrix(y_ind)
                for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
                                      dok_matrix, lil_matrix]]
    for y in [y_ind] + y_sparse:
        yield (check_binarized_results, y, classes, pos_label, neg_label,
               expected)
    assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
                  pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
    # pos_label == 0 with neg_label == 1 is contradictory.
    assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
                  pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
    # argmax-based inversion: ties resolve to the first (lowest) class.
    got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
                                                   [-1, 0, -1],
                                                   [0, 0, 0]]),
                                       np.arange(3))
    assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
MohammedWasim/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 206 | 7643 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
# All clustering metrics that share the (labels_true, labels_pred) signature;
# the generic tests below iterate over this list.
score_funcs = [
    adjusted_rand_score,
    homogeneity_score,
    completeness_score,
    v_measure_score,
    adjusted_mutual_info_score,
    normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
    # Mismatched lengths and non-1D inputs raise informative ValueErrors.
    for score_func in score_funcs:
        expected = ('labels_true and labels_pred must have same size,'
                    ' got 2 and 3')
        assert_raise_message(ValueError, expected, score_func,
                             [0, 1], [1, 1, 1])
        expected = "labels_true must be 1D: shape is (2"
        assert_raise_message(ValueError, expected, score_func,
                             [[0, 1], [1, 0]], [1, 1, 1])
        expected = "labels_pred must be 1D: shape is (2"
        assert_raise_message(ValueError, expected, score_func,
                             [0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
    # Any labeling identical to the truth up to renaming scores 1.0.
    for score_func in score_funcs:
        assert_equal(score_func([], []), 1.0)
        assert_equal(score_func([0], [1]), 1.0)
        assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
        assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
        assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
        assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
        assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
    # homogeneous but not complete clustering
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 0, 0, 1, 2, 2])
    assert_almost_equal(h, 1.00, 2)
    assert_almost_equal(c, 0.69, 2)
    assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
    # complete but not homogeneous clustering
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 1, 1, 2, 2],
        [0, 0, 1, 1, 1, 1])
    assert_almost_equal(h, 0.58, 2)
    assert_almost_equal(c, 1.00, 2)
    assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
    # neither complete nor homogeneous but not so bad either
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
def test_non_consicutive_labels():
    # (sic: "consicutive" — the name is kept for test-discovery stability)
    # regression tests for labels with gaps
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 2, 2, 2],
        [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 4, 0, 4, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(ari_1, 0.24, 2)
    assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
                             seed=42):
    # Compute score for random uniform cluster labelings
    # NOTE(review): RandomState.random_integers is deprecated in modern
    # NumPy; randint(low, high + 1, size) draws the same inclusive range.
    random_labels = np.random.RandomState(seed).random_integers
    scores = np.zeros((len(k_range), n_runs))
    for i, k in enumerate(k_range):
        for j in range(n_runs):
            labels_a = random_labels(low=0, high=k - 1, size=n_samples)
            labels_b = random_labels(low=0, high=k - 1, size=n_samples)
            scores[i, j] = score_func(labels_a, labels_b)
    return scores
def test_adjustment_for_chance():
    # Check that adjusted scores are almost zero on random labels
    n_clusters_range = [2, 10, 50, 90]
    n_samples = 100
    n_runs = 10
    scores = uniform_labelings_scores(
        adjusted_rand_score, n_samples, n_clusters_range, n_runs)
    max_abs_scores = np.abs(scores).max(axis=1)
    assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
    # Compute the Adjusted Mutual Information and test against known values
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    # Mutual information
    mi = mutual_info_score(labels_a, labels_b)
    assert_almost_equal(mi, 0.41022, 5)
    # Expected mutual information
    C = contingency_matrix(labels_a, labels_b)
    n_samples = np.sum(C)
    emi = expected_mutual_information(C, n_samples)
    assert_almost_equal(emi, 0.15042, 5)
    # Adjusted mutual information
    ami = adjusted_mutual_info_score(labels_a, labels_b)
    assert_almost_equal(ami, 0.27502, 5)
    # AMI is invariant to relabeling of the clusters.
    ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
    assert_equal(ami, 1.0)
    # Test with a very large array
    a110 = np.array([list(labels_a) * 110]).flatten()
    b110 = np.array([list(labels_b) * 110]).flatten()
    ami = adjusted_mutual_info_score(a110, b110)
    # This is not accurate to more than 2 places
    assert_almost_equal(ami, 0.37, 2)
def test_entropy():
    # Two classes with counts 2 and 1: H = -(2/3 ln 2/3 + 1/3 ln 1/3),
    # i.e. natural-log entropy of the label distribution (values are
    # treated as categorical labels, not magnitudes).
    ent = entropy([0, 0, 42.])
    assert_almost_equal(ent, 0.6365141, 5)
    # The empty labeling is defined to have entropy 1 by this
    # implementation's convention (avoids 0/0 in normalized scores).
    assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    C = contingency_matrix(labels_a, labels_b)
    # The contingency matrix must agree with a 2-D histogram over the
    # label values (bins 1..3 on both axes for these labelings).
    C2 = np.histogram2d(labels_a, labels_b,
                        bins=(np.arange(1, 5),
                              np.arange(1, 5)))[0]
    assert_array_almost_equal(C, C2)
    # ``eps`` is added to every cell (smoothing for log-based scores).
    C = contingency_matrix(labels_a, labels_b, eps=.1)
    assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
    """Check numerical stability when information is exactly zero.

    One labeling puts every sample in a single cluster while the other
    gives every sample its own cluster, so the mutual information is
    exactly 0 and every normalized/adjusted score must return 0.0
    without dividing by zero.
    """
    # ``np.int`` was a deprecated alias for the builtin ``int`` and was
    # removed in NumPy 1.24; use ``int`` directly.
    for i in np.logspace(1, 4, 4).astype(int):
        labels_a, labels_b = np.ones(i, dtype=int),\
            np.arange(i, dtype=int)
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(v_measure_score(labels_a, labels_b), 0.0)
        assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
        assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
    """Check the identity V = 2 * MI / (H(a) + H(b)) on random labelings.

    The V-measure is the harmonic mean of homogeneity and completeness,
    which equals twice the mutual information normalized by the sum of
    the two label entropies.
    """
    # ``np.int`` was removed in NumPy 1.24 -> use builtin ``int``;
    # ``random_integers(0, 10, i)`` (inclusive bounds, deprecated/removed)
    # -> ``randint(0, 11, i)`` draws the same uniform {0, ..., 10} values.
    for i in np.logspace(1, 4, 4).astype(int):
        random_state = np.random.RandomState(seed)
        labels_a, labels_b = (random_state.randint(0, 11, i),
                              random_state.randint(0, 11, i))
        assert_almost_equal(v_measure_score(labels_a, labels_b),
                            2.0 * mutual_info_score(labels_a, labels_b) /
                            (entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
Kleptobismol/scikit-bio | skbio/stats/distance/tests/test_permanova.py | 1 | 8466 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six import StringIO
from functools import partial
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from pandas.util.testing import assert_series_equal
from skbio import DistanceMatrix
from skbio.stats.distance import permanova, PERMANOVA
class TestPERMANOVA(TestCase):
    """All results were verified with R (vegan::adonis)."""

    def setUp(self):
        # Distance matrices with and without ties in the ranks, with 2 groups
        # of equal size.
        dm_ids = ['s1', 's2', 's3', 's4']
        self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
        # Metadata deliberately lists the IDs out of order and contains an
        # extra ID ('s5') absent from the distance matrix, to exercise
        # ID-based alignment when grouping is given as a data frame.
        self.df = pd.read_csv(
            StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
                     's1,Control'), index_col=0)
        self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
                                       [1, 0, 3, 2],
                                       [1, 3, 0, 3],
                                       [4, 2, 3, 0]], dm_ids)
        self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
                                          [1, 0, 3, 2],
                                          [5, 3, 0, 3],
                                          [4, 2, 3, 0]], dm_ids)
        # Test with 3 groups of unequal size.
        self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
                                 'Treatment1', 'Control', 'Control']
        # Equivalent grouping but with different labels -- groups should be
        # assigned different integer labels but results should be the same.
        self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z']
        self.dm_unequal = DistanceMatrix(
            [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
             [1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
             [0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
             [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
             [1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
             [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
            ['s1', 's2', 's3', 's4', 's5', 's6'])
        # Expected series index is the same across all tests.
        self.exp_index = ['method name', 'test statistic name', 'sample size',
                          'number of groups', 'test statistic', 'p-value',
                          'number of permutations']
        # Stricter series equality testing than the default.
        self.assert_series_equal = partial(assert_series_equal,
                                           check_index_type=True,
                                           check_series_type=True)

    def test_call_ties(self):
        # Ensure we get the same results if we rerun the method using the same
        # inputs. Also ensure we get the same results if we run the method
        # using a grouping vector or a data frame with equivalent groupings.
        exp = pd.Series(index=self.exp_index,
                        data=['PERMANOVA', 'pseudo-F', 4, 2, 2.0, 0.671, 999])
        for _ in range(2):
            # Re-seeding before each call makes the permutation-based
            # p-value deterministic and repeatable.
            np.random.seed(0)
            obs = permanova(self.dm_ties, self.grouping_equal)
            self.assert_series_equal(obs, exp)
        for _ in range(2):
            np.random.seed(0)
            obs = permanova(self.dm_ties, self.df, column='Group')
            self.assert_series_equal(obs, exp)

    def test_call_no_ties(self):
        exp = pd.Series(index=self.exp_index,
                        data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, 0.332, 999])
        np.random.seed(0)
        obs = permanova(self.dm_no_ties, self.grouping_equal)
        self.assert_series_equal(obs, exp)

    def test_call_no_permutations(self):
        # With 0 permutations no p-value can be computed -> NaN.
        exp = pd.Series(index=self.exp_index,
                        data=['PERMANOVA', 'pseudo-F', 4, 2, 4.4, np.nan, 0])
        obs = permanova(self.dm_no_ties, self.grouping_equal, permutations=0)
        self.assert_series_equal(obs, exp)

    def test_call_unequal_group_sizes(self):
        exp = pd.Series(index=self.exp_index,
                        data=['PERMANOVA', 'pseudo-F', 6, 3, 0.578848, 0.645,
                              999])
        np.random.seed(0)
        obs = permanova(self.dm_unequal, self.grouping_unequal)
        self.assert_series_equal(obs, exp)
        # Relabeled-but-equivalent grouping must give identical results.
        np.random.seed(0)
        obs = permanova(self.dm_unequal, self.grouping_unequal_relabeled)
        self.assert_series_equal(obs, exp)
class TestPERMANOVAClass(TestCase):
    """All results were verified with R (vegan::adonis)."""

    def setUp(self):
        # Distance matrices with and without ties in the ranks, with 2 groups
        # of equal size.
        dm_ids = ['s1', 's2', 's3', 's4']
        grouping_equal = ['Control', 'Control', 'Fast', 'Fast']
        # Metadata lists IDs out of order and includes an extra ID ('s5')
        # not present in the distance matrix.
        df = pd.read_csv(
            StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n'
                     's1,Control'), index_col=0)
        self.dm_ties = DistanceMatrix([[0, 1, 1, 4],
                                       [1, 0, 3, 2],
                                       [1, 3, 0, 3],
                                       [4, 2, 3, 0]], dm_ids)
        self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4],
                                          [1, 0, 3, 2],
                                          [5, 3, 0, 3],
                                          [4, 2, 3, 0]], dm_ids)
        # Test with 3 groups of unequal size.
        grouping_unequal = ['Control', 'Treatment1', 'Treatment2',
                            'Treatment1', 'Control', 'Control']
        self.dm_unequal = DistanceMatrix(
            [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0],
             [1.0, 0.0, 0.002, 0.42, 0.998, 0.0],
             [0.1, 0.002, 0.0, 1.0, 0.123, 1.0],
             [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43],
             [1.0, 0.998, 0.123, 0.123, 0.0, 0.5],
             [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]],
            ['s1', 's2', 's3', 's4', 's5', 's6'])
        # Pre-built callable PERMANOVA instances under test (class-based API).
        self.permanova_ties = PERMANOVA(self.dm_ties, grouping_equal)
        self.permanova_no_ties = PERMANOVA(self.dm_no_ties, grouping_equal)
        self.permanova_ties_df = PERMANOVA(self.dm_ties, df, column='Group')
        self.permanova_unequal = PERMANOVA(self.dm_unequal, grouping_unequal)

    def test_call_ties(self):
        # Ensure we get the same results if we rerun the method on the same
        # object. Also ensure we get the same results if we run the method
        # using a grouping vector or a data frame with equivalent groupings.
        for inst in self.permanova_ties, self.permanova_ties_df:
            for trial in range(2):
                # Re-seed so the permutation p-value is reproducible.
                np.random.seed(0)
                obs = inst()
                self.assertEqual(obs.sample_size, 4)
                npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
                self.assertAlmostEqual(obs.statistic, 2.0)
                self.assertAlmostEqual(obs.p_value, 0.671)
                self.assertEqual(obs.permutations, 999)

    def test_call_no_ties(self):
        np.random.seed(0)
        obs = self.permanova_no_ties()
        self.assertEqual(obs.sample_size, 4)
        npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
        self.assertAlmostEqual(obs.statistic, 4.4)
        self.assertAlmostEqual(obs.p_value, 0.332)
        self.assertEqual(obs.permutations, 999)

    def test_call_no_permutations(self):
        # With 0 permutations the class-based API reports p_value as None.
        obs = self.permanova_no_ties(0)
        self.assertEqual(obs.sample_size, 4)
        npt.assert_array_equal(obs.groups, ['Control', 'Fast'])
        self.assertAlmostEqual(obs.statistic, 4.4)
        self.assertEqual(obs.p_value, None)
        self.assertEqual(obs.permutations, 0)

    def test_call_unequal_group_sizes(self):
        np.random.seed(0)
        obs = self.permanova_unequal()
        self.assertEqual(obs.sample_size, 6)
        npt.assert_array_equal(obs.groups,
                               ['Control', 'Treatment1', 'Treatment2'])
        self.assertAlmostEqual(obs.statistic, 0.578848, 6)
        self.assertAlmostEqual(obs.p_value, 0.645)
        self.assertEqual(obs.permutations, 999)
if __name__ == '__main__':
main()
| bsd-3-clause |
pydata/vbench | vbench/db.py | 3 | 5573 | from pandas import DataFrame
from sqlalchemy import Table, Column, MetaData, create_engine, ForeignKey
from sqlalchemy import types as sqltypes
from sqlalchemy import sql
import logging
log = logging.getLogger('vb.db')
class BenchmarkDB(object):
    """
    Persist vbench results in a sqlite3 database.

    Three tables are maintained:

    - ``benchmarks``: one row per benchmark (keyed by content checksum)
    - ``results``: one row per (benchmark, revision) timing
    - ``blacklist``: revisions that should not be benchmarked again
    """

    def __init__(self, dbpath):
        log.info("Initializing DB at %s" % dbpath)
        self.dbpath = dbpath
        self._engine = create_engine('sqlite:///%s' % dbpath)
        self._metadata = MetaData()
        self._metadata.bind = self._engine
        self._benchmarks = Table('benchmarks', self._metadata,
            Column('checksum', sqltypes.String(32), primary_key=True),
            Column('name', sqltypes.String(200), nullable=False),
            Column('description', sqltypes.Text)
        )
        self._results = Table('results', self._metadata,
            Column('checksum', sqltypes.String(32),
                   ForeignKey('benchmarks.checksum'), primary_key=True),
            Column('revision', sqltypes.String(50), primary_key=True),
            Column('timestamp', sqltypes.DateTime, nullable=False),
            Column('ncalls', sqltypes.String(50)),
            Column('timing', sqltypes.Float),
            Column('traceback', sqltypes.Text),
        )
        self._blacklist = Table('blacklist', self._metadata,
            Column('revision', sqltypes.String(50), primary_key=True)
        )
        self._ensure_tables_created()

    # Cache of one BenchmarkDB per database path (see get_instance).
    _instances = {}

    @classmethod
    def get_instance(cls, dbpath):
        """Return the shared BenchmarkDB for `dbpath`, creating it once."""
        if dbpath not in cls._instances:
            cls._instances[dbpath] = BenchmarkDB(dbpath)
        return cls._instances[dbpath]

    def _ensure_tables_created(self):
        # checkfirst=True makes this idempotent on an existing database.
        log.debug("Ensuring DB tables are created")
        self._benchmarks.create(self._engine, checkfirst=True)
        self._results.create(self._engine, checkfirst=True)
        self._blacklist.create(self._engine, checkfirst=True)

    def update_name(self, benchmark):
        """
        Update the stored display name of an existing benchmark row.

        Parameters
        ----------
        benchmark : Benchmark
            Object exposing ``checksum`` (row key) and ``name`` attributes.
        """
        table = self._benchmarks
        # BUG FIX: this previously wrote ``checksum=benchmark.checksum``
        # (a no-op, since the row is selected by that same checksum); the
        # intent of the method is to update the ``name`` column.
        stmt = (table.update().
                where(table.c.checksum == benchmark.checksum).
                values(name=benchmark.name))
        self.conn.execute(stmt)

    def restrict_to_benchmarks(self, benchmarks):
        """
        Delete stored benchmarks whose checksum is not in `benchmarks`.

        Parameters
        ----------
        benchmarks : list
            Benchmark objects exposing a ``checksum`` attribute.
        """
        checksums = set([b.checksum for b in benchmarks])
        ex_benchmarks = self.get_benchmarks()
        to_delete = set(ex_benchmarks.index) - checksums
        t = self._benchmarks
        for chksum in to_delete:
            log.info('Deleting %s\n%s' % (chksum, ex_benchmarks.xs(chksum)))
            stmt = t.delete().where(t.c.checksum == chksum)
            self.conn.execute(stmt)

    @property
    def conn(self):
        # NOTE(review): a fresh connection is opened on every access and
        # never explicitly closed; SQLAlchemy pooling mitigates this, but
        # callers should not assume a single persistent connection.
        return self._engine.connect()

    def write_benchmark(self, bm, overwrite=False):
        """Insert one benchmark row (name, checksum, description)."""
        ins = self._benchmarks.insert()
        ins = ins.values(name=bm.name, checksum=bm.checksum,
                         description=bm.description)
        self.conn.execute(ins)  # XXX: return the result?

    def delete_benchmark(self, checksum):
        """Not implemented; placeholder for deleting a benchmark row."""
        pass

    def write_result(self, checksum, revision, timestamp, ncalls,
                     timing, traceback=None, overwrite=False):
        """Insert one timing result for (checksum, revision)."""
        ins = self._results.insert()
        ins = ins.values(checksum=checksum, revision=revision,
                         timestamp=timestamp,
                         ncalls=ncalls, timing=timing, traceback=traceback)
        self.conn.execute(ins)  # XXX: return the result?

    def delete_result(self, checksum, revision):
        """Not implemented; placeholder for deleting a single result."""
        pass

    def delete_error_results(self):
        """Delete result rows whose timing is NULL (failed runs)."""
        tab = self._results
        ins = tab.delete()
        # SQL 'timing IS NULL'; SQLAlchemy requires '== None' here, not
        # 'is None'.
        ins = ins.where(tab.c.timing == None)  # noqa: E711
        self.conn.execute(ins)

    def get_benchmarks(self):
        """Return all benchmarks as a DataFrame indexed by checksum."""
        stmt = sql.select([self._benchmarks])
        result = self.conn.execute(stmt)
        return _sqa_to_frame(result).set_index('checksum')

    def get_rev_results(self, rev):
        """Return {checksum: row} for all results of revision `rev`."""
        tab = self._results
        stmt = sql.select([tab],
                          sql.and_(tab.c.revision == rev))
        results = list(self.conn.execute(stmt))
        return dict((v.checksum, v) for v in results)

    def delete_rev_results(self, rev):
        """Delete all results recorded for revision `rev`."""
        tab = self._results
        stmt = tab.delete().where(tab.c.revision == rev)
        self.conn.execute(stmt)

    def add_rev_blacklist(self, rev):
        """
        Don't try running this revision again
        """
        stmt = self._blacklist.insert().values(revision=rev)
        self.conn.execute(stmt)

    def get_rev_blacklist(self):
        """Return the list of blacklisted revision strings."""
        stmt = self._blacklist.select()
        return [x['revision'] for x in self.conn.execute(stmt)]

    def clear_blacklist(self):
        """Remove every revision from the blacklist."""
        stmt = self._blacklist.delete()
        self.conn.execute(stmt)

    def get_benchmark_results(self, checksum):
        """
        Return all results for one benchmark as a DataFrame indexed and
        sorted by timestamp.
        """
        tab = self._results
        stmt = sql.select([tab.c.timestamp, tab.c.revision, tab.c.ncalls,
                           tab.c.timing, tab.c.traceback],
                          sql.and_(tab.c.checksum == checksum))
        results = self.conn.execute(stmt)
        df = _sqa_to_frame(results).set_index('timestamp')
        return df.sort_index()
def _sqa_to_frame(result):
rows = [tuple(x) for x in result]
if not rows:
return DataFrame(columns=result.keys())
return DataFrame.from_records(rows, columns=result.keys())
| mit |
herilalaina/scikit-learn | sklearn/gaussian_process/gpr.py | 9 | 20571 | """Gaussian processes regression. """
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_X_y, check_array
from sklearn.utils.deprecation import deprecated
class GaussianProcessRegressor(BaseEstimator, RegressorMixin):
    """Gaussian process regression (GPR).

    The implementation is based on Algorithm 2.1 of Gaussian Processes
    for Machine Learning (GPML) by Rasmussen and Williams.

    In addition to standard scikit-learn estimator API,
    GaussianProcessRegressor:

       * allows prediction without prior fitting (based on the GP prior)
       * provides an additional method sample_y(X), which evaluates samples
         drawn from the GPR (prior or posterior) at given inputs
       * exposes a method log_marginal_likelihood(theta), which can be used
         externally for other ways of selecting hyperparameters, e.g., via
         Markov chain Monte Carlo.

    Read more in the :ref:`User Guide <gaussian_process>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    kernel : kernel object
        The kernel specifying the covariance function of the GP. If None is
        passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
        the kernel's hyperparameters are optimized during fitting.

    alpha : float or array-like, optional (default: 1e-10)
        Value added to the diagonal of the kernel matrix during fitting.
        Larger values correspond to increased noise level in the observations.
        This can also prevent a potential numerical issue during fitting, by
        ensuring that the calculated values form a positive definite matrix.
        If an array is passed, it must have the same number of entries as the
        data used for fitting and is used as datapoint-dependent noise level.
        Note that this is equivalent to adding a WhiteKernel with c=alpha.
        Allowing to specify the noise level directly as a parameter is mainly
        for convenience and for consistency with Ridge.

    optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
        Can either be one of the internally supported optimizers for optimizing
        the kernel's parameters, specified by a string, or an externally
        defined optimizer passed as a callable. If a callable is passed, it
        must have the signature::

            def optimizer(obj_func, initial_theta, bounds):
                # * 'obj_func' is the objective function to be maximized, which
                #   takes the hyperparameters theta as parameter and an
                #   optional flag eval_gradient, which determines if the
                #   gradient is returned additionally to the function value
                # * 'initial_theta': the initial value for theta, which can be
                #   used by local optimizers
                # * 'bounds': the bounds on the values of theta
                ....
                # Returned are the best found hyperparameters theta and
                # the corresponding value of the target function.
                return theta_opt, func_min

        Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
        is used. If None is passed, the kernel's parameters are kept fixed.
        Available internal optimizers are::

            'fmin_l_bfgs_b'

    n_restarts_optimizer : int, optional (default: 0)
        The number of restarts of the optimizer for finding the kernel's
        parameters which maximize the log-marginal likelihood. The first run
        of the optimizer is performed from the kernel's initial parameters,
        the remaining ones (if any) from thetas sampled log-uniform randomly
        from the space of allowed theta-values. If greater than 0, all bounds
        must be finite. Note that n_restarts_optimizer == 0 implies that one
        run is performed.

    normalize_y : boolean, optional (default: False)
        Whether the target values y are normalized, i.e., the mean of the
        observed target values become zero. This parameter should be set to
        True if the target values' mean is expected to differ considerable from
        zero. When enabled, the normalization effectively modifies the GP's
        prior based on the data, which contradicts the likelihood principle;
        normalization is thus disabled per default.

    copy_X_train : bool, optional (default: True)
        If True, a persistent copy of the training data is stored in the
        object. Otherwise, just a reference to the training data is stored,
        which might cause predictions to change if the data is modified
        externally.

    random_state : int, RandomState instance or None, optional (default: None)
        The generator used to initialize the centers. If int, random_state is
        the seed used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random number
        generator is the RandomState instance used by `np.random`.

    Attributes
    ----------
    X_train_ : array-like, shape = (n_samples, n_features)
        Feature values in training data (also required for prediction)

    y_train_ : array-like, shape = (n_samples, [n_output_dims])
        Target values in training data (also required for prediction)

    kernel_ : kernel object
        The kernel used for prediction. The structure of the kernel is the
        same as the one passed as parameter but with optimized hyperparameters

    L_ : array-like, shape = (n_samples, n_samples)
        Lower-triangular Cholesky decomposition of the kernel in ``X_train_``

    alpha_ : array-like, shape = (n_samples,)
        Dual coefficients of training data points in kernel space

    log_marginal_likelihood_value_ : float
        The log-marginal-likelihood of ``self.kernel_.theta``
    """

    def __init__(self, kernel=None, alpha=1e-10,
                 optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0,
                 normalize_y=False, copy_X_train=True, random_state=None):
        self.kernel = kernel
        self.alpha = alpha
        self.optimizer = optimizer
        self.n_restarts_optimizer = n_restarts_optimizer
        self.normalize_y = normalize_y
        self.copy_X_train = copy_X_train
        self.random_state = random_state

    @property
    @deprecated("Attribute rng was deprecated in version 0.19 and "
                "will be removed in 0.21.")
    def rng(self):
        return self._rng

    @property
    @deprecated("Attribute y_train_mean was deprecated in version 0.19 and "
                "will be removed in 0.21.")
    def y_train_mean(self):
        return self._y_train_mean

    def fit(self, X, y):
        """Fit Gaussian process regression model.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Training data

        y : array-like, shape = (n_samples, [n_output_dims])
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        if self.kernel is None:  # Use an RBF kernel as default
            self.kernel_ = C(1.0, constant_value_bounds="fixed") \
                * RBF(1.0, length_scale_bounds="fixed")
        else:
            self.kernel_ = clone(self.kernel)

        self._rng = check_random_state(self.random_state)

        X, y = check_X_y(X, y, multi_output=True, y_numeric=True)

        # Normalize target value
        if self.normalize_y:
            self._y_train_mean = np.mean(y, axis=0)
            # demean y
            y = y - self._y_train_mean
        else:
            self._y_train_mean = np.zeros(1)

        # NOTE(review): an array-valued alpha is assumed to be a numpy array
        # here (``.shape`` is accessed); a plain list would raise.
        if np.iterable(self.alpha) \
           and self.alpha.shape[0] != y.shape[0]:
            if self.alpha.shape[0] == 1:
                self.alpha = self.alpha[0]
            else:
                raise ValueError("alpha must be a scalar or an array"
                                 " with same number of entries as y.(%d != %d)"
                                 % (self.alpha.shape[0], y.shape[0]))

        self.X_train_ = np.copy(X) if self.copy_X_train else X
        self.y_train_ = np.copy(y) if self.copy_X_train else y

        if self.optimizer is not None and self.kernel_.n_dims > 0:
            # Choose hyperparameters based on maximizing the log-marginal
            # likelihood (potentially starting from several initial values)
            def obj_func(theta, eval_gradient=True):
                # Negated because the scipy optimizers minimize.
                if eval_gradient:
                    lml, grad = self.log_marginal_likelihood(
                        theta, eval_gradient=True)
                    return -lml, -grad
                else:
                    return -self.log_marginal_likelihood(theta)

            # First optimize starting from theta specified in kernel
            optima = [(self._constrained_optimization(obj_func,
                                                      self.kernel_.theta,
                                                      self.kernel_.bounds))]

            # Additional runs are performed from log-uniform chosen initial
            # theta
            if self.n_restarts_optimizer > 0:
                if not np.isfinite(self.kernel_.bounds).all():
                    raise ValueError(
                        "Multiple optimizer restarts (n_restarts_optimizer>0) "
                        "requires that all bounds are finite.")
                bounds = self.kernel_.bounds
                for iteration in range(self.n_restarts_optimizer):
                    theta_initial = \
                        self._rng.uniform(bounds[:, 0], bounds[:, 1])
                    optima.append(
                        self._constrained_optimization(obj_func, theta_initial,
                                                       bounds))
            # Select result from run with minimal (negative) log-marginal
            # likelihood
            lml_values = list(map(itemgetter(1), optima))
            self.kernel_.theta = optima[np.argmin(lml_values)][0]
            self.log_marginal_likelihood_value_ = -np.min(lml_values)
        else:
            self.log_marginal_likelihood_value_ = \
                self.log_marginal_likelihood(self.kernel_.theta)

        # Precompute quantities required for predictions which are independent
        # of actual query points
        K = self.kernel_(self.X_train_)
        K[np.diag_indices_from(K)] += self.alpha
        try:
            self.L_ = cholesky(K, lower=True)  # Line 2
            # self.L_ changed, self._K_inv needs to be recomputed
            # (it is rebuilt lazily in predict() when return_std is used)
            self._K_inv = None
        except np.linalg.LinAlgError as exc:
            exc.args = ("The kernel, %s, is not returning a "
                        "positive definite matrix. Try gradually "
                        "increasing the 'alpha' parameter of your "
                        "GaussianProcessRegressor estimator."
                        % self.kernel_,) + exc.args
            raise
        self.alpha_ = cho_solve((self.L_, True), self.y_train_)  # Line 3
        return self

    def predict(self, X, return_std=False, return_cov=False):
        """Predict using the Gaussian process regression model

        We can also predict based on an unfitted model by using the GP prior.
        In addition to the mean of the predictive distribution, also its
        standard deviation (return_std=True) or covariance (return_cov=True).
        Note that at most one of the two can be requested.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Query points where the GP is evaluated

        return_std : bool, default: False
            If True, the standard-deviation of the predictive distribution at
            the query points is returned along with the mean.

        return_cov : bool, default: False
            If True, the covariance of the joint predictive distribution at
            the query points is returned along with the mean

        Returns
        -------
        y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution a query points

        y_std : array, shape = (n_samples,), optional
            Standard deviation of predictive distribution at query points.
            Only returned when return_std is True.

        y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution a query points.
            Only returned when return_cov is True.
        """
        if return_std and return_cov:
            raise RuntimeError(
                "Not returning standard deviation of predictions when "
                "returning full covariance.")

        X = check_array(X)

        if not hasattr(self, "X_train_"):  # Unfitted;predict based on GP prior
            if self.kernel is None:
                kernel = (C(1.0, constant_value_bounds="fixed") *
                          RBF(1.0, length_scale_bounds="fixed"))
            else:
                kernel = self.kernel
            # Prior mean is zero everywhere; uncertainty comes from kernel.
            y_mean = np.zeros(X.shape[0])
            if return_cov:
                y_cov = kernel(X)
                return y_mean, y_cov
            elif return_std:
                y_var = kernel.diag(X)
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean
        else:  # Predict based on GP posterior
            K_trans = self.kernel_(X, self.X_train_)
            y_mean = K_trans.dot(self.alpha_)  # Line 4 (y_mean = f_star)
            y_mean = self._y_train_mean + y_mean  # undo normal.
            if return_cov:
                v = cho_solve((self.L_, True), K_trans.T)  # Line 5
                y_cov = self.kernel_(X) - K_trans.dot(v)  # Line 6
                return y_mean, y_cov
            elif return_std:
                # cache result of K_inv computation
                if self._K_inv is None:
                    # compute inverse K_inv of K based on its Cholesky
                    # decomposition L and its inverse L_inv
                    L_inv = solve_triangular(self.L_.T,
                                             np.eye(self.L_.shape[0]))
                    self._K_inv = L_inv.dot(L_inv.T)

                # Compute variance of predictive distribution
                y_var = self.kernel_.diag(X)
                y_var -= np.einsum("ij,ij->i",
                                   np.dot(K_trans, self._K_inv), K_trans)

                # Check if any of the variances is negative because of
                # numerical issues. If yes: set the variance to 0.
                y_var_negative = y_var < 0
                if np.any(y_var_negative):
                    warnings.warn("Predicted variances smaller than 0. "
                                  "Setting those variances to 0.")
                    y_var[y_var_negative] = 0.0
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean

    def sample_y(self, X, n_samples=1, random_state=0):
        """Draw samples from Gaussian process and evaluate at X.

        Parameters
        ----------
        X : array-like, shape = (n_samples_X, n_features)
            Query points where the GP samples are evaluated

        n_samples : int, default: 1
            The number of samples drawn from the Gaussian process

        random_state : int, RandomState instance or None, optional (default=0)
            If int, random_state is the seed used by the random number
            generator; If RandomState instance, random_state is the
            random number generator; If None, the random number
            generator is the RandomState instance used by `np.random`.

        Returns
        -------
        y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples)
            Values of n_samples samples drawn from Gaussian process and
            evaluated at query points.
        """
        rng = check_random_state(random_state)

        y_mean, y_cov = self.predict(X, return_cov=True)
        if y_mean.ndim == 1:
            y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
        else:
            # Multi-output target: sample each output dimension separately
            # (same covariance) and stack along the middle axis.
            y_samples = \
                [rng.multivariate_normal(y_mean[:, i], y_cov,
                                         n_samples).T[:, np.newaxis]
                 for i in range(y_mean.shape[1])]
            y_samples = np.hstack(y_samples)
        return y_samples

    def log_marginal_likelihood(self, theta=None, eval_gradient=False):
        """Returns log-marginal likelihood of theta for training data.

        Parameters
        ----------
        theta : array-like, shape = (n_kernel_params,) or None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. If None, the precomputed log_marginal_likelihood
            of ``self.kernel_.theta`` is returned.

        eval_gradient : bool, default: False
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta is returned
            additionally. If True, theta must not be None.

        Returns
        -------
        log_likelihood : float
            Log-marginal likelihood of theta for training data.

        log_likelihood_gradient : array, shape = (n_kernel_params,), optional
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta.
            Only returned when eval_gradient is True.
        """
        if theta is None:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_

        kernel = self.kernel_.clone_with_theta(theta)

        if eval_gradient:
            K, K_gradient = kernel(self.X_train_, eval_gradient=True)
        else:
            K = kernel(self.X_train_)

        K[np.diag_indices_from(K)] += self.alpha
        try:
            L = cholesky(K, lower=True)  # Line 2
        except np.linalg.LinAlgError:
            # A non-PSD kernel matrix means this theta is infeasible;
            # report -inf so the optimizer moves away from it.
            return (-np.inf, np.zeros_like(theta)) \
                if eval_gradient else -np.inf

        # Support multi-dimensional output of self.y_train_
        y_train = self.y_train_
        if y_train.ndim == 1:
            y_train = y_train[:, np.newaxis]

        alpha = cho_solve((L, True), y_train)  # Line 3

        # Compute log-likelihood (compare line 7)
        log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
        log_likelihood_dims -= np.log(np.diag(L)).sum()
        log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
        log_likelihood = log_likelihood_dims.sum(-1)  # sum over dimensions

        if eval_gradient:  # compare Equation 5.9 from GPML
            tmp = np.einsum("ik,jk->ijk", alpha, alpha)  # k: output-dimension
            tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
            # Compute "0.5 * trace(tmp.dot(K_gradient))" without
            # constructing the full matrix tmp.dot(K_gradient) since only
            # its diagonal is required
            log_likelihood_gradient_dims = \
                0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
            log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)

        if eval_gradient:
            return log_likelihood, log_likelihood_gradient
        else:
            return log_likelihood

    def _constrained_optimization(self, obj_func, initial_theta, bounds):
        # Dispatch to the configured optimizer; returns (theta_opt, func_min).
        if self.optimizer == "fmin_l_bfgs_b":
            theta_opt, func_min, convergence_dict = \
                fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
            if convergence_dict["warnflag"] != 0:
                warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
                              " state: %s" % convergence_dict)
        elif callable(self.optimizer):
            theta_opt, func_min = \
                self.optimizer(obj_func, initial_theta, bounds=bounds)
        else:
            raise ValueError("Unknown optimizer %s." % self.optimizer)

        return theta_opt, func_min
| bsd-3-clause |
fmfn/UnbalancedDataset | imblearn/over_sampling/_smote/tests/test_borderline_smote.py | 3 | 1920 | import pytest
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_equal
from imblearn.over_sampling import BorderlineSMOTE
@pytest.fixture
def data():
    # Shared fixture: 20 two-dimensional samples with a binary target
    # (8 samples of class 0, 12 of class 1) used by the tests below.
    X = np.array(
        [
            [0.11622591, -0.0317206],
            [0.77481731, 0.60935141],
            [1.25192108, -0.22367336],
            [0.53366841, -0.30312976],
            [1.52091956, -0.49283504],
            [-0.28162401, -2.10400981],
            [0.83680821, 1.72827342],
            [0.3084254, 0.33299982],
            [0.70472253, -0.73309052],
            [0.28893132, -0.38761769],
            [1.15514042, 0.0129463],
            [0.88407872, 0.35454207],
            [1.31301027, -0.92648734],
            [-1.11515198, -0.93689695],
            [-0.18410027, -0.45194484],
            [0.9281014, 0.53085498],
            [-0.14374509, 0.27370049],
            [-0.41635887, -0.38299653],
            [0.08711622, 0.93259929],
            [1.70580611, -0.11219234],
        ]
    )
    y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
    return X, y
def test_borderline_smote_wrong_kind(data):
    # An unsupported 'kind' value must raise a ValueError at fit_resample
    # time (validation happens during resampling, not at construction).
    bsmote = BorderlineSMOTE(kind="rand")
    with pytest.raises(ValueError, match='The possible "kind" of algorithm'):
        bsmote.fit_resample(*data)
@pytest.mark.parametrize("kind", ["borderline-1", "borderline-2"])
def test_borderline_smote(kind, data):
bsmote = BorderlineSMOTE(kind=kind, random_state=42)
bsmote_nn = BorderlineSMOTE(
kind=kind,
random_state=42,
k_neighbors=NearestNeighbors(n_neighbors=6),
m_neighbors=NearestNeighbors(n_neighbors=11),
)
X_res_1, y_res_1 = bsmote.fit_resample(*data)
X_res_2, y_res_2 = bsmote_nn.fit_resample(*data)
assert_allclose(X_res_1, X_res_2)
assert_array_equal(y_res_1, y_res_2)
| mit |
jcatw/scnn | scnn/data.py | 1 | 6116 | __author__ = 'jatwood'
import numpy as np
import cPickle as cp
import inspect
import os
current_dir = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
def parse_cora(plot=False):
    # Parse the Cora citation dataset (Python 2 module: uses file.xreadlines
    # and print statements).
    #
    # Reads ``cora.content`` (one paper per line: id, binary word features,
    # class label) and ``cora.cites`` (tab-separated citation pairs), and
    # returns:
    #   adj      : (n_papers, n_papers) float32 symmetric adjacency matrix
    #   features : (n_papers, n_features) float32 bag-of-words matrix
    #   labels   : (n_papers, 7) int32 one-hot class matrix
    # If ``plot`` is True, also writes network/adjacency figures to ../data/.
    path = "%s/data/cora/" % (current_dir,)

    id2index = {}

    label2index = {
        'Case_Based': 0,
        'Genetic_Algorithms': 1,
        'Neural_Networks': 2,
        'Probabilistic_Methods': 3,
        'Reinforcement_Learning': 4,
        'Rule_Learning': 5,
        'Theory': 6
    }

    features = []
    labels = []

    with open(path + 'cora.content', 'r') as f:
        i = 0
        for line in f.xreadlines():
            items = line.strip().split('\t')

            id = items[0]

            # 1-hot encode labels
            label = np.zeros(len(label2index))
            label[label2index[items[-1]]] = 1
            labels.append(label)

            # parse features (all columns between the id and the label)
            features.append([int(x) for x in items[1:-1]])

            # map paper id -> row index for the adjacency matrix below
            id2index[id] = i
            i += 1

    features = np.asarray(features, dtype='float32')
    labels = np.asarray(labels, dtype='int32')

    n_papers = len(id2index)

    adj = np.zeros((n_papers, n_papers), dtype='float32')

    with open(path + 'cora.cites', 'r') as f:
        for line in f.xreadlines():
            items = line.strip().split('\t')
            adj[ id2index[items[0]], id2index[items[1]] ] = 1.0
            # undirected
            adj[ id2index[items[1]], id2index[items[0]] ] = 1.0

    if plot:
        # Optional diagnostics: plot the citation graph and the adjacency
        # matrix (imports are local so the deps are only needed when used).
        import networkx as nx
        import matplotlib.pyplot as plt

        #G = nx.from_numpy_matrix(adj, nx.DiGraph())
        G = nx.from_numpy_matrix(adj, nx.Graph())
        print G.order()
        print G.size()
        plt.figure()
        nx.draw(G, node_size=10, edge_size=1)
        plt.savefig('../data/cora_net.pdf')
        plt.figure()
        plt.imshow(adj)
        plt.savefig('../data/cora_adj.pdf')

    return adj.astype('float32'), features.astype('float32'), labels.astype('int32')
def parse_pubmed():
    """Parse the Pubmed-Diabetes citation dataset into dense numpy arrays.

    Returns (data_A, data_X, data_Y):
      data_A : (19717, 19717) float32 adjacency matrix, symmetrized even
               though the source edge file is directed
      data_X : (19717, 500) float32 node-feature matrix; feature columns
               are assigned in order of first appearance in the file
      data_Y : (19717, 3) int32 one-hot class labels
    """
    path = '%s/data/Pubmed-Diabetes/data/' % (current_dir,)
    # Dataset dimensions are fixed and known a priori.
    n_nodes = 19717
    n_features = 500
    n_classes = 3
    data_X = np.zeros((n_nodes, n_features), dtype='float32')
    data_Y = np.zeros((n_nodes, n_classes), dtype='int32')
    paper_to_index = {}    # paper id -> row index
    feature_to_index = {}  # feature name -> column index
    # parse nodes
    with open(path + 'Pubmed-Diabetes.NODE.paper.tab','r') as node_file:
        # first two lines are headers
        node_file.readline()
        node_file.readline()
        k = 0  # next unused feature column
        for i,line in enumerate(node_file.xreadlines()):
            items = line.strip().split('\t')
            paper_id = items[0]
            paper_to_index[paper_id] = i
            # label=[1,2,3]
            label = int(items[1].split('=')[-1]) - 1 # subtract 1 to zero-count
            data_Y[i,label] = 1.
            # f1=val1 \t f2=val2 \t ... \t fn=valn summary=...
            features = items[2:-1]  # drop id/label prefix and trailing summary
            for feature in features:
                parts = feature.split('=')
                fname = parts[0]
                fvalue = float(parts[1])
                if fname not in feature_to_index:
                    # assign columns on first sight of each feature name
                    feature_to_index[fname] = k
                    k += 1
                data_X[i, feature_to_index[fname]] = fvalue
    # parse graph
    data_A = np.zeros((n_nodes, n_nodes), dtype='float32')
    with open(path + 'Pubmed-Diabetes.DIRECTED.cites.tab','r') as edge_file:
        # first two lines are headers
        edge_file.readline()
        edge_file.readline()
        for i,line in enumerate(edge_file.xreadlines()):
            # edge_id \t paper:tail \t | \t paper:head
            items = line.strip().split('\t')
            edge_id = items[0]
            tail = items[1].split(':')[-1]
            head = items[3].split(':')[-1]
            # store both directions: the returned adjacency is undirected
            data_A[paper_to_index[tail],paper_to_index[head]] = 1.0
            data_A[paper_to_index[head],paper_to_index[tail]] = 1.0
    return data_A, data_X, data_Y
def parse_nci(graph_name='nci1.graph', with_structural_features=False):
path = "%s/data/" % (current_dir,)
if graph_name == 'nci1.graph':
maxval = 37
n_classes = 2
elif graph_name == 'nci109.graph':
maxval = 38
n_classes = 2
elif graph_name == 'mutag.graph':
maxval = 7
n_classes = 2
elif graph_name == 'ptc.graph':
maxval = 22
n_classes = 2
elif graph_name == 'enzymes.graph':
maxval = 3
n_classes = 6
with open(path+graph_name,'r') as f:
raw = cp.load(f)
n_graphs = len(raw['graph'])
A = []
rX = []
Y = np.zeros((n_graphs, n_classes), dtype='int32')
for i in range(n_graphs):
# Set label
class_label = raw['labels'][i]
if n_classes == 2:
if class_label == 1:
Y[i,1] = 1
else:
Y[i,0] = 1
else:
Y[i, class_label-1] = 1
# Parse graph
G = raw['graph'][i]
n_nodes = len(G)
a = np.zeros((n_nodes,n_nodes), dtype='float32')
x = np.zeros((n_nodes,maxval), dtype='float32')
for node, meta in G.iteritems():
label = meta['label'][0] - 1
x[node, label] = 1
for neighbor in meta['neighbors']:
a[node, neighbor] = 1
A.append(a)
rX.append(x)
print Y.sum(0)
if with_structural_features:
import networkx as nx
for i in range(len(rX)):
struct_feat = np.zeros((rX[i].shape[0], 3))
# degree
struct_feat[:,0] = A[i].sum(1)
G = nx.from_numpy_matrix(A[i])
# pagerank
prank = nx.pagerank_numpy(G)
struct_feat[:,1] = np.asarray([prank[k] for k in range(A[i].shape[0])])
# clustering
clust = nx.clustering(G)
struct_feat[:,2] = np.asarray([clust[k] for k in range(A[i].shape[0])])
rX[i] = np.hstack((rX[i],struct_feat))
return A, rX, Y
| mit |
harsham05/image_space | flann_index/image_match.py | 12 | 4269 | # import the necessary packages
from optparse import OptionParser
from scipy.spatial import distance as dist
import matplotlib.pyplot as plt
import numpy as np
import argparse
import glob
import cv2
import sys
import pickle
###########################
def image_match_histogram( all_files, options ):
    """Compute color-histogram features for a list of images, then derive
    pairwise affinities, k-nearest neighbours and a k-means clustering.

    Pickles five result files into ``options.opath``: color_feature.p,
    color_matrix.p, color_affinity.p, color_knn.p, color_clustering.p.
    Python 2 code (print statements, cv2.cv constants).

    Parameters
    ----------
    all_files : list of image file names (joined to options.ipath if set)
    options : parsed OptionParser options (uses ipath, opath, top,
        cluster_count)
    """
    histograms = {}   # file name -> L1-normalized 8x8x8 color histogram
    image_files = []  # only the files that were actually readable
    # loop over all images
    for (i, fname) in enumerate(all_files):
        if options.ipath:
            path_fname = options.ipath + '/' + fname
        else:
            path_fname = fname
        # read in image
        image = cv2.imread( path_fname );
        if image is None:
            print path_fname + " : fail to read"
            continue
        image_files.append(fname)
        if image.shape[2] == 1:
            # NOTE(review): defensive grayscale->RGB conversion; cv2.imread
            # normally returns 3 channels, so confirm this branch can trigger.
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
        print i, path_fname, image.shape
        # 8 bins per channel over all three color channels
        v = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
        v = v.flatten()
        hist = v / sum(v)  # normalize so differently sized images compare
        histograms[fname] = hist
    pickle.dump( histograms, open( options.opath+"/color_feature.p","wb") )
    # feature matrix: one row per readable image, in image_files order
    # (note: relies on `hist` from the last loop iteration for the width)
    feature_matrix = np.zeros( (len(histograms), len(hist)) )
    for (i,fi) in enumerate(image_files):
        feature_matrix[i,:] = histograms[image_files[i]]
    pickle.dump( feature_matrix, open( options.opath+"/color_matrix.p","wb") )
    dists = np.zeros((len(image_files), len(image_files)))
    knn = {}
    # pairwise comparison: histogram intersection is symmetric, so only
    # the upper triangle is computed and mirrored
    for (i, fi) in enumerate(image_files):
        for (j, fj) in enumerate(image_files):
            if i <= j:
                d = cv2.compareHist( histograms[fi], histograms[fj], cv2.cv.CV_COMP_INTERSECT)
                dists[i,j] = d
                dists[j,i] = d
    pickle.dump( dists, open( options.opath+"/color_affinity.p","wb") )
    # K nearest neighbors: larger intersection = more similar, hence
    # reverse sort
    k=int(options.top)
    print 'knn'
    for (i, fi) in enumerate(image_files):
        vec = sorted( zip(dists[i,:], image_files), reverse = True )
        knn[fi] = vec[:k]
        print knn[fi]
    pickle.dump( knn, open( options.opath+"/color_knn.p","wb") )
    # Kmeans clustering of the raw histogram features
    term_crit = (cv2.TERM_CRITERIA_EPS, 100, 0.01)
    print feature_matrix
    ret, labels, centers = cv2.kmeans(np.float32(feature_matrix), int(options.cluster_count), term_crit, 10, cv2.KMEANS_RANDOM_CENTERS )
    label_list=[]
    for (i,l) in enumerate(labels):
        label_list.append(l[0])
    print label_list
    image_label = zip( image_files, label_list )
    print image_label
    pickle.dump( image_label, open( options.opath+"/color_clustering.p","wb") )
###########################
def main():
usage = "usage: %prog [options] image_list_file \n"
usage += " image match"
parser = OptionParser(usage=usage)
parser.add_option("-i", "--input_path", default="",
action="store", dest="ipath",
help="input path")
parser.add_option("-o", "--output_path", default=".",
action="store", dest="opath",
help="output path")
parser.add_option("-f", "--feature", default="color_histogram",
action="store", dest="feature",
help="color_histogram; sift_match;dist_info")
parser.add_option("-m", "--method", default="Intersection",
action="store", dest="method",
help="Intersection;L1;L2")
parser.add_option("-t", "--top", default="5",
action="store", dest="top",
help="Top nearest neighbors")
parser.add_option("-c", "--cluster_count", default="3",
action="store", dest="cluster_count",
help="Number of clusters")
parser.add_option("-d", "--debug", default="0",
action="store", dest="debug_mode",
help="debug intermediate results")
(options, args) = parser.parse_args()
if len(args) < 1 :
print "Need one argument: image_list_file \n"
sys.exit(1)
image_files = [line.strip() for line in open(args[0])]
if options.feature == "color_histogram":
image_match_histogram( image_files, options )
if __name__=="__main__":
main()
| apache-2.0 |
andaag/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # The sparse_coef_ property must expose coef_ as a scipy sparse
    # matrix carrying the same values.
    clf = ElasticNet()
    clf.coef_ = [1, 2, 3]
    sparse_view = clf.sparse_coef_
    assert_true(sp.isspmatrix(sparse_view))
    assert_equal(sparse_view.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
    # With normalize=True, two identical fits on the same sparse data
    # must agree, and the dual gap must close.
    X = sp.csc_matrix([[-1], [0], [1]])
    y = [-1, 0, 1]
    clf_dense = ElasticNet(fit_intercept=True, normalize=True)
    clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
    clf_dense.fit(X, y)
    clf_sparse.fit(sp.csc_matrix(X), y)
    assert_almost_equal(clf_dense.dual_gap_, 0)
    assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
    # An all-zero sparse design must fit without crashing and produce the
    # null model (zero coefficient, zero predictions, closed dual gap).
    X = sp.csc_matrix((3, 1))
    y = [0, 0, 0]
    T = np.array([[1], [2], [3]])
    model = Lasso().fit(X, y)
    predictions = model.predict(T)
    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(predictions, [0, 0, 0])
    assert_almost_equal(model.dual_gap_, 0)
def test_enet_toy_list_input():
    # ElasticNet on a tiny sparse design, for several (alpha, l1_ratio)
    # settings; coefficients and predictions are pinned to known values.
    X = sp.csc_matrix(np.array([[-1], [0], [1]]))
    Y = [-1, 0, 1]  # just a straight line
    T = np.array([[2], [3], [4]])  # test sample

    # alpha=0 is unregularized least squares; discouraged, so the
    # warning it emits is suppressed.
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    ignore_warnings(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
    # Same toy checks as the list-input variant, but building the design
    # matrix entry-by-entry as a LIL sparse matrix.
    f = ignore_warnings
    X = sp.lil_matrix((3, 1))
    X[0, 0] = -1
    X[2, 0] = 1  # X[1, 0] is left implicitly zero
    Y = [-1, 0, 1]  # the identity function

    T = sp.lil_matrix((3, 1))
    T[0, 0] = 2
    T[1, 0] = 3
    T[2, 0] = 4

    # alpha=0: equivalent to the (unregularized) lasso limit
    clf = ElasticNet(alpha=0, l1_ratio=1.0)
    f(clf.fit)(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)

    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
                     positive=False, n_targets=1):
    """Build an ill-posed sparse linear-regression problem.

    Many noisy features and comparatively few samples; only the first
    `n_informative` features influence the targets, and about half of
    the design-matrix entries are zeroed before CSC conversion.

    Returns
    -------
    X : scipy.sparse CSC matrix, shape (n_samples, n_features)
    y : ndarray, shape (n_samples,) or (n_samples, n_targets)
    """
    rng = np.random.RandomState(seed)
    # Ground-truth weights: only the leading block is informative.
    # (The draw order -- w, then X, then the mask -- fixes the output
    # for a given seed.)
    w = rng.randn(n_features, n_targets)
    w[n_informative:] = 0.0
    if positive:
        w = np.abs(w)
    X = rng.randn(n_samples, n_features)
    rnd = rng.uniform(size=(n_samples, n_features))
    X[rnd > 0.5] = 0.0  # ~50% zeros in the input signal
    y = np.dot(X, w)
    X = sp.csc_matrix(X)
    if n_targets == 1:
        y = np.ravel(y)
    return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
    # Sparse and dense ElasticNet fits must converge to the same solution
    # on a realistic (ill-posed) problem, and that solution must be sparse.
    n_samples, n_features, max_iter = 100, 100, 1000
    n_informative = 10
    X, y = make_sparse_data(n_samples, n_features, n_informative,
                            positive=positive)
    X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
    y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]

    def fit_enet(design):
        # One fit + convergence/score checks; shared by the sparse and
        # dense paths.
        clf = ElasticNet(alpha=alpha, l1_ratio=0.8,
                         fit_intercept=fit_intercept, max_iter=max_iter,
                         tol=1e-7, positive=positive, warm_start=True)
        clf.fit(design, y_train)
        assert_almost_equal(clf.dual_gap_, 0, 4)
        assert_greater(clf.score(X_test, y_test), 0.85)
        return clf

    s_clf = fit_enet(X_train)
    d_clf = fit_enet(X_train.toarray())

    # same solution either way
    assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
    assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
    # and the coefficients are genuinely sparse
    assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
    # Cover both fit_intercept settings, without and with the positivity
    # constraint (the positive case uses a much smaller alpha).
    for fit_intercept in (False, True):
        _test_sparse_enet_not_as_toy_dataset(alpha=0.1,
                                             fit_intercept=fit_intercept,
                                             positive=False)
    for fit_intercept in (False, True):
        _test_sparse_enet_not_as_toy_dataset(alpha=1e-3,
                                             fit_intercept=fit_intercept,
                                             positive=True)
def test_sparse_lasso_not_as_toy_dataset():
    # Sparse and dense Lasso must both converge on a non-trivial problem,
    # and the sparse fit must recover exactly the informative features.
    n_samples = 100
    max_iter = 1000
    n_informative = 10
    X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
    X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
    y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]

    def fit_and_check(design):
        clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter,
                    tol=1e-7)
        clf.fit(design, y_train)
        assert_almost_equal(clf.dual_gap_, 0, 4)
        assert_greater(clf.score(X_test, y_test), 0.85)
        return clf

    s_clf = fit_and_check(X_train)
    fit_and_check(X_train.toarray())

    # non-zero coefficient count == number of informative features
    assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
    # Fitting all targets at once must match fitting each target alone.
    n_targets = 3
    X, y = make_sparse_data(n_targets=n_targets)

    estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
    # XXX: There is a bug when precompute is not None!
    estimator.fit(X, y)
    coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
                                 estimator.dual_gap_)

    for k in range(n_targets):
        estimator.fit(X, y[:, k])
        assert_array_almost_equal(coef[k, :], estimator.coef_)
        assert_array_almost_equal(intercept[k], estimator.intercept_)
        assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
    # ElasticNetCV must honour l1_ratio / n_alphas, and the MSE path
    # must not depend on whether the input is sparse or dense.
    X, y = make_sparse_data()
    max_iter = 50
    n_alphas = 10
    model = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
                         l1_ratio=0.5, fit_intercept=False)
    ignore_warnings(model.fit)(X, y)  # new params
    assert_almost_equal(0.5, model.l1_ratio)
    assert_equal(n_alphas, model.n_alphas)
    assert_equal(n_alphas, len(model.alphas_))
    sparse_mse_path = model.mse_path_
    ignore_warnings(model.fit)(X.toarray(), y)  # compare with dense data
    assert_almost_equal(model.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
    # Sparse and dense inputs must give identical CV results for both
    # ElasticNetCV and LassoCV, with and without normalization.
    X, y = make_sparse_data(n_samples=40, n_features=10)
    for normalize in [True, False]:
        for Estimator, n_folds in ((ElasticNetCV, 5), (LassoCV, 4)):
            model_sparse = Estimator(max_iter=100, cv=n_folds,
                                     normalize=normalize)
            ignore_warnings(model_sparse.fit)(X, y)
            model_dense = Estimator(max_iter=100, cv=n_folds,
                                    normalize=normalize)
            ignore_warnings(model_dense.fit)(X.toarray(), y)
            assert_almost_equal(model_sparse.alpha_, model_dense.alpha_, 7)
            assert_almost_equal(model_sparse.intercept_,
                                model_dense.intercept_, 7)
            assert_array_almost_equal(model_sparse.mse_path_,
                                      model_dense.mse_path_)
            assert_array_almost_equal(model_sparse.alphas_,
                                      model_dense.alphas_)
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
# Explicit public API: names exported by
# ``from sklearn.preprocessing import *``.
__all__ = [
    'Binarizer',
    'FunctionTransformer',
    'Imputer',
    'KernelCenterer',
    'LabelBinarizer',
    'LabelEncoder',
    'MultiLabelBinarizer',
    'MinMaxScaler',
    'MaxAbsScaler',
    'Normalizer',
    'OneHotEncoder',
    'RobustScaler',
    'StandardScaler',
    'add_dummy_feature',
    'PolynomialFeatures',
    'binarize',
    'normalize',
    'scale',
    'robust_scale',
    'maxabs_scale',
    'minmax_scale',
    'label_binarize',
]
| mit |
GuessWhoSamFoo/pandas | pandas/core/strings.py | 1 | 100753 | # -*- coding: utf-8 -*-
import codecs
import re
import textwrap
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
import pandas.compat as compat
from pandas.compat import zip
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_object, is_bool_dtype, is_categorical_dtype, is_integer,
is_list_like, is_object_dtype, is_re, is_scalar, is_string_like)
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import take_1d
from pandas.core.base import NoNewAttributesMixin
import pandas.core.common as com
# Codecs treated as having optimized CPython implementations
# (presumably used to pick a fast encode path -- confirm at call sites,
# which are outside this chunk).
_cpython_optimized_encoders = (
    "utf-8", "utf8", "latin-1", "latin1", "iso-8859-1", "mbcs", "ascii"
)
# Decoding additionally covers the UTF-16/32 codecs.
_cpython_optimized_decoders = _cpython_optimized_encoders + (
    "utf-16", "utf-32"
)
# Shared docstring templates, filled in and attached to the string
# accessor methods elsewhere in this module.
_shared_docs = dict()
def cat_core(list_of_columns, sep):
    """
    Auxiliary function for :meth:`str.cat`

    Parameters
    ----------
    list_of_columns : list of numpy arrays
        List of arrays to be concatenated with sep;
        these arrays may not contain NaNs!
    sep : string
        The separator string for concatenating the columns

    Returns
    -------
    nd.array
        The concatenation of list_of_columns with sep
    """
    # Interleave the separator between consecutive columns, then let
    # numpy's element-wise addition reduce the stack to one array.
    interleaved = []
    for column in list_of_columns:
        if interleaved:
            interleaved.append(sep)
        interleaved.append(column)
    return np.sum(interleaved, axis=0)
def _na_map(f, arr, na_result=np.nan, dtype=object):
    # Like _map, but with NA masking switched on: missing entries are
    # skipped and filled with ``na_result``.
    return _map(f, arr, na_value=na_result, na_mask=True, dtype=dtype)
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
    """
    Apply ``f`` element-wise over ``arr``.

    Parameters
    ----------
    f : callable
        Function applied to each element.
    arr : Series, ndarray, or array-like
    na_mask : bool, default False
        If True, skip missing values and fill them with ``na_value``.
    na_value : scalar, default np.nan
        Fill value used for missing entries when ``na_mask`` is True.
    dtype : numpy dtype, default object
        dtype of the result when ``arr`` is empty.

    Returns
    -------
    ndarray
    """
    if not len(arr):
        return np.ndarray(0, dtype=dtype)

    if isinstance(arr, ABCSeries):
        arr = arr.values
    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr, dtype=object)
    if na_mask:
        mask = isna(arr)
        try:
            convert = not all(mask)
            result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
        except (TypeError, AttributeError) as e:
            # Reraise the exception if callable `f` got wrong number of args.
            # The user may want to be warned by this, instead of getting NaN
            if compat.PY2:
                p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
            else:
                p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
                         r'(?(3)required )positional arguments?')

            if len(e.args) >= 1 and re.search(p_err, e.args[0]):
                raise e

            def g(x):
                # Fallback: map element-by-element, substituting na_value
                # wherever f itself raises on an element.
                try:
                    return f(x)
                except (TypeError, AttributeError):
                    return na_value

            return _map(g, arr, dtype=dtype)
        if na_value is not np.nan:
            np.putmask(result, mask, na_value)
            if result.dtype == object:
                result = lib.maybe_convert_objects(result)
        return result
    else:
        return lib.map_infer(arr, f)
def str_count(arr, pat, flags=0):
    """
    Count occurrences of pattern in each string of the Series/Index.

    Parameters
    ----------
    pat : str
        Valid regular expression.
    flags : int, default 0 (no flags)
        Flags for the ``re`` module.

    Returns
    -------
    counts : Series or Index
        Same type as the calling object, containing the integer counts
        (NaN is propagated for missing values).

    See Also
    --------
    re : Standard library module for regular expressions.
    str.count : Standard library version, without regular expression support.

    Notes
    -----
    Regex metacharacters in `pat` (e.g. ``'$'``) must be escaped to be
    counted as literal characters.
    """
    compiled = re.compile(pat, flags=flags)

    def count_matches(x):
        return len(compiled.findall(x))

    return _na_map(count_matches, arr, dtype=int)
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
    """
    Test if pattern or regex is contained within a string of a Series or
    Index.

    Parameters
    ----------
    pat : str
        Character sequence or regular expression.
    case : bool, default True
        If True, case sensitive.
    flags : int, default 0 (no flags)
        Flags to pass through to the re module, e.g. re.IGNORECASE.
    na : default NaN
        Fill value for missing values.
    regex : bool, default True
        If True, treat `pat` as a regular expression; if False, match it
        as a literal string.

    Returns
    -------
    Series or Index of boolean values
        Whether the given pattern is contained within each element.

    See Also
    --------
    match : Analogous, but stricter, relying on re.match instead of
        re.search.
    Series.str.startswith : Test if the start of each string element
        matches a pattern.
    Series.str.endswith : Same as startswith, but tests the end of string.
    """
    if regex:
        if not case:
            flags |= re.IGNORECASE

        compiled = re.compile(pat, flags=flags)

        if compiled.groups > 0:
            # Grouped patterns work here, but the user probably wanted
            # str.extract -- warn rather than silently discard groups.
            warnings.warn("This pattern has match groups. To actually get the"
                          " groups, use str.extract.", UserWarning,
                          stacklevel=3)

        return _na_map(lambda x: bool(compiled.search(x)), arr, na,
                       dtype=bool)

    if case:
        return _na_map(lambda x: pat in x, arr, na, dtype=bool)

    # Case-insensitive literal containment: uppercase both sides first.
    upper_pat = pat.upper()
    uppered = _na_map(lambda x: x.upper(), arr)
    return _na_map(lambda x: upper_pat in x, uppered, na, dtype=bool)
def str_startswith(arr, pat, na=np.nan):
    """
    Test if the start of each string element matches a pattern.

    Equivalent to :meth:`str.startswith`.

    Parameters
    ----------
    pat : str
        Character sequence. Regular expressions are not accepted.
    na : object, default NaN
        Object shown if element tested is not a string.

    Returns
    -------
    Series or Index of bool
        Booleans indicating whether each element starts with `pat`.

    See Also
    --------
    str.startswith : Python standard library string method.
    Series.str.endswith : Same as startswith, but tests the end of string.
    Series.str.contains : Tests if string element contains a pattern.
    """
    return _na_map(lambda x: x.startswith(pat), arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
    """
    Test if the end of each string element matches a pattern.

    Equivalent to :meth:`str.endswith`.

    Parameters
    ----------
    pat : str
        Character sequence. Regular expressions are not accepted.
    na : object, default NaN
        Object shown if element tested is not a string.

    Returns
    -------
    Series or Index of bool
        Booleans indicating whether each element ends with `pat`.

    See Also
    --------
    str.endswith : Python standard library string method.
    Series.str.startswith : Same as endswith, but tests the start of string.
    Series.str.contains : Tests if string element contains a pattern.
    """
    return _na_map(lambda x: x.endswith(pat), arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
    r"""
    Replace occurrences of pattern/regex in the Series/Index with some
    other string. Equivalent to :meth:`str.replace` or :func:`re.sub`.

    Parameters
    ----------
    pat : string or compiled regex
        String can be a character sequence or regular expression.
    repl : string or callable
        Replacement string or a callable. The callable is passed the
        regex match object and must return a replacement string to be
        used. See :func:`re.sub`.
    n : int, default -1 (all)
        Number of replacements to make from start.
    case : boolean, default None
        If True, case sensitive (the default if `pat` is a string);
        set to False for case insensitive. Cannot be set if `pat` is a
        compiled regex.
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE. Cannot be set if `pat` is
        a compiled regex.
    regex : boolean, default True
        If True, assume the passed-in pattern is a regular expression;
        if False, treat it as a literal string. Cannot be False when
        `pat` is a compiled regex or `repl` is a callable.

    Returns
    -------
    Series or Index of object
        A copy of the object with all matching occurrences of `pat`
        replaced by `repl`.

    Raises
    ------
    ValueError
        * if `regex` is False and `repl` is a callable or `pat` is a
          compiled regex
        * if `pat` is a compiled regex and `case` or `flags` is set

    Notes
    -----
    When `pat` is a compiled regex, all flags should be included in the
    compiled regex itself.
    """
    # Validate repl first (GH 13438, GH 15055).
    if not (is_string_like(repl) or callable(repl)):
        raise TypeError("repl must be a string or callable")

    is_compiled_re = is_re(pat)
    if regex:
        if is_compiled_re:
            if (case is not None) or (flags != 0):
                raise ValueError("case and flags cannot be set"
                                 " when pat is a compiled regex")
        else:
            # Plain-string pattern: default to case-sensitive, folding
            # `case` into the flags.
            if case is None:
                case = True
            if case is False:
                flags |= re.IGNORECASE

        # Only reach for re.sub when str.replace cannot express the
        # operation (real regex, flags, or a callable replacement).
        if is_compiled_re or len(pat) > 1 or flags or callable(repl):
            n = n if n >= 0 else 0  # re.sub uses count=0 to mean "all"
            compiled = re.compile(pat, flags=flags)
            f = lambda x: compiled.sub(repl=repl, string=x, count=n)
        else:
            f = lambda x: x.replace(pat, repl, n)
    else:
        if is_compiled_re:
            raise ValueError("Cannot use a compiled regex as replacement "
                             "pattern with regex=False")
        if callable(repl):
            raise ValueError("Cannot use a callable replacement when "
                             "regex=False")
        f = lambda x: x.replace(pat, repl, n)

    return _na_map(f, arr)
def str_repeat(arr, repeats):
    """
    Duplicate each string in the Series or Index.

    Parameters
    ----------
    repeats : int or sequence of int
        Same value for all (int) or different value per (sequence).

    Returns
    -------
    Series or Index of object
        The repeated string objects.
    """
    if is_scalar(repeats):
        # One repeat count for every element. bytes and text need
        # separate __mul__ calls, hence the try/except.
        def scalar_rep(x):
            try:
                return compat.binary_type.__mul__(x, repeats)
            except TypeError:
                return compat.text_type.__mul__(x, repeats)

        return _na_map(scalar_rep, arr)

    # Element-wise repeat counts.
    def rep(x, r):
        try:
            return compat.binary_type.__mul__(x, r)
        except TypeError:
            return compat.text_type.__mul__(x, r)

    repeats = np.asarray(repeats, dtype=object)
    return libops.vec_binop(com.values_from_object(arr), repeats, rep)
def str_match(arr, pat, case=True, flags=0, na=np.nan):
    """
    Determine if each string matches a regular expression (anchored at
    the start of the string, via :func:`re.match`).

    Parameters
    ----------
    pat : string
        Character sequence or regular expression
    case : boolean, default True
        If True, case sensitive
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE
    na : default NaN, fill value for missing values

    Returns
    -------
    Series/array of boolean values

    See Also
    --------
    contains : Analogous, but less strict, relying on re.search instead
        of re.match.
    extract : Extract matched groups.
    """
    if not case:
        flags |= re.IGNORECASE

    compiled = re.compile(pat, flags=flags)
    return _na_map(lambda x: bool(compiled.match(x)), arr, na, dtype=bool)
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def _groups_or_na_fun(regex):
    """Build a callable returning a subject's first-match capture groups.

    Used in both extract_noexpand and extract_frame. Non-strings and
    non-matching subjects map to a row of NaN (one per capture group);
    unmatched optional groups within a match also become NaN.
    """
    if regex.groups == 0:
        raise ValueError("pattern contains no capture groups")
    na_row = [np.nan] * regex.groups

    def extract_groups(subject):
        if not isinstance(subject, compat.string_types):
            return na_row
        match = regex.search(subject)
        if match is None:
            return na_row
        return [np.nan if group is None else group
                for group in match.groups()]

    return extract_groups
def _str_extract_noexpand(arr, pat, flags=0):
    """
    Find groups in each string in the Series using passed regular
    expression. This function is called from
    str_extract(expand=False), and can return Series, DataFrame, or
    Index.
    """
    from pandas import DataFrame, Index

    regex = re.compile(pat, flags=flags)
    groups_or_na = _groups_or_na_fun(regex)

    if regex.groups == 1:
        # single capture group -> 1-dim result; carry the group's name
        # (None when the group is unnamed)
        result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
        name = _get_single_group_name(regex)
    else:
        # multiple groups can only be represented as a DataFrame, which an
        # Index caller cannot be wrapped into
        if isinstance(arr, Index):
            raise ValueError("only one regex group is supported with Index")
        name = None
        # column labels: named groups keep their name, unnamed ones get
        # their zero-based group number
        names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
        columns = [names.get(1 + i, i) for i in range(regex.groups)]
        if arr.empty:
            result = DataFrame(columns=columns, dtype=object)
        else:
            result = DataFrame(
                [groups_or_na(val) for val in arr],
                columns=columns,
                index=arr.index,
                dtype=object)
    return result, name
def _str_extract_frame(arr, pat, flags=0):
    """
    For each subject string in the Series, extract groups from the
    first match of regular expression pat. This function is called from
    str_extract(expand=True), and always returns a DataFrame.
    """
    from pandas import DataFrame

    regex = re.compile(pat, flags=flags)
    groups_or_na = _groups_or_na_fun(regex)

    # named groups become column labels, unnamed ones keep their number
    group_names = dict(zip(regex.groupindex.values(),
                           regex.groupindex.keys()))
    columns = [group_names.get(1 + i, i) for i in range(regex.groups)]

    if len(arr) == 0:
        return DataFrame(columns=columns, dtype=object)

    # plain iterables (e.g. ndarray) have no .index; fall back to None
    result_index = getattr(arr, 'index', None)
    rows = [groups_or_na(val) for val in arr]
    return DataFrame(rows, columns=columns, index=result_index, dtype=object)
def str_extract(arr, pat, flags=0, expand=True):
    r"""
    Extract capture groups in the regex `pat` as columns in a DataFrame.

    For each subject string in the Series, groups from the *first* match
    of regular expression `pat` are extracted.

    Parameters
    ----------
    pat : string
        Regular expression pattern with capturing groups.
    flags : int, default 0 (no flags)
        Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
        modify regular expression matching for things like case,
        spaces, etc. For more details, see :mod:`re`.
    expand : bool, default True
        If True, return DataFrame with one column per capture group.
        If False, return a Series/Index if there is one capture group
        or DataFrame if there are multiple capture groups.

        .. versionadded:: 0.18.0

    Returns
    -------
    DataFrame or Series or Index
        One row per subject string and one column per group. Named
        capture groups supply the column names; otherwise group numbers
        are used. Result columns are always object dtype, even when
        nothing matches. With ``expand=False`` and a single capture
        group, a Series (for Series input) or Index (for Index input)
        is returned instead.

    See Also
    --------
    extractall : Returns all matches (not just the first match).

    Examples
    --------
    >>> s = pd.Series(['a1', 'b2', 'c3'])
    >>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
      letter digit
    0      a     1
    1      b     2
    2    NaN   NaN

    >>> s.str.extract(r'[ab](\d)', expand=False)
    0      1
    1      2
    2    NaN
    dtype: object
    """
    if not isinstance(expand, bool):
        raise ValueError("expand must be True or False")
    if expand:
        return _str_extract_frame(arr._orig, pat, flags=flags)
    result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)
    return arr._wrap_result(result, name=name, expand=expand)
def str_extractall(arr, pat, flags=0):
    r"""
    Extract capture groups in the regex `pat` from *all* matches.

    For each subject string in the Series, extract groups from all
    matches of regular expression pat. When each subject string in the
    Series has exactly one match, extractall(pat).xs(0, level='match')
    is the same as extract(pat).

    .. versionadded:: 0.18.0

    Parameters
    ----------
    pat : str
        Regular expression pattern with capturing groups.
    flags : int, default 0 (no flags)
        A ``re`` module flag, for example ``re.IGNORECASE``. Multiple
        flags can be combined with the bitwise OR operator, for example
        ``re.IGNORECASE | re.MULTILINE``.

    Returns
    -------
    DataFrame
        One row per match and one column per capture group. Rows carry a
        ``MultiIndex`` whose first levels come from the subject Series'
        index and whose last level, named 'match', counts the matches
        within each subject. Named groups supply column names; otherwise
        group numbers are used.

    See Also
    --------
    extract : Returns first match only (not all matches).

    Examples
    --------
    >>> s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
    >>> s.str.extractall(r"[ab](?P<digit>\d)")
            digit
      match
    A 0         1
      1         2
    B 0         1
    """
    regex = re.compile(pat, flags=flags)
    # the regex must contain capture groups.
    if regex.groups == 0:
        raise ValueError("pattern contains no capture groups")

    if isinstance(arr, ABCIndexClass):
        arr = arr.to_series().reset_index(drop=True)

    # column labels: named groups keep their name, unnamed ones keep
    # their zero-based group number
    group_names = dict(zip(regex.groupindex.values(),
                           regex.groupindex.keys()))
    columns = [group_names.get(1 + i, i) for i in range(regex.groups)]

    rows = []
    keys = []
    has_multi_index = arr.index.nlevels > 1
    for subject_key, subject in arr.iteritems():
        if not isinstance(subject, compat.string_types):
            continue
        # normalize the key to a tuple so the match number can be appended
        if not has_multi_index:
            subject_key = (subject_key, )
        for match_i, match_tuple in enumerate(regex.findall(subject)):
            if isinstance(match_tuple, compat.string_types):
                match_tuple = (match_tuple,)
            # findall reports unmatched optional groups as "" -> NaN
            rows.append([np.NaN if group == "" else group
                         for group in match_tuple])
            keys.append(tuple(subject_key + (match_i, )))

    from pandas import MultiIndex
    index = MultiIndex.from_tuples(keys, names=arr.index.names + ["match"])
    return arr._constructor_expanddim(rows, index=index, columns=columns)
def str_get_dummies(arr, sep='|'):
    """
    Split each string in the Series by sep and return a frame of
    dummy/indicator variables.

    Parameters
    ----------
    sep : string, default "|"
        String to split on.

    Returns
    -------
    dummies : DataFrame

    See Also
    --------
    get_dummies

    Examples
    --------
    >>> pd.Series(['a|b', 'a', 'a|c']).str.get_dummies()
       a  b  c
    0  1  1  0
    1  1  0  0
    2  1  0  1
    """
    # missing values contribute no tags at all
    arr = arr.fillna('')
    # surround every element with `sep` so each tag occurs as `sep+tag+sep`
    # and substring search cannot produce false positives
    try:
        bounded = sep + arr + sep
    except TypeError:
        bounded = sep + arr.astype(str) + sep

    labels = set()
    for parts in bounded.str.split(sep):
        labels.update(parts)
    labels = sorted(labels - {""})

    indicator = np.empty((len(bounded), len(labels)), dtype=np.int64)
    for col, label in enumerate(labels):
        needle = sep + label + sep
        indicator[:, col] = lib.map_infer(bounded.values,
                                          lambda x: needle in x)
    return indicator, labels
def str_join(arr, sep):
    """
    Join lists contained as elements in the Series/Index with passed
    delimiter, equivalent to :meth:`str.join`.

    Parameters
    ----------
    sep : str
        Delimiter to use between list entries.

    Returns
    -------
    Series/Index: object
        The list entries concatenated by intervening occurrences of the
        delimiter.

    Raises
    ------
    AttributeError
        If the supplied Series contains neither strings nor lists.

    See Also
    --------
    str.join : Standard library version of this method.
    Series.str.split : Split strings around given separator/delimiter.

    Notes
    -----
    If any of the list items is not a string object, the result of the
    join will be `NaN`.

    Examples
    --------
    >>> s = pd.Series([['lion', 'elephant', 'zebra'],
    ...                ['cat', np.nan, 'dog']])
    >>> s.str.join('-')
    0    lion-elephant-zebra
    1                    NaN
    dtype: object
    """
    joiner = sep.join
    return _na_map(joiner, arr)
def str_findall(arr, pat, flags=0):
    """
    Find all occurrences of pattern or regular expression in the
    Series/Index, equivalent to applying :func:`re.findall` to every
    element.

    Parameters
    ----------
    pat : string
        Pattern or regular expression.
    flags : int, default 0
        ``re`` module flags, e.g. `re.IGNORECASE` (default is 0, which
        means no flags).

    Returns
    -------
    Series/Index of lists of strings
        All non-overlapping matches of pattern or regular expression in
        each string of this Series/Index.

    See Also
    --------
    count : Count occurrences of pattern or regular expression in each
        string of the Series/Index.
    extractall : For each string in the Series, extract groups from all
        matches of regular expression and return a DataFrame with one row
        for each match and one column for each group.
    re.findall : The equivalent ``re`` function.

    Examples
    --------
    >>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])
    >>> s.str.findall('on')
    0    [on]
    1    [on]
    2      []
    dtype: object

    >>> s.str.findall('MONKEY', flags=re.IGNORECASE)
    0          []
    1    [Monkey]
    2          []
    dtype: object
    """
    regex = re.compile(pat, flags=flags)
    return _na_map(regex.findall, arr)
def str_find(arr, sub, start=0, end=None, side='left'):
    """
    Return indexes in each strings in the Series/Index where the
    substring is fully contained between [start:end]. Return -1 on
    failure.

    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    side : {'left', 'right'}, default 'left'
        Specifies a starting side, equivalent to ``find`` or ``rfind``

    Returns
    -------
    found : Series/Index of integer values
    """
    if not isinstance(sub, compat.string_types):
        msg = 'expected a string object, not {0}'
        raise TypeError(msg.format(type(sub).__name__))

    method_names = {'left': 'find', 'right': 'rfind'}
    if side not in method_names:  # pragma: no cover
        raise ValueError('Invalid side')
    method = method_names[side]

    # only pass `end` through when it was given, matching str.find/rfind
    args = (sub, start) if end is None else (sub, start, end)
    finder = lambda x: getattr(x, method)(*args)
    return _na_map(finder, arr, dtype=int)
def str_index(arr, sub, start=0, end=None, side='left'):
    """
    Return indexes in each string where the substring is fully contained
    between [start:end]. Same as :func:`str_find`, except that instead of
    returning -1 on failure the underlying ``str.index``/``str.rindex``
    raises ValueError when the substring is not found.

    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    side : {'left', 'right'}, default 'left'
        Specifies a starting side, equivalent to ``index`` or ``rindex``

    Returns
    -------
    found : Series/Index of integer values
    """
    if not isinstance(sub, compat.string_types):
        msg = 'expected a string object, not {0}'
        raise TypeError(msg.format(type(sub).__name__))

    if side == 'left':
        method = 'index'
    elif side == 'right':
        method = 'rindex'
    else:  # pragma: no cover
        raise ValueError('Invalid side')

    if end is None:
        f = lambda x: getattr(x, method)(sub, start)
    else:
        f = lambda x: getattr(x, method)(sub, start, end)

    return _na_map(f, arr, dtype=int)
def str_pad(arr, width, side='left', fillchar=' '):
    """
    Pad strings in the Series/Index up to width.

    Parameters
    ----------
    width : int
        Minimum width of resulting string; additional characters will be
        filled with character defined in `fillchar`.
    side : {'left', 'right', 'both'}, default 'left'
        Side from which to fill resulting string.
    fillchar : str, default ' '
        Additional character for filling, default is whitespace.

    Returns
    -------
    Series or Index of object
        Returns Series or Index with minimum number of char in object.

    See Also
    --------
    Series.str.rjust : Equivalent to ``Series.str.pad(side='left')``.
    Series.str.ljust : Equivalent to ``Series.str.pad(side='right')``.
    Series.str.center : Equivalent to ``Series.str.pad(side='both')``.
    Series.str.zfill : Equivalent to
        ``Series.str.pad(side='left', fillchar='0')``.

    Examples
    --------
    >>> s = pd.Series(["caribou", "tiger"])
    >>> s.str.pad(width=10, side='right', fillchar='-')
    0    caribou---
    1    tiger-----
    dtype: object
    """
    if not isinstance(fillchar, compat.string_types):
        msg = 'fillchar must be a character, not {0}'
        raise TypeError(msg.format(type(fillchar).__name__))
    if len(fillchar) != 1:
        raise TypeError('fillchar must be a character, not str')
    if not is_integer(width):
        msg = 'width must be of integer type, not {0}'
        raise TypeError(msg.format(type(width).__name__))

    padders = {
        'left': lambda x: x.rjust(width, fillchar),
        'right': lambda x: x.ljust(width, fillchar),
        'both': lambda x: x.center(width, fillchar),
    }
    if side not in padders:  # pragma: no cover
        raise ValueError('Invalid side')
    return _na_map(padders[side], arr)
def str_split(arr, pat=None, n=None):
    """Split each string around ``pat``.

    ``pat=None`` splits on whitespace; a single-character ``pat`` uses
    plain ``str.split``; a longer ``pat`` is compiled and split as a
    regular expression. ``n`` limits the number of splits.
    """
    if pat is not None and len(pat) > 1:
        # multi-character pattern: treated as a regular expression;
        # re.split uses maxsplit=0 for "no limit"
        maxsplit = 0 if n is None or n == -1 else n
        regex = re.compile(pat)
        f = lambda x: regex.split(x, maxsplit=maxsplit)
    else:
        # str.split uses -1 for "no limit"
        howmany = -1 if n is None or n == 0 else n
        f = lambda x: x.split(pat, howmany)
    return _na_map(f, arr)
def str_rsplit(arr, pat=None, n=None):
    """Split each string around ``pat``, starting from the end.

    ``pat=None`` splits on whitespace; ``n`` limits the number of splits
    (None or 0 means unlimited).
    """
    maxsplit = -1 if n is None or n == 0 else n
    return _na_map(lambda x: x.rsplit(pat, maxsplit), arr)
def str_slice(arr, start=None, stop=None, step=None):
    """
    Slice substrings from each element in the Series or Index.

    Parameters
    ----------
    start : int, optional
        Start position for slice operation.
    stop : int, optional
        Stop position for slice operation.
    step : int, optional
        Step size for slice operation.

    Returns
    -------
    Series or Index of object
        Series or Index from sliced substring from original string
        object.

    See Also
    --------
    Series.str.slice_replace : Replace a slice with a string.
    Series.str.get : Return element at position.
        Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
        being the position.

    Examples
    --------
    >>> s = pd.Series(["koala", "fox", "chameleon"])
    >>> s.str.slice(start=0, stop=5, step=3)
    0    kl
    1     f
    2    cm
    dtype: object

    Equivalent behaviour to ``s.str[0:5:3]``.
    """
    window = slice(start, stop, step)
    return _na_map(lambda x: x[window], arr)
def str_slice_replace(arr, start=None, stop=None, repl=None):
    """
    Replace a positional slice of a string with another value.

    Parameters
    ----------
    start : int, optional
        Left index position to use for the slice. If not specified
        (None), the slice is unbounded on the left, i.e. slice from the
        start of the string.
    stop : int, optional
        Right index position to use for the slice. If not specified
        (None), the slice is unbounded on the right, i.e. slice until the
        end of the string.
    repl : str, optional
        String for replacement. If not specified (None), the sliced
        region is replaced with an empty string.

    Returns
    -------
    replaced : Series or Index
        Same type as the original object.

    See Also
    --------
    Series.str.slice : Just slicing without replacement.

    Examples
    --------
    >>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
    >>> s.str.slice_replace(start=1, stop=3, repl='X')
    0       aX
    1       aX
    2       aX
    3      aXc
    4    aXde
    dtype: object
    """
    if repl is None:
        repl = ''

    def substitute(x):
        # when the [start:stop] slice selects nothing (e.g. the string is
        # shorter than `start`), splice `repl` in at `start` without
        # consuming any characters on the right
        effective_stop = start if x[start:stop] == '' else stop
        pieces = []
        if start is not None:
            pieces.append(x[:start])
        pieces.append(repl)
        if stop is not None:
            pieces.append(x[effective_stop:])
        return ''.join(pieces)

    return _na_map(substitute, arr)
def str_strip(arr, to_strip=None, side='both'):
    """
    Strip whitespace (including newlines) from each string in the
    Series/Index.

    Parameters
    ----------
    to_strip : str or unicode
        Characters to remove; None strips whitespace.
    side : {'left', 'right', 'both'}, default 'both'
        Which end(s) of each string to strip.

    Returns
    -------
    stripped : Series/Index of objects
    """
    strippers = {
        'both': lambda x: x.strip(to_strip),
        'left': lambda x: x.lstrip(to_strip),
        'right': lambda x: x.rstrip(to_strip),
    }
    if side not in strippers:  # pragma: no cover
        raise ValueError('Invalid side')
    return _na_map(strippers[side], arr)
def str_wrap(arr, width, **kwargs):
    r"""
    Wrap long strings in the Series/Index to be formatted in paragraphs
    with length less than a given width.

    This method has the same keyword parameters and defaults as
    :class:`textwrap.TextWrapper` (``expand_tabs``,
    ``replace_whitespace``, ``drop_whitespace``, ``break_long_words``,
    ``break_on_hyphens``, ...).

    Parameters
    ----------
    width : int
        Maximum line-width

    Returns
    -------
    wrapped : Series/Index of objects

    Notes
    -----
    Internally, this method uses a :class:`textwrap.TextWrapper` instance
    with default settings. To achieve behavior matching R's stringr
    library str_wrap function, use the arguments:

    - expand_tabs = False
    - replace_whitespace = True
    - drop_whitespace = True
    - break_long_words = False
    - break_on_hyphens = False

    Examples
    --------
    >>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
    >>> s.str.wrap(12)
    0             line to be\nwrapped
    1    another line\nto be\nwrapped
    """
    kwargs['width'] = width
    wrapper = textwrap.TextWrapper(**kwargs)

    def wrap_one(s):
        return '\n'.join(wrapper.wrap(s))

    return _na_map(wrap_one, arr)
def str_translate(arr, table, deletechars=None):
    """
    Map all characters in the string through the given mapping table,
    equivalent to standard :meth:`str.translate`.

    Note that the optional argument deletechars is only valid if you are
    using python 2. For python 3, character deletion should be specified
    via the table argument.

    Parameters
    ----------
    table : dict (python 3), str or None (python 2)
        In python 3, a mapping of Unicode ordinals to Unicode ordinals,
        strings, or None (:meth:`str.maketrans` builds one). In python 2,
        a string of length 256 or None (:func:`string.maketrans`).
    deletechars : str, optional (python 2)
        A string of characters to delete. This argument is only valid
        in python 2.

    Returns
    -------
    translated : Series/Index of objects
    """
    if deletechars is not None:
        # two-argument form only exists on python 2's str.translate
        if compat.PY3:
            raise ValueError("deletechars is not a valid argument for "
                             "str.translate in python 3. You should simply "
                             "specify character deletions in the table "
                             "argument")
        translator = lambda x: x.translate(table, deletechars)
    else:
        translator = lambda x: x.translate(table)
    return _na_map(translator, arr)
def str_get(arr, i):
    """
    Extract element from each component at specified position.

    Extract element from lists, tuples, or strings in each element in the
    Series/Index. Dict elements are looked up by key; out-of-bounds
    positions (and missing dict keys) yield NaN.

    Parameters
    ----------
    i : int
        Position of element to extract.

    Returns
    -------
    items : Series/Index of objects

    Examples
    --------
    >>> s = pd.Series(["String", (1, 2, 3), ["a", "b", "c"], 123])
    >>> s.str.get(1)
    0      t
    1      2
    2      b
    3    NaN
    dtype: object
    """
    def extract_one(x):
        if isinstance(x, dict):
            # dict.get already returns None for a missing key
            return x.get(i)
        if -len(x) <= i < len(x):
            return x[i]
        return np.nan

    return _na_map(extract_one, arr)
def str_decode(arr, encoding, errors="strict"):
    """
    Decode character string in the Series/Index using indicated encoding.
    Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode`
    in python3.

    Parameters
    ----------
    encoding : str
    errors : str, optional

    Returns
    -------
    decoded : Series/Index of objects
    """
    if encoding not in _cpython_optimized_decoders:
        decode_fn = codecs.getdecoder(encoding)
        mapper = lambda x: decode_fn(x, errors)[0]
    else:
        # fast path: CPython special-cases these encodings in bytes.decode
        mapper = lambda x: x.decode(encoding, errors)
    return _na_map(mapper, arr)
def str_encode(arr, encoding, errors="strict"):
    """
    Encode character string in the Series/Index using indicated encoding.
    Equivalent to :meth:`str.encode`.

    Parameters
    ----------
    encoding : str
    errors : str, optional

    Returns
    -------
    encoded : Series/Index of objects
    """
    if encoding not in _cpython_optimized_encoders:
        encode_fn = codecs.getencoder(encoding)
        mapper = lambda x: encode_fn(x, errors)[0]
    else:
        # fast path: CPython special-cases these encodings in str.encode
        mapper = lambda x: x.encode(encoding, errors)
    return _na_map(mapper, arr)
def _noarg_wrapper(f, docstring=None, **kargs):
def wrapper(self):
result = _na_map(f, self._parent, **kargs)
return self._wrap_result(result)
wrapper.__name__ = f.__name__
if docstring is not None:
wrapper.__doc__ = docstring
else:
raise ValueError('Provide docstring')
return wrapper
def _pat_wrapper(f, flags=False, na=False, **kwargs):
def wrapper1(self, pat):
result = f(self._parent, pat)
return self._wrap_result(result)
def wrapper2(self, pat, flags=0, **kwargs):
result = f(self._parent, pat, flags=flags, **kwargs)
return self._wrap_result(result)
def wrapper3(self, pat, na=np.nan):
result = f(self._parent, pat, na=na)
return self._wrap_result(result)
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
def copy(source):
    """Return a decorator copying ``source``'s docstring (if present)."""
    def decorator(target):
        doc = source.__doc__
        if doc:
            target.__doc__ = doc
        return target
    return decorator
class StringMethods(NoNewAttributesMixin):
"""
Vectorized string functions for Series and Index. NAs stay NA unless
handled otherwise by a particular method. Patterned after Python's string
methods, with some inspiration from R's stringr package.
Examples
--------
>>> s.str.split('_')
>>> s.str.replace('_', '')
"""
    def __init__(self, data):
        # data : Series or Index holding string-like values (possibly
        # categorical with string categories); _validate raises otherwise
        self._validate(data)
        self._is_categorical = is_categorical_dtype(data)

        # string ops run on the (shorter) categories array, not the codes;
        # .values.categories works for both Series/Index
        self._parent = data.values.categories if self._is_categorical else data
        # save orig to blow up categoricals to the right type
        self._orig = data
        # prevent accidental attribute creation after construction
        self._freeze()
    @staticmethod
    def _validate(data):
        """Raise AttributeError unless ``data`` can back a .str accessor.

        Accepted: object-dtype Series, Series categorical over object
        categories, or a (single-level) Index whose inferred type is
        string-like.
        """
        from pandas.core.index import Index

        if (isinstance(data, ABCSeries) and
                not ((is_categorical_dtype(data.dtype) and
                      is_object_dtype(data.values.categories)) or
                     (is_object_dtype(data.dtype)))):
            # it's neither a string series not a categorical series with
            # strings inside the categories.
            # this really should exclude all series with any non-string values
            # (instead of test for object dtype), but that isn't practical for
            # performance reasons until we have a str dtype (GH 9343)
            raise AttributeError("Can only use .str accessor with string "
                                 "values, which use np.object_ dtype in "
                                 "pandas")
        elif isinstance(data, Index):
            # can't use ABCIndex to exclude non-str
            # see src/inference.pyx which can contain string values
            allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer')
            if is_categorical_dtype(data.dtype):
                inf_type = data.categories.inferred_type
            else:
                inf_type = data.inferred_type
            if inf_type not in allowed_types:
                message = ("Can only use .str accessor with string values "
                           "(i.e. inferred_type is 'string', 'unicode' or "
                           "'mixed')")
                raise AttributeError(message)
            if data.nlevels > 1:
                # a MultiIndex has no meaningful element-wise string ops
                message = ("Can only use .str accessor with Index, not "
                           "MultiIndex")
                raise AttributeError(message)
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def __iter__(self):
i = 0
g = self.get(i)
while g.notna().any():
yield g
i += 1
g = self.get(i)
    def _wrap_result(self, result, use_codes=True,
                     name=None, expand=None, fill_value=np.nan):
        """Box a raw computation result back into a pandas object.

        Parameters
        ----------
        result : ndarray, list, or pandas object
        use_codes : boolean, default True
            For categorical input, re-expand the per-category result to
            full length via the categorical codes.
        name : name for the resulting Series/Index or its columns
        expand : boolean or None
            None infers from ``result.ndim``; True produces a
            DataFrame/MultiIndex, False a Series/Index.
        fill_value : fill used for missing entries (code -1) when
            re-expanding a categorical.
        """
        from pandas import Index, Series, MultiIndex

        # for category, we do the stuff on the categories, so blow it up
        # to the full series again
        # But for some operations, we have to do the stuff on the full values,
        # so make it possible to skip this step as the method already did this
        # before the transformation...
        if use_codes and self._is_categorical:
            # if self._orig is a CategoricalIndex, there is no .cat-accessor
            result = take_1d(result, Series(self._orig, copy=False).cat.codes,
                             fill_value=fill_value)

        if not hasattr(result, 'ndim') or not hasattr(result, 'dtype'):
            return result
        assert result.ndim < 3

        if expand is None:
            # infer from ndim if expand is not specified
            expand = False if result.ndim == 1 else True

        elif expand is True and not isinstance(self._orig, Index):
            # required when expand=True is explicitly specified
            # not needed when inferred

            def cons_row(x):
                if is_list_like(x):
                    return x
                else:
                    return [x]

            result = [cons_row(x) for x in result]
            if result:
                # propagate nan values to match longest sequence (GH 18450)
                max_len = max(len(x) for x in result)
                result = [x * max_len if len(x) == 0 or x[0] is np.nan
                          else x for x in result]

        if not isinstance(expand, bool):
            raise ValueError("expand must be True or False")

        if expand is False:
            # if expand is False, result should have the same name
            # as the original otherwise specified
            if name is None:
                name = getattr(result, 'name', None)
            if name is None:
                # do not use logical or, _orig may be a DataFrame
                # which has "name" column
                name = self._orig.name

        # Wait until we are sure result is a Series or Index before
        # checking attributes (GH 12180)
        if isinstance(self._orig, Index):
            # if result is a boolean np.array, return the np.array
            # instead of wrapping it into a boolean Index (GH 8875)
            if is_bool_dtype(result):
                return result

            if expand:
                result = list(result)
                out = MultiIndex.from_tuples(result, names=name)
                if out.nlevels == 1:
                    # We had all tuples of length-one, which are
                    # better represented as a regular Index.
                    out = out.get_level_values(0)
                return out
            else:
                return Index(result, name=name)
        else:
            index = self._orig.index
            if expand:
                cons = self._orig._constructor_expanddim
                return cons(result, columns=name, index=index)
            else:
                # Must be a Series
                cons = self._orig._constructor
                return cons(result, name=name, index=index)
def _get_series_list(self, others, ignore_index=False):
"""
Auxiliary function for :meth:`str.cat`. Turn potentially mixed input
into a list of Series (elements without an index must match the length
of the calling Series/Index).
Parameters
----------
others : Series, Index, DataFrame, np.ndarray, list-like or list-like
of objects that are Series, Index or np.ndarray (1-dim)
ignore_index : boolean, default False
Determines whether to forcefully align others with index of caller
Returns
-------
tuple : (others transformed into list of Series,
boolean whether FutureWarning should be raised)
"""
# Once str.cat defaults to alignment, this function can be simplified;
# will not need `ignore_index` and the second boolean output anymore
from pandas import Index, Series, DataFrame
# self._orig is either Series or Index
idx = self._orig if isinstance(self._orig, Index) else self._orig.index
err_msg = ('others must be Series, Index, DataFrame, np.ndarrary or '
'list-like (either containing only strings or containing '
'only objects of type Series/Index/list-like/np.ndarray)')
# Generally speaking, all objects without an index inherit the index
# `idx` of the calling Series/Index - i.e. must have matching length.
# Objects with an index (i.e. Series/Index/DataFrame) keep their own
# index, *unless* ignore_index is set to True.
if isinstance(others, Series):
warn = not others.index.equals(idx)
# only reconstruct Series when absolutely necessary
los = [Series(others.values, index=idx)
if ignore_index and warn else others]
return (los, warn)
elif isinstance(others, Index):
warn = not others.equals(idx)
los = [Series(others.values,
index=(idx if ignore_index else others))]
return (los, warn)
elif isinstance(others, DataFrame):
warn = not others.index.equals(idx)
if ignore_index and warn:
# without copy, this could change "others"
# that was passed to str.cat
others = others.copy()
others.index = idx
return ([others[x] for x in others], warn)
elif isinstance(others, np.ndarray) and others.ndim == 2:
others = DataFrame(others, index=idx)
return ([others[x] for x in others], False)
elif is_list_like(others, allow_sets=False):
others = list(others) # ensure iterators do not get read twice etc
# in case of list-like `others`, all elements must be
# either one-dimensional list-likes or scalars
if all(is_list_like(x, allow_sets=False) for x in others):
los = []
join_warn = False
depr_warn = False
# iterate through list and append list of series for each
# element (which we check to be one-dimensional and non-nested)
while others:
nxt = others.pop(0) # nxt is guaranteed list-like by above
# GH 21950 - DeprecationWarning
# only allowing Series/Index/np.ndarray[1-dim] will greatly
# simply this function post-deprecation.
if not (isinstance(nxt, (Series, Index)) or
(isinstance(nxt, np.ndarray) and nxt.ndim == 1)):
depr_warn = True
if not isinstance(nxt, (DataFrame, Series,
Index, np.ndarray)):
# safety for non-persistent list-likes (e.g. iterators)
# do not map indexed/typed objects; info needed below
nxt = list(nxt)
# known types for which we can avoid deep inspection
no_deep = ((isinstance(nxt, np.ndarray) and nxt.ndim == 1)
or isinstance(nxt, (Series, Index)))
# nested list-likes are forbidden:
# -> elements of nxt must not be list-like
is_legal = ((no_deep and nxt.dtype == object)
or all(not is_list_like(x) for x in nxt))
# DataFrame is false positive of is_legal
# because "x in df" returns column names
if not is_legal or isinstance(nxt, DataFrame):
raise TypeError(err_msg)
nxt, wnx = self._get_series_list(nxt,
ignore_index=ignore_index)
los = los + nxt
join_warn = join_warn or wnx
if depr_warn:
warnings.warn('list-likes other than Series, Index, or '
'np.ndarray WITHIN another list-like are '
'deprecated and will be removed in a future '
'version.', FutureWarning, stacklevel=3)
return (los, join_warn)
elif all(not is_list_like(x) for x in others):
return ([Series(others, index=idx)], False)
raise TypeError(err_msg)
def cat(self, others=None, sep=None, na_rep=None, join=None):
    """
    Concatenate strings in the Series/Index with given separator.

    If `others` is specified, this function concatenates the Series/Index
    and elements of `others` element-wise.
    If `others` is not passed, then all values in the Series/Index are
    concatenated into a single string with a given `sep`.

    Parameters
    ----------
    others : Series, Index, DataFrame, np.ndarray or list-like
        Series, Index, DataFrame, np.ndarray (one- or two-dimensional) and
        other list-likes of strings must have the same length as the
        calling Series/Index, with the exception of indexed objects (i.e.
        Series/Index/DataFrame) if `join` is not None.
        If others is a list-like that contains a combination of Series,
        Index or np.ndarray (1-dim), then all elements will be unpacked and
        must satisfy the above criteria individually.
        If others is None, the method returns the concatenation of all
        strings in the calling Series/Index.
    sep : str, default ''
        The separator between the different elements/columns. By default
        the empty string `''` is used.
    na_rep : str or None, default None
        Representation that is inserted for all missing values:

        - If `na_rep` is None, and `others` is None, missing values in the
          Series/Index are omitted from the result.
        - If `na_rep` is None, and `others` is not None, a row containing a
          missing value in any of the columns (before concatenation) will
          have a missing value in the result.
    join : {'left', 'right', 'outer', 'inner'}, default None
        Determines the join-style between the calling Series/Index and any
        Series/Index/DataFrame in `others` (objects without an index need
        to match the length of the calling Series/Index). If None,
        alignment is disabled, but this option will be removed in a future
        version of pandas and replaced with a default of `'left'`. To
        disable alignment, use `.values` on any Series/Index/DataFrame in
        `others`.

        .. versionadded:: 0.23.0

    Returns
    -------
    concat : str or Series/Index of objects
        If `others` is None, `str` is returned, otherwise a `Series/Index`
        (same type as caller) of objects is returned.

    See Also
    --------
    split : Split each string in the Series/Index.
    join : Join lists contained as elements in the Series/Index.

    Examples
    --------
    When not passing `others`, all values are concatenated into a single
    string:

    >>> s = pd.Series(['a', 'b', np.nan, 'd'])
    >>> s.str.cat(sep=' ')
    'a b d'

    By default, NA values in the Series are ignored. Using `na_rep`, they
    can be given a representation:

    >>> s.str.cat(sep=' ', na_rep='?')
    'a b ? d'

    If `others` is specified, corresponding values are concatenated with
    the separator. Result will be a Series of strings.

    >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')
    0    a,A
    1    b,B
    2    NaN
    3    d,D
    dtype: object

    Missing values will remain missing in the result, but can again be
    represented using `na_rep`

    >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')
    0    a,A
    1    b,B
    2    -,C
    3    d,D
    dtype: object

    If `sep` is not specified, the values are concatenated without
    separation.

    >>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')
    0    aA
    1    bB
    2    -C
    3    dD
    dtype: object

    Series with different indexes can be aligned before concatenation. The
    `join`-keyword works as in other methods.

    >>> t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2])
    >>> s.str.cat(t, join='left', na_rep='-')
    0    aa
    1    b-
    2    -c
    3    dd
    dtype: object
    >>>
    >>> s.str.cat(t, join='outer', na_rep='-')
    0    aa
    1    b-
    2    -c
    3    dd
    4    -e
    dtype: object
    >>>
    >>> s.str.cat(t, join='inner', na_rep='-')
    0    aa
    2    -c
    3    dd
    dtype: object
    >>>
    >>> s.str.cat(t, join='right', na_rep='-')
    3    dd
    0    aa
    4    -e
    2    -c
    dtype: object

    For more examples, see :ref:`here <text.concatenate>`.
    """
    from pandas import Index, Series, concat

    # A bare string for `others` is almost certainly a misplaced `sep`.
    if isinstance(others, compat.string_types):
        raise ValueError("Did you mean to supply a `sep` keyword?")
    if sep is None:
        sep = ''

    # Work on a Series in both cases; an Index caller is wrapped in a
    # Series indexed by itself so alignment below behaves uniformly.
    if isinstance(self._orig, Index):
        data = Series(self._orig, index=self._orig)
    else:  # Series
        data = self._orig

    # concatenate Series/Index with itself if no "others"
    if others is None:
        data = ensure_object(data)
        na_mask = isna(data)
        if na_rep is None and na_mask.any():
            # NaNs are silently dropped when no replacement is given
            data = data[~na_mask]
        elif na_rep is not None and na_mask.any():
            data = np.where(na_mask, na_rep, data)
        return sep.join(data)

    try:
        # turn anything in "others" into lists of Series
        others, warn = self._get_series_list(others,
                                             ignore_index=(join is None))
    except ValueError:  # do not catch TypeError raised by _get_series_list
        if join is None:
            raise ValueError('All arrays must be same length, except '
                             'those having an index if `join` is not None')
        else:
            raise ValueError('If `others` contains arrays or lists (or '
                             'other list-likes without an index), these '
                             'must all be of the same length as the '
                             'calling Series/Index.')

    # Deprecation path: silent alignment is going away (see GH 0.23 notes).
    if join is None and warn:
        warnings.warn("A future version of pandas will perform index "
                      "alignment when `others` is a Series/Index/"
                      "DataFrame (or a list-like containing one). To "
                      "disable alignment (the behavior before v.0.23) and "
                      "silence this warning, use `.values` on any Series/"
                      "Index/DataFrame in `others`. To enable alignment "
                      "and silence this warning, pass `join='left'|"
                      "'outer'|'inner'|'right'`. The future default will "
                      "be `join='left'`.", FutureWarning, stacklevel=2)

    # if join is None, _get_series_list already force-aligned indexes
    join = 'left' if join is None else join

    # align if required
    if any(not data.index.equals(x.index) for x in others):
        # Need to add keys for uniqueness in case of duplicate columns
        others = concat(others, axis=1,
                        join=(join if join == 'inner' else 'outer'),
                        keys=range(len(others)), sort=False, copy=False)
        data, others = data.align(others, join=join)
        others = [others[x] for x in others]  # again list of Series

    # Combine caller + all others into object arrays and compute the
    # row-wise union of missing values across every column.
    all_cols = [ensure_object(x) for x in [data] + others]
    na_masks = np.array([isna(x) for x in all_cols])
    union_mask = np.logical_or.reduce(na_masks, axis=0)

    if na_rep is None and union_mask.any():
        # no na_rep means NaNs for all rows where any column has a NaN
        # only necessary if there are actually any NaNs
        result = np.empty(len(data), dtype=object)
        np.putmask(result, union_mask, np.nan)
        not_masked = ~union_mask
        result[not_masked] = cat_core([x[not_masked] for x in all_cols],
                                      sep)
    elif na_rep is not None and union_mask.any():
        # fill NaNs with na_rep in case there are actually any NaNs
        all_cols = [np.where(nm, na_rep, col)
                    for nm, col in zip(na_masks, all_cols)]
        result = cat_core(all_cols, sep)
    else:
        # no NaNs - can just concatenate
        result = cat_core(all_cols, sep)

    if isinstance(self._orig, Index):
        # add dtype for case that result is all-NA
        result = Index(result, dtype=object, name=self._orig.name)
    else:  # Series
        result = Series(result, dtype=object, index=data.index,
                        name=self._orig.name)
    return result
_shared_docs['str_split'] = ("""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the %(side)s,
at the specified delimiter string. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
pat : str, optional
String or regular expression to split on.
If not specified, split on whitespace.
n : int, default -1 (all)
Limit number of splits in output.
``None``, 0 and -1 will be interpreted as return all splits.
expand : bool, default False
Expand the splitted strings into separate columns.
* If ``True``, return DataFrame/MultiIndex expanding dimensionality.
* If ``False``, return Series/Index, containing lists of strings.
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
See Also
--------
Series.str.split : Split strings around given separator/delimiter.
Series.str.rsplit : Splits string around given separator/delimiter,
starting from the right.
Series.str.join : Join lists contained as elements in the Series/Index
with passed delimiter.
str.split : Standard library version for split.
str.rsplit : Standard library version for rsplit.
Notes
-----
The handling of the `n` keyword depends on the number of found splits:
- If found splits > `n`, make first `n` splits only
- If found splits <= `n`, make all splits
- If for a certain row the number of found splits < `n`,
append `None` for padding up to `n` if ``expand=True``
If using ``expand=True``, Series and Index callers return DataFrame and
MultiIndex objects, respectively.
Examples
--------
>>> s = pd.Series(["this is a regular sentence",
"https://docs.python.org/3/tutorial/index.html", np.nan])
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
Without the `n` parameter, the outputs of `rsplit` and `split`
are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `n` parameter can be used to limit the number of splits on the
delimiter. The outputs of `split` and `rsplit` are different.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat = "/")
0 [this is a regular sentence]
1 [https:, , docs.python.org, 3, tutorial, index...
2 NaN
dtype: object
When using ``expand=True``, the split elements will expand out into
separate columns. If NaN is present, it is propagated throughout
the columns during the split.
>>> s.str.split(expand=True)
0 1 2 3
0 this is a regular
1 https://docs.python.org/3/tutorial/index.html None None None
2 NaN NaN NaN NaN \
4
0 sentence
1 None
2 NaN
For slightly more complex use cases like splitting the html document name
from a url, a combination of parameter settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 NaN NaN
""")
@Appender(_shared_docs['str_split'] % {
    'side': 'beginning',
    'method': 'split'})
def split(self, pat=None, n=-1, expand=False):
    # Delegate to the module-level splitter, then box the result back
    # into the caller's type (Series/Index, or DataFrame if expanding).
    return self._wrap_result(str_split(self._parent, pat, n=n),
                             expand=expand)
@Appender(_shared_docs['str_split'] % {
    'side': 'end',
    'method': 'rsplit'})
def rsplit(self, pat=None, n=-1, expand=False):
    # Same as `split`, but splitting starts from the right-hand end.
    return self._wrap_result(str_rsplit(self._parent, pat, n=n),
                             expand=expand)
_shared_docs['str_partition'] = ("""
Split the string at the %(side)s occurrence of `sep`.
This method splits the string at the %(side)s occurrence of `sep`,
and returns 3 elements containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
sep : str, default whitespace
String to split on.
pat : str, default whitespace
.. deprecated:: 0.24.0
Use ``sep`` instead
expand : bool, default True
If True, return DataFrame/MultiIndex expanding dimensionality.
If False, return Series/Index.
Returns
-------
DataFrame/MultiIndex or Series/Index of objects
See Also
--------
%(also)s
Series.str.split : Split strings around given separators.
str.partition : Standard library version.
Examples
--------
>>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.partition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by the last space instead of the first one:
>>> s.str.rpartition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
0 1 2
0 Linda van der Berg
1 George Pitt - Rivers
To return a Series containining tuples instead of a DataFrame:
>>> s.str.partition('-', expand=False)
0 (Linda van der Berg, , )
1 (George Pitt, -, Rivers)
dtype: object
Also available on indices:
>>> idx = pd.Index(['X 123', 'Y 999'])
>>> idx
Index(['X 123', 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex(levels=[['X', 'Y'], [' '], ['123', '999']],
codes=[[0, 1], [0, 0], [0, 1]])
Or an index with tuples with ``expand=False``:
>>> idx.str.partition(expand=False)
Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')
""")
@Appender(_shared_docs['str_partition'] % {
    'side': 'first',
    'return': '3 elements containing the string itself, followed by two '
              'empty strings',
    'also': 'rpartition : Split the string at the last occurrence of '
            '`sep`.'
})
@deprecate_kwarg(old_arg_name='pat', new_arg_name='sep')
def partition(self, sep=' ', expand=True):
    # Apply str.partition element-wise; _na_map keeps NaNs as NaNs.
    parts = _na_map(lambda element: element.partition(sep), self._parent)
    return self._wrap_result(parts, expand=expand)
@Appender(_shared_docs['str_partition'] % {
    'side': 'last',
    'return': '3 elements containing two empty strings, followed by the '
              'string itself',
    'also': 'partition : Split the string at the first occurrence of '
            '`sep`.'
})
@deprecate_kwarg(old_arg_name='pat', new_arg_name='sep')
def rpartition(self, sep=' ', expand=True):
    # Apply str.rpartition element-wise; _na_map keeps NaNs as NaNs.
    parts = _na_map(lambda element: element.rpartition(sep), self._parent)
    return self._wrap_result(parts, expand=expand)
@copy(str_get)
def get(self, i):
    # Extract element `i` from each string/list/tuple entry.
    return self._wrap_result(str_get(self._parent, i))
@copy(str_join)
def join(self, sep):
    # Join list elements within each entry using `sep`.
    return self._wrap_result(str_join(self._parent, sep))
@copy(str_contains)
def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
    # `na` doubles as the fill value for missing entries in the result.
    matches = str_contains(self._parent, pat, case=case, flags=flags,
                           na=na, regex=regex)
    return self._wrap_result(matches, fill_value=na)
@copy(str_match)
def match(self, pat, case=True, flags=0, na=np.nan):
    # Like `contains`, but anchored at the start of each string.
    matched = str_match(self._parent, pat, case=case, flags=flags, na=na)
    return self._wrap_result(matched, fill_value=na)
@copy(str_replace)
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
    # Element-wise substitution; regex vs. literal handling is done
    # inside str_replace.
    return self._wrap_result(
        str_replace(self._parent, pat, repl, n=n, case=case,
                    flags=flags, regex=regex))
@copy(str_repeat)
def repeat(self, repeats):
    # Duplicate each string `repeats` times (scalar or sequence).
    return self._wrap_result(str_repeat(self._parent, repeats))
@copy(str_pad)
def pad(self, width, side='left', fillchar=' '):
    # Generic padding primitive; center/ljust/rjust delegate here.
    return self._wrap_result(
        str_pad(self._parent, width, side=side, fillchar=fillchar))
_shared_docs['str_pad'] = ("""
Filling %(side)s side of strings in the Series/Index with an
additional character. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : Series/Index of objects
""")
@Appender(_shared_docs['str_pad'] % dict(side='left and right',
                                         method='center'))
def center(self, width, fillchar=' '):
    # Centering is padding on both sides.
    return self.pad(width, fillchar=fillchar, side='both')
@Appender(_shared_docs['str_pad'] % dict(side='right', method='ljust'))
def ljust(self, width, fillchar=' '):
    # Left-justify == pad on the right.
    return self.pad(width, fillchar=fillchar, side='right')
@Appender(_shared_docs['str_pad'] % dict(side='left', method='rjust'))
def rjust(self, width, fillchar=' '):
    # Right-justify == pad on the left.
    return self.pad(width, fillchar=fillchar, side='left')
def zfill(self, width):
    """
    Pad strings in the Series/Index by prepending '0' characters.

    Strings shorter than `width` are left-padded with '0' up to a total
    length of `width`; strings of length >= `width` are unchanged.
    Non-string elements (e.g. ints, NaN) become NaN in the result.

    Parameters
    ----------
    width : int
        Minimum length of resulting string; strings with length less
        than `width` are prepended with '0' characters.

    Returns
    -------
    Series/Index of objects

    See Also
    --------
    Series.str.rjust : Fills the left side of strings with an arbitrary
        character.
    Series.str.ljust : Fills the right side of strings with an arbitrary
        character.
    Series.str.pad : Fills the specified sides of strings with an
        arbitrary character.
    Series.str.center : Fills both sides of strings with an arbitrary
        character.

    Notes
    -----
    Differs from :meth:`str.zfill` which has special handling
    for '+'/'-' in the string: here a leading sign is treated as a
    regular character, so ``'-1'.zfill`` style sign-moving does NOT
    occur (e.g. ``zfill(3)`` on ``'-1'`` gives ``'0-1'``).
    """
    # zfill is just a left pad with a fixed '0' fill character.
    return self._wrap_result(
        str_pad(self._parent, width, side='left', fillchar='0'))
@copy(str_slice)
def slice(self, start=None, stop=None, step=None):
    # Substring each element via [start:stop:step] semantics.
    return self._wrap_result(str_slice(self._parent, start, stop, step))
@copy(str_slice_replace)
def slice_replace(self, start=None, stop=None, repl=None):
    # Replace the [start:stop] slice of each element with `repl`.
    return self._wrap_result(
        str_slice_replace(self._parent, start, stop, repl))
@copy(str_decode)
def decode(self, encoding, errors="strict"):
    # bytes -> str, element-wise.
    return self._wrap_result(str_decode(self._parent, encoding, errors))
@copy(str_encode)
def encode(self, encoding, errors="strict"):
    # str -> bytes, element-wise.
    return self._wrap_result(str_encode(self._parent, encoding, errors))
_shared_docs['str_strip'] = (r"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the Series/Index from %(side)s.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.strip : Remove leading and trailing characters in Series/Index.
Series.str.lstrip : Remove leading characters in Series/Index.
Series.str.rstrip : Remove trailing characters in Series/Index.
Examples
--------
>>> s = pd.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', np.nan])
>>> s
0 1. Ant.
1 2. Bee!\n
2 3. Cat?\t
3 NaN
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 NaN
dtype: object
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\n
2 Cat?\t
3 NaN
dtype: object
>>> s.str.rstrip('.!? \n\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 NaN
dtype: object
>>> s.str.strip('123.!? \n\t')
0 Ant
1 Bee
2 Cat
3 NaN
dtype: object
""")
@Appender(_shared_docs['str_strip'] % dict(side='left and right sides',
                                           method='strip'))
def strip(self, to_strip=None):
    # Trim both ends; `None` means whitespace (including newlines).
    return self._wrap_result(
        str_strip(self._parent, to_strip, side='both'))
@Appender(_shared_docs['str_strip'] % dict(side='left side',
                                           method='lstrip'))
def lstrip(self, to_strip=None):
    # Trim only the leading end of each element.
    return self._wrap_result(
        str_strip(self._parent, to_strip, side='left'))
@Appender(_shared_docs['str_strip'] % dict(side='right side',
                                           method='rstrip'))
def rstrip(self, to_strip=None):
    # Trim only the trailing end of each element.
    return self._wrap_result(
        str_strip(self._parent, to_strip, side='right'))
@copy(str_wrap)
def wrap(self, width, **kwargs):
    # Wrap long strings to lines of at most `width`; extra kwargs are
    # forwarded to textwrap via str_wrap.
    return self._wrap_result(str_wrap(self._parent, width, **kwargs))
@copy(str_get_dummies)
def get_dummies(self, sep='|'):
    # we need to cast to Series of strings as only that has all
    # methods available for making the dummies...
    # NOTE(review): for categorical callers the dummies are built from
    # the original values (self._orig), not from the codes in _parent.
    data = self._orig.astype(str) if self._is_categorical else self._parent
    result, name = str_get_dummies(data, sep)
    # use_codes=False for categoricals: `result` is already aligned with
    # the original values rather than the category codes.
    return self._wrap_result(result, use_codes=(not self._is_categorical),
                             name=name, expand=True)
@copy(str_translate)
def translate(self, table, deletechars=None):
    # Map characters through `table`, element-wise.
    return self._wrap_result(
        str_translate(self._parent, table, deletechars))
# Pattern-based accessor methods, generated by delegating to the
# module-level implementations via _pat_wrapper.
count = _pat_wrapper(str_count, flags=True)          # accepts regex flags
startswith = _pat_wrapper(str_startswith, na=True)   # accepts `na` fill value
endswith = _pat_wrapper(str_endswith, na=True)       # accepts `na` fill value
findall = _pat_wrapper(str_findall, flags=True)      # accepts regex flags
@copy(str_extract)
def extract(self, pat, flags=0, expand=True):
    # Note: passes the accessor itself (`self`), not self._parent --
    # str_extract visibly needs the wrapping machinery, not just values.
    return str_extract(self, pat, flags=flags, expand=expand)
@copy(str_extractall)
def extractall(self, pat, flags=0):
    # Operates on the original Series/Index (self._orig) rather than
    # the preprocessed _parent.
    return str_extractall(self._orig, pat, flags=flags)
_shared_docs['find'] = ("""
Return %(side)s indexes in each strings in the Series/Index
where the substring is fully contained between [start:end].
Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of integer values
See Also
--------
%(also)s
""")
@Appender(_shared_docs['find'] %
          dict(side='lowest', method='find',
               also='rfind : Return highest indexes in each strings.'))
def find(self, sub, start=0, end=None):
    # Lowest match index per element; -1 where `sub` is absent.
    return self._wrap_result(
        str_find(self._parent, sub, start=start, end=end, side='left'))
@Appender(_shared_docs['find'] %
          dict(side='highest', method='rfind',
               also='find : Return lowest indexes in each strings.'))
def rfind(self, sub, start=0, end=None):
    # Highest match index per element; -1 where `sub` is absent.
    return self._wrap_result(
        str_find(self._parent, sub, start=start, end=end, side='right'))
def normalize(self, form):
    """
    Return the Unicode normal form for the strings in the Series/Index.

    For more information on the forms, see
    :func:`unicodedata.normalize`.

    Parameters
    ----------
    form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
        Unicode form.

    Returns
    -------
    normalized : Series/Index of objects
    """
    import unicodedata

    def _normalize_one(value):
        # compat.u_safe: presumably coerces to unicode on py2 -- verify.
        return unicodedata.normalize(form, compat.u_safe(value))

    return self._wrap_result(_na_map(_normalize_one, self._parent))
_shared_docs['index'] = ("""
Return %(side)s indexes in each strings where the substring is
fully contained between [start:end]. This is the same as
``str.%(similar)s`` except instead of returning -1, it raises a ValueError
when the substring is not found. Equivalent to standard ``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of objects
See Also
--------
%(also)s
""")
@Appender(_shared_docs['index'] %
          dict(side='lowest', similar='find', method='index',
               also='rindex : Return highest indexes in each strings.'))
def index(self, sub, start=0, end=None):
    # Like `find`, but raises ValueError where `sub` is absent.
    return self._wrap_result(
        str_index(self._parent, sub, start=start, end=end, side='left'))
@Appender(_shared_docs['index'] %
          dict(side='highest', similar='rfind', method='rindex',
               also='index : Return lowest indexes in each strings.'))
def rindex(self, sub, start=0, end=None):
    # Like `rfind`, but raises ValueError where `sub` is absent.
    return self._wrap_result(
        str_index(self._parent, sub, start=start, end=end, side='right'))
_shared_docs['len'] = ("""
Computes the length of each element in the Series/Index. The element may be
a sequence (such as a string, tuple or list) or a collection
(such as a dictionary).
Returns
-------
Series or Index of int
A Series or Index of integer values indicating the length of each
element in the Series or Index.
See Also
--------
str.len : Python built-in function returning the length of an object.
Series.size : Returns the length of the Series.
Examples
--------
Returns the length (number of characters) in a string. Returns the
number of entries for dictionaries, lists or tuples.
>>> s = pd.Series(['dog',
... '',
... 5,
... {'foo' : 'bar'},
... [2, 3, 5, 7],
... ('one', 'two', 'three')])
>>> s
0 dog
1
2 5
3 {'foo': 'bar'}
4 [2, 3, 5, 7]
5 (one, two, three)
dtype: object
>>> s.str.len()
0 3.0
1 0.0
2 NaN
3 1.0
4 4.0
5 3.0
dtype: float64
""")
# Intentionally shadows the builtin: exposed to users as s.str.len().
len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)
_shared_docs['casemethods'] = ("""
Convert strings in the Series/Index to %(type)s.
Equivalent to :meth:`str.%(method)s`.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.lower : Converts all characters to lowercase.
Series.str.upper : Converts all characters to uppercase.
Series.str.title : Converts first character of each word to uppercase and
remaining to lowercase.
Series.str.capitalize : Converts first character to uppercase and
remaining to lowercase.
Series.str.swapcase : Converts uppercase to lowercase and lowercase to
uppercase.
Examples
--------
>>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
""")
# Substitution mappings for the shared 'casemethods' docstring template.
_shared_docs['lower'] = dict(type='lowercase', method='lower')
_shared_docs['upper'] = dict(type='uppercase', method='upper')
_shared_docs['title'] = dict(type='titlecase', method='title')
_shared_docs['capitalize'] = dict(type='be capitalized',
                                  method='capitalize')
_shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase')
# Case-conversion accessor methods: element-wise and NaN-safe via
# _noarg_wrapper; each gets its docstring from the shared template.
lower = _noarg_wrapper(lambda x: x.lower(),
                       docstring=_shared_docs['casemethods'] %
                       _shared_docs['lower'])
upper = _noarg_wrapper(lambda x: x.upper(),
                       docstring=_shared_docs['casemethods'] %
                       _shared_docs['upper'])
title = _noarg_wrapper(lambda x: x.title(),
                       docstring=_shared_docs['casemethods'] %
                       _shared_docs['title'])
capitalize = _noarg_wrapper(lambda x: x.capitalize(),
                            docstring=_shared_docs['casemethods'] %
                            _shared_docs['capitalize'])
swapcase = _noarg_wrapper(lambda x: x.swapcase(),
                          docstring=_shared_docs['casemethods'] %
                          _shared_docs['swapcase'])
_shared_docs['ismethods'] = ("""
Check whether all characters in each string are %(type)s.
This is equivalent to running the Python string method
:meth:`str.%(method)s` for each element of the Series/Index. If a string
has zero characters, ``False`` is returned for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length as the original
Series/Index.
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
**Checks for Alphabetic and Numeric Characters**
>>> s1 = pd.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with any additional punctuation
or whitespace will evaluate to false for an alphanumeric check.
>>> s2 = pd.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
**More Detailed Checks for Numeric Characters**
There are several different but overlapping sets of numeric characters that
can be checked for.
>>> s3 = pd.Series(['23', '³', '⅕', ''])
The ``s3.str.isdecimal`` method checks for characters used to form numbers
in base 10.
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
The ``s.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also
includes special digits, like superscripted and subscripted digits in
unicode.
>>> s3.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
The ``s.str.isnumeric`` method is the same as ``s3.str.isdigit`` but also
includes other characters that can represent quantities such as unicode
fractions.
>>> s3.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
**Checks for Whitespace**
>>> s4 = pd.Series([' ', '\\t\\r\\n ', ''])
>>> s4.str.isspace()
0 True
1 True
2 False
dtype: bool
**Checks for Character Case**
>>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s5.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
>>> s5.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
The ``s5.str.istitle`` method checks for whether all words are in title
case (whether only the first letter of each word is capitalized). Words are
assumed to be as any sequence of non-numeric characters seperated by
whitespace characters.
>>> s5.str.istitle()
0 False
1 True
2 False
3 False
dtype: bool
""")
# Substitution mappings for the shared 'ismethods' docstring template.
_shared_docs['isalnum'] = dict(type='alphanumeric', method='isalnum')
_shared_docs['isalpha'] = dict(type='alphabetic', method='isalpha')
_shared_docs['isdigit'] = dict(type='digits', method='isdigit')
_shared_docs['isspace'] = dict(type='whitespace', method='isspace')
_shared_docs['islower'] = dict(type='lowercase', method='islower')
_shared_docs['isupper'] = dict(type='uppercase', method='isupper')
_shared_docs['istitle'] = dict(type='titlecase', method='istitle')
_shared_docs['isnumeric'] = dict(type='numeric', method='isnumeric')
_shared_docs['isdecimal'] = dict(type='decimal', method='isdecimal')
# Character-class predicate methods: element-wise, NaN-safe via
# _noarg_wrapper, returning boolean Series/Index.
isalnum = _noarg_wrapper(lambda x: x.isalnum(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['isalnum'])
isalpha = _noarg_wrapper(lambda x: x.isalpha(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['isalpha'])
isdigit = _noarg_wrapper(lambda x: x.isdigit(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['isdigit'])
isspace = _noarg_wrapper(lambda x: x.isspace(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['isspace'])
islower = _noarg_wrapper(lambda x: x.islower(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['islower'])
isupper = _noarg_wrapper(lambda x: x.isupper(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['isupper'])
istitle = _noarg_wrapper(lambda x: x.istitle(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['istitle'])
# isnumeric/isdecimal route through compat.u_safe -- these methods only
# exist on unicode strings in py2.
isnumeric = _noarg_wrapper(lambda x: compat.u_safe(x).isnumeric(),
                           docstring=_shared_docs['ismethods'] %
                           _shared_docs['isnumeric'])
isdecimal = _noarg_wrapper(lambda x: compat.u_safe(x).isdecimal(),
                           docstring=_shared_docs['ismethods'] %
                           _shared_docs['isdecimal'])
@classmethod
def _make_accessor(cls, data):
    """Validate ``data`` and return a new accessor instance wrapping it."""
    # _validate raises for inputs that have no string methods.
    cls._validate(data)
    return cls(data)
| bsd-3-clause |
sekikn/incubator-airflow | tests/providers/salesforce/hooks/test_salesforce.py | 7 | 9659 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest.mock import Mock, patch
import pandas as pd
from numpy import nan
from simple_salesforce import Salesforce
from airflow.models.connection import Connection
from airflow.providers.salesforce.hooks.salesforce import SalesforceHook
class TestSalesforceHook(unittest.TestCase):
def setUp(self):
    # A fresh hook per test; no connection is opened at construction time.
    self.salesforce_hook = SalesforceHook(conn_id="conn_id")
def test_get_conn_exists(self):
    """get_conn() must reuse an already-established connection."""
    existing_conn = Mock(spec=Salesforce)
    self.salesforce_hook.conn = existing_conn
    self.salesforce_hook.get_conn()
    # The original assertion checked `conn.return_value`, which is an
    # auto-created Mock attribute and therefore never None -- the test
    # could not fail. Assert identity with the pre-set connection instead.
    self.assertIs(self.salesforce_hook.conn, existing_conn)
@patch(
    "airflow.providers.salesforce.hooks.salesforce.SalesforceHook.get_connection",
    return_value=Connection(
        login="username", password="password", extra='{"security_token": "token", "domain": "test"}'
    ),
)
@patch("airflow.providers.salesforce.hooks.salesforce.Salesforce")
def test_get_conn(self, mock_salesforce, mock_get_connection):
    # get_conn() should build the Salesforce client from the Airflow
    # Connection fields and cache it on the hook.
    self.salesforce_hook.get_conn()
    self.assertEqual(self.salesforce_hook.conn, mock_salesforce.return_value)
    # Verify every credential/extra field is forwarded to the client.
    mock_salesforce.assert_called_once_with(
        username=mock_get_connection.return_value.login,
        password=mock_get_connection.return_value.password,
        security_token=mock_get_connection.return_value.extra_dejson["security_token"],
        instance_url=mock_get_connection.return_value.host,
        domain=mock_get_connection.return_value.extra_dejson.get("domain"),
    )
@patch("airflow.providers.salesforce.hooks.salesforce.Salesforce")
def test_make_query(self, mock_salesforce):
mock_salesforce.return_value.query_all.return_value = dict(totalSize=123, done=True)
self.salesforce_hook.conn = mock_salesforce.return_value
query = "SELECT * FROM table"
query_results = self.salesforce_hook.make_query(query, include_deleted=True)
mock_salesforce.return_value.query_all.assert_called_once_with(query, include_deleted=True)
self.assertEqual(query_results, mock_salesforce.return_value.query_all.return_value)
@patch("airflow.providers.salesforce.hooks.salesforce.Salesforce")
def test_describe_object(self, mock_salesforce):
obj = "obj_name"
mock_salesforce.return_value.__setattr__(obj, Mock(spec=Salesforce))
self.salesforce_hook.conn = mock_salesforce.return_value
obj_description = self.salesforce_hook.describe_object(obj)
mock_salesforce.return_value.__getattr__(obj).describe.assert_called_once_with()
self.assertEqual(obj_description, mock_salesforce.return_value.__getattr__(obj).describe.return_value)
@patch("airflow.providers.salesforce.hooks.salesforce.SalesforceHook.get_conn")
@patch(
"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.describe_object",
return_value={"fields": [{"name": "field_1"}, {"name": "field_2"}]},
)
def test_get_available_fields(self, mock_describe_object, mock_get_conn):
obj = "obj_name"
available_fields = self.salesforce_hook.get_available_fields(obj)
mock_get_conn.assert_called_once_with()
mock_describe_object.assert_called_once_with(obj)
self.assertEqual(available_fields, ["field_1", "field_2"])
@patch("airflow.providers.salesforce.hooks.salesforce.SalesforceHook.make_query")
def test_get_object_from_salesforce(self, mock_make_query):
salesforce_objects = self.salesforce_hook.get_object_from_salesforce(
obj="obj_name", fields=["field_1", "field_2"]
)
mock_make_query.assert_called_once_with("SELECT field_1,field_2 FROM obj_name")
self.assertEqual(salesforce_objects, mock_make_query.return_value)
def test_write_object_to_file_invalid_format(self):
with self.assertRaises(ValueError):
self.salesforce_hook.write_object_to_file(query_results=[], filename="test", fmt="test")
@patch(
"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records",
return_value=pd.DataFrame({"test": [1, 2, 3], "dict": [nan, nan, {"foo": "bar"}]}),
)
def test_write_object_to_file_csv(self, mock_data_frame):
mock_data_frame.return_value.to_csv = Mock()
filename = "test"
data_frame = self.salesforce_hook.write_object_to_file(query_results=[], filename=filename, fmt="csv")
mock_data_frame.return_value.to_csv.assert_called_once_with(filename, index=False)
# Note that the latest version of pandas dataframes (1.1.2) returns "nan" rather than "None" here
pd.testing.assert_frame_equal(
data_frame,
pd.DataFrame({"test": [1, 2, 3], "dict": ["nan", "nan", str({"foo": "bar"})]}),
check_index_type=False,
)
@patch(
"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.describe_object",
return_value={"fields": [{"name": "field_1", "type": "date"}]},
)
@patch(
"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records",
return_value=pd.DataFrame({"test": [1, 2, 3], "field_1": ["2019-01-01", "2019-01-02", "2019-01-03"]}),
)
def test_write_object_to_file_json_with_timestamp_conversion(self, mock_data_frame, mock_describe_object):
mock_data_frame.return_value.to_json = Mock()
filename = "test"
obj_name = "obj_name"
data_frame = self.salesforce_hook.write_object_to_file(
query_results=[{"attributes": {"type": obj_name}}],
filename=filename,
fmt="json",
coerce_to_timestamp=True,
)
mock_describe_object.assert_called_once_with(obj_name)
mock_data_frame.return_value.to_json.assert_called_once_with(filename, "records", date_unit="s")
pd.testing.assert_frame_equal(
data_frame, pd.DataFrame({"test": [1, 2, 3], "field_1": [1.546301e09, 1.546387e09, 1.546474e09]})
)
@patch("airflow.providers.salesforce.hooks.salesforce.time.time", return_value=1.23)
@patch(
"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records",
return_value=pd.DataFrame({"test": [1, 2, 3]}),
)
def test_write_object_to_file_ndjson_with_record_time(self, mock_data_frame, mock_time):
mock_data_frame.return_value.to_json = Mock()
filename = "test"
data_frame = self.salesforce_hook.write_object_to_file(
query_results=[], filename=filename, fmt="ndjson", record_time_added=True
)
mock_data_frame.return_value.to_json.assert_called_once_with(
filename, "records", lines=True, date_unit="s"
)
pd.testing.assert_frame_equal(
data_frame,
pd.DataFrame(
{
"test": [1, 2, 3],
"time_fetched_from_salesforce": [
mock_time.return_value,
mock_time.return_value,
mock_time.return_value,
],
}
),
)
@patch(
"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.describe_object",
return_value={"fields": [{"name": "field_1", "type": "date"}]},
)
@patch(
"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records",
return_value=pd.DataFrame({"test": [1, 2, 3], "field_1": ["2019-01-01", "2019-01-02", "2019-01-03"]}),
)
def test_obect_to_df_with_timestamp_conversion(self, mock_data_frame, mock_describe_object):
obj_name = "obj_name"
data_frame = self.salesforce_hook.object_to_df(
query_results=[{"attributes": {"type": obj_name}}],
coerce_to_timestamp=True,
)
mock_describe_object.assert_called_once_with(obj_name)
pd.testing.assert_frame_equal(
data_frame, pd.DataFrame({"test": [1, 2, 3], "field_1": [1.546301e09, 1.546387e09, 1.546474e09]})
)
@patch("airflow.providers.salesforce.hooks.salesforce.time.time", return_value=1.23)
@patch(
"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records",
return_value=pd.DataFrame({"test": [1, 2, 3]}),
)
def test_object_to_df_with_record_time(self, mock_data_frame, mock_time):
data_frame = self.salesforce_hook.object_to_df(query_results=[], record_time_added=True)
pd.testing.assert_frame_equal(
data_frame,
pd.DataFrame(
{
"test": [1, 2, 3],
"time_fetched_from_salesforce": [
mock_time.return_value,
mock_time.return_value,
mock_time.return_value,
],
}
),
)
| apache-2.0 |
VulpesCorsac/Ire-Polus | Find PID/Find_PID.py | 2 | 5265 | # (c) VulpesCorsac
import matplotlib.pyplot as plt
import numpy
import random
# ---------------------------------------------------------------------------
# Simulation parameters: stand temperatures and timing, the random
# disturbance model, heater/thermal constants, and the PID grid-search
# ranges used by the (commented-out) search loop below.
# ---------------------------------------------------------------------------
T_in_0 = 30 # Starting internal temperature
T_out_0 = 20 # Starting external temperature
T_needed = 35 # Temperature, we should reach and stabilise
t_delay = 10 # Amount of seconds that system does not feel power change
t_max = 3000 # Amount of seconds we're waiting for
t_disturb = 10 # Amount of seconds that we randomly change T_out
disturb_probability = 0.003 # Probability of disturbance
disturb_amount = 2 # Max amount of disturbance peaks
dT_disturbance = 1 # Degrees that outer temperature change
eta_0 = 5 # Strange koefficient
d_eta = 0.05 # Fluctuation
mu = 0.5 # Another strange koefficient
efficiency = 1.0 # W is not so powerful
V = 100 # Volume of out stand
R = 8.31 # Universal gas constant
v = V/22.4 # Amount of Mols in our volume
K = 5*v*R/4 # Some multiply that is often used
W_0 = eta_0*(T_in_0-T_out_0)/efficiency # Starting power
W_max = 10 # Maximum power
P_max = 2 # Max P parametr
dP = 0.1 # P searching step
I_max = 0.1 # Max I parametr
dI = 0.005 # I searching step
D_max = 20 # Max D parametr
dD = 0.2 # D searching step
Rounds = 2 # How many times we start the experiment with the same parametr
def standart_delta(T_list):
    """Mean squared deviation of a temperature trace from T_needed.

    Parameters
    ----------
    T_list : sequence of float
        Simulated temperature samples.

    Returns
    -------
    float
        Mean of ``(T - T_needed)**2`` over the trace.  Returns 0.0 for an
        empty trace (the previous implementation raised ZeroDivisionError).
    """
    if not T_list:
        return 0.0
    # Accumulate the squared errors first and divide once: one division
    # instead of len(T_list), and less accumulated rounding error.
    return sum((T - T_needed) ** 2 for T in T_list) / len(T_list)
def W_set(P, I, D, T, T_last):
    """One PID controller step: return the heater power adjustment.

    P, I, D are the controller gains; T is the temperature currently seen
    by the controller and T_last the previous one.  Reads and mutates the
    module-level global ``Y_i``, which accumulates the integral term
    between calls (reset it to 0 before each experiment).
    """
    global Y_i
    # Proportional term + current integral increment + accumulated
    # integral + derivative term on the temperature change (T_last - T).
    ans = P*(T_needed-T) + I*(T_needed-T) + Y_i + D*(T_last-T)
    Y_i += I*(T_needed-T)
    # return min(max(efficiency*ans, 0), W_max)
    return efficiency*ans

# Integral accumulator used by W_set; reset to 0 before each experiment.
Y_i = 0
def Calculate(P, I, D):
    """Simulate the thermal stand under a PID controller with gains P, I, D.

    Returns ``(T_0, T_R, W, Wex, error)`` where T_0 is the temperature the
    controller observes (delayed by t_delay seconds), T_R the real internal
    temperature, W the commanded power, Wex the exchanged power, and
    ``error`` the mean squared deviation of T_0 from T_needed.

    NOTE(review): depends on module globals (Y_i, eta_0, T_out_0, ...) and
    on random disturbances, so repeated calls with the same gains can give
    different traces.
    """
    T_0 = []
    T_R = []
    W = []
    Wex = []
    T_0.append(T_in_0)
    T_R.append(T_in_0)
    W.append(W_0)
    Wex.append(W_0)
    t_d = 0  # seconds elapsed inside the current disturbance window
    a_d = 0  # number of disturbance peaks triggered so far
    T_out = T_out_0
    for t in range(t_max):
        # Heat-transfer coefficient fluctuates uniformly around eta_0.
        eta = eta_0 + (2*d_eta)*random.random() - d_eta
        if t_d == 0:
            if random.random() < disturb_probability:
                if a_d < disturb_amount:
                    # print('Lucky you, a_d = ' + str(a_d) + ', t = ' + str(t))
                    a_d += 1
                    t_d += 1
                    if random.random() < 0.5:
                        T_out += dT_disturbance
                    else:
                        T_out -= dT_disturbance
        else:
            t_d += 1
        if t_d >= t_disturb:
            # Disturbance window over: restore the nominal outer temperature.
            T_out = T_out_0
            t_d = 0
        if t < t_delay:
            # During the transport delay the controller still sees T_in_0.
            W.append(W[t] + W_set(P, I, D, T_in_0, T_in_0))
            T_R.append(T_R[t] + W[t]*(1-mu)/K)
            T_0.append(T_0[t])
            Wex.append(mu*W[t+1])
        else:
            W.append(W[t] + W_set(P, I, D, T_0[t], T_0[t-1]))
            T_0.append(T_R[t+1-t_delay])
            T_R.append((0.5*(W[t+1]+W[t])+eta*(T_out-0.5*T_R[t]) + K*(T_R[t]-T_0[t+1]+T_0[t]))/(0.5*eta+K))
            Wex.append(eta*(T_R[t+1]-T_out))
    return T_0, T_R, W, Wex, standart_delta(T_0)
def Print(T_0, T_R, W, Wex):
    """Dump the simulation traces to stdout, one line per time step.

    NOTE(review): the ``t >= t_delay`` branch references ``eta``, which is
    only a local variable inside Calculate(), so calling this function
    would raise NameError there.  It appears to be leftover debug code
    (never called in this script) — confirm before relying on it.
    """
    for t in range(len(W)):
        if t == 0:
            print("t = " + str(t) + ", T_R = " + str(T_R[t]) + ", T_0 = " + str(T_0[t]) + \
                  ", W = " + str(W[t]) + ", Wex = " + str(W[t]))
        else:
            if t < t_delay:
                print("t = " + str(t) + ", T_R = " + str(T_R[t]) + ", T_0 = " + str(T_0[t]) + \
                      ", W = " + str(W[t]) + ", Wex = " + str(mu*W[t]))
            else:
                print("t = " + str(t) + ", T_R = " + str(T_R[t]) + ", T_0 = " + str(T_0[t]) + \
                      ", W = " + str(W[t]) + ", Wex = " + str(eta*(T_R[t]-T_out_0)))
P_best = 0.70
I_best = 0.00
D_best = 4.80
T_0, T_R, W, Wex, besterr = Calculate(P_best, I_best, D_best)
'''
for P in numpy.arange(0, P_max+1.5*dP, dP):
print("New P = " + str(P))
for I in numpy.arange(0, I_max+1.5*dI, dI):
for D in numpy.arange(0, D_max+1.5*dD, dD):
Y_i = 0
temperr_R = 0
for Round in range(Rounds):
T_0, T_R, W, Wex, temperr = Calculate(P, I, D)
temperr_R += temperr / Rounds
if temperr_R < besterr:
print("P = " + str(P) + ", I = " + str(I) + ", D = " + str(D) + ", temperr = " + str(temperr))
besterr = temperr
P_best = P
I_best = I
D_best = D
# '''
Y_i = 0
T_0, T_R, W, Wex, besterr = Calculate(P_best, I_best, D_best)
print("Best:")
print("P = " + str(P_best) + ", I = " + str(I_best) + ", D = " + str(D_best) + ", besterr = " + str(besterr))
plt.plot(T_0)
#plt.plot(T_R)
plt.show()
plt.plot(W)
#plt.plot(Wex)
plt.show()
| gpl-2.0 |
aabadie/scikit-learn | sklearn/cross_decomposition/pls_.py | 35 | 30767 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from distutils.version import LooseVersion
from sklearn.utils.extmath import svd_flip
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']

import scipy

# Extra keyword arguments forwarded to every linalg.pinv2 call in this
# module; empty on old scipy where check_finite is unsupported.
pinv2_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
    # check_finite=False is an optimization available only in scipy >=0.12
    pinv2_args = {'check_finite': False}
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
                                 norm_y_weights=False):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y); returns the first left and right
    singular vectors of X'Y. See PLS for the meaning of the parameters. It is
    similar to the Power method for determining the eigenvectors and
    eigenvalues of a X'Y.

    Returns ``(x_weights, y_weights, n_iterations)``.
    """
    y_score = Y[:, [0]]
    x_weights_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    eps = np.finfo(X.dtype).eps
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode == "B":
            if X_pinv is None:
                # We use slower pinv2 (same as np.linalg.pinv) for stability
                # reasons
                X_pinv = linalg.pinv2(X, **pinv2_args)
            x_weights = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
            # If y_score only has zeros x_weights will only have zeros. In
            # this case add an epsilon to converge to a more acceptable
            # solution
            if np.dot(x_weights.T, x_weights) < eps:
                x_weights += eps
        # 1.2 Normalize u
        x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, x_weights)
        # 2.1 Update y_weights
        if mode == "B":
            if Y_pinv is None:
                Y_pinv = linalg.pinv2(Y, **pinv2_args)  # compute once pinv(Y)
            y_weights = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        # 2.2 Normalize y_weights
        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
        # y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
        # Convergence test: stop when the x_weights update is below tol, or
        # immediately for a univariate Y (single column — no iteration needed).
        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached')
            break
        x_weights_old = x_weights
        ite += 1
    return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
           RegressorMixin):
    """Partial Least Squares (PLS)

    This class implements the generic PLS algorithm, constructors' parameters
    allow to obtain a specific implementation such as:

    - PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
      and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
      With univariate response it implements PLS1.

    - PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
      normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
      [Wegelin et al. 2000]. This parametrization implements the original Wold
      algorithm.

    We use the terminology defined by [Wegelin et al. 2000].
    This implementation uses the PLS Wold 2 blocks algorithm based on two
    nested loops:
        (i) The outer loop iterate over components.
        (ii) The inner loop estimates the weights vectors. This can be done
        with two algo. (a) the inner loop of the original NIPALS algo. or (b) a
        SVD on residuals cross-covariance matrices.

    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).

    scale : boolean, scale data? (default True)

    deflation_mode : str, "canonical" or "regression". See notes.

    mode : "A" classical PLS and "B" CCA. See notes.

    norm_y_weights : boolean, normalize Y weights to one? (default False)

    algorithm : string, "nipals" or "svd"
        The algorithm used to estimate the weights. It will be called
        n_components times, i.e. once for each iteration of the outer loop.

    max_iter : an integer, the maximum number of iterations (default 500)
        of the NIPALS inner loop (used only if algorithm="nipals")

    tol : non-negative real, default 1e-06
        The tolerance used in the iterative algorithm.

    copy : boolean, default True
        Whether the deflation should be done on a copy. Let the default
        value to True unless you don't care about side effects.

    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.

    y_weights_ : array, [q, n_components]
        Y block weights vectors.

    x_loadings_ : array, [p, n_components]
        X block loadings vectors.

    y_loadings_ : array, [q, n_components]
        Y block loadings vectors.

    x_scores_ : array, [n_samples, n_components]
        X scores.

    y_scores_ : array, [n_samples, n_components]
        Y scores.

    x_rotations_ : array, [p, n_components]
        X block to latents rotations.

    y_rotations_ : array, [q, n_components]
        Y block to latents rotations.

    coef_ : array, [p, q]
        The coefficients of the linear model: ``Y = X coef_ + Err``

    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component. Not useful if the algorithm given is "svd".

    References
    ----------
    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.

    In French but still a reference:
    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.

    See also
    --------
    PLSCanonical
    PLSRegression
    CCA
    PLS_SVD
    """

    @abstractmethod
    def __init__(self, n_components=2, scale=True, deflation_mode="regression",
                 mode="A", algorithm="nipals", norm_y_weights=False,
                 max_iter=500, tol=1e-06, copy=True):
        self.n_components = n_components
        self.deflation_mode = deflation_mode
        self.mode = mode
        self.norm_y_weights = norm_y_weights
        self.scale = scale
        self.algorithm = algorithm
        self.max_iter = max_iter
        self.tol = tol
        self.copy = copy

    def fit(self, X, Y):
        """Fit model to data.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples in the number of samples and
            n_features is the number of predictors.

        Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples in the number of samples and
            n_targets is the number of response variables.
        """
        # copy since this will contains the residuals (deflated) matrices
        check_consistent_length(X, Y)
        X = check_array(X, dtype=np.float64, copy=self.copy)
        Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
        if Y.ndim == 1:
            Y = Y.reshape(-1, 1)

        n = X.shape[0]
        p = X.shape[1]
        q = Y.shape[1]

        if self.n_components < 1 or self.n_components > p:
            raise ValueError('Invalid number of components: %d' %
                             self.n_components)
        if self.algorithm not in ("svd", "nipals"):
            raise ValueError("Got algorithm %s when only 'svd' "
                             "and 'nipals' are known" % self.algorithm)
        if self.algorithm == "svd" and self.mode == "B":
            raise ValueError('Incompatible configuration: mode B is not '
                             'implemented with svd algorithm')
        if self.deflation_mode not in ["canonical", "regression"]:
            raise ValueError('The deflation mode is unknown')
        # Scale (in place)
        X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
            _center_scale_xy(X, Y, self.scale))
        # Residuals (deflated) matrices
        Xk = X
        Yk = Y
        # Results matrices
        self.x_scores_ = np.zeros((n, self.n_components))
        self.y_scores_ = np.zeros((n, self.n_components))
        self.x_weights_ = np.zeros((p, self.n_components))
        self.y_weights_ = np.zeros((q, self.n_components))
        self.x_loadings_ = np.zeros((p, self.n_components))
        self.y_loadings_ = np.zeros((q, self.n_components))
        self.n_iter_ = []

        # NIPALS algo: outer loop, over components
        for k in range(self.n_components):
            if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
                # Yk constant
                warnings.warn('Y residual constant at iteration %s' % k)
                break
            # 1) weights estimation (inner loop)
            # -----------------------------------
            if self.algorithm == "nipals":
                x_weights, y_weights, n_iter_ = \
                    _nipals_twoblocks_inner_loop(
                        X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
                        tol=self.tol, norm_y_weights=self.norm_y_weights)
                self.n_iter_.append(n_iter_)
            elif self.algorithm == "svd":
                x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
            # Forces sign stability of x_weights and y_weights
            # Sign undeterminacy issue from svd if algorithm == "svd"
            # and from platform dependent computation if algorithm == 'nipals'
            x_weights, y_weights = svd_flip(x_weights, y_weights.T)
            y_weights = y_weights.T
            # compute scores
            x_scores = np.dot(Xk, x_weights)
            if self.norm_y_weights:
                y_ss = 1
            else:
                y_ss = np.dot(y_weights.T, y_weights)
            y_scores = np.dot(Yk, y_weights) / y_ss
            # test for null variance
            if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
                warnings.warn('X scores are null at iteration %s' % k)
                break
            # 2) Deflation (in place)
            # ----------------------
            # Possible memory footprint reduction may done here: in order to
            # avoid the allocation of a data chunk for the rank-one
            # approximations matrix which is then subtracted to Xk, we suggest
            # to perform a column-wise deflation.
            #
            # - regress Xk's on x_score
            x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
            # - subtract rank-one approximations to obtain remainder matrix
            Xk -= np.dot(x_scores, x_loadings.T)
            if self.deflation_mode == "canonical":
                # - regress Yk's on y_score, then subtract rank-one approx.
                y_loadings = (np.dot(Yk.T, y_scores)
                              / np.dot(y_scores.T, y_scores))
                Yk -= np.dot(y_scores, y_loadings.T)
            if self.deflation_mode == "regression":
                # - regress Yk's on x_score, then subtract rank-one approx.
                y_loadings = (np.dot(Yk.T, x_scores)
                              / np.dot(x_scores.T, x_scores))
                Yk -= np.dot(x_scores, y_loadings.T)
            # 3) Store weights, scores and loadings # Notation:
            self.x_scores_[:, k] = x_scores.ravel()  # T
            self.y_scores_[:, k] = y_scores.ravel()  # U
            self.x_weights_[:, k] = x_weights.ravel()  # W
            self.y_weights_[:, k] = y_weights.ravel()  # C
            self.x_loadings_[:, k] = x_loadings.ravel()  # P
            self.y_loadings_[:, k] = y_loadings.ravel()  # Q
        # Such that: X = TP' + Err and Y = UQ' + Err

        # 4) rotations from input space to transformed space (scores)
        # T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
        self.x_rotations_ = np.dot(
            self.x_weights_,
            linalg.pinv2(np.dot(self.x_loadings_.T, self.x_weights_),
                         **pinv2_args))
        if Y.shape[1] > 1:
            self.y_rotations_ = np.dot(
                self.y_weights_,
                linalg.pinv2(np.dot(self.y_loadings_.T, self.y_weights_),
                             **pinv2_args))
        else:
            self.y_rotations_ = np.ones(1)

        if True or self.deflation_mode == "regression":
            # FIXME what's with the if?
            # Estimate regression coefficient
            # Regress Y on T
            # Y = TQ' + Err,
            # Then express in function of X
            # Y = X W(P'W)^-1Q' + Err = XB + Err
            # => B = W*Q' (p x q)
            self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
            self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
                          self.y_std_)
        return self

    def transform(self, X, Y=None, copy=True):
        """Apply the dimension reduction learned on the train data.

        Parameters
        ----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples in the number of samples and
            p is the number of predictors.

        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples in the number of samples and
            q is the number of response variables.

        copy : boolean, default True
            Whether to copy X and Y, or perform in-place normalization.

        Returns
        -------
        x_scores if Y is not given, (x_scores, y_scores) otherwise.
        """
        check_is_fitted(self, 'x_mean_')
        X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
        # Normalize
        X -= self.x_mean_
        X /= self.x_std_
        # Apply rotation
        x_scores = np.dot(X, self.x_rotations_)
        if Y is not None:
            Y = check_array(Y, ensure_2d=False, copy=copy, dtype=FLOAT_DTYPES)
            if Y.ndim == 1:
                Y = Y.reshape(-1, 1)
            Y -= self.y_mean_
            Y /= self.y_std_
            y_scores = np.dot(Y, self.y_rotations_)
            return x_scores, y_scores

        return x_scores

    def predict(self, X, copy=True):
        """Apply the dimension reduction learned on the train data.

        Parameters
        ----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples in the number of samples and
            p is the number of predictors.

        copy : boolean, default True
            Whether to copy X and Y, or perform in-place normalization.

        Notes
        -----
        This call requires the estimation of a p x q matrix, which may
        be an issue in high dimensional space.
        """
        check_is_fitted(self, 'x_mean_')
        X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
        # Normalize
        X -= self.x_mean_
        X /= self.x_std_
        Ypred = np.dot(X, self.coef_)
        return Ypred + self.y_mean_

    def fit_transform(self, X, y=None, **fit_params):
        """Learn and apply the dimension reduction on the train data.

        Parameters
        ----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples in the number of samples and
            p is the number of predictors.

        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples in the number of samples and
            q is the number of response variables.

        copy : boolean, default True
            Whether to copy X and Y, or perform in-place normalization.

        Returns
        -------
        x_scores if Y is not given, (x_scores, y_scores) otherwise.
        """
        return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
    """PLS regression

    PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
    in case of one dimensional response.
    This class inherits from _PLS with mode="A", deflation_mode="regression",
    norm_y_weights=False and algorithm="nipals".

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    n_components : int, (default 2)
        Number of components to keep.

    scale : boolean, (default True)
        whether to scale the data

    max_iter : an integer, (default 500)
        the maximum number of iterations of the NIPALS inner loop (used
        only if algorithm="nipals")

    tol : non-negative real
        Tolerance used in the iterative algorithm default 1e-06.

    copy : boolean, default True
        Whether the deflation should be done on a copy. Let the default
        value to True unless you don't care about side effect

    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.

    y_weights_ : array, [q, n_components]
        Y block weights vectors.

    x_loadings_ : array, [p, n_components]
        X block loadings vectors.

    y_loadings_ : array, [q, n_components]
        Y block loadings vectors.

    x_scores_ : array, [n_samples, n_components]
        X scores.

    y_scores_ : array, [n_samples, n_components]
        Y scores.

    x_rotations_ : array, [p, n_components]
        X block to latents rotations.

    y_rotations_ : array, [q, n_components]
        Y block to latents rotations.

    coef_ : array, [p, q]
        The coefficients of the linear model: ``Y = X coef_ + Err``

    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component.

    Notes
    -----
    Matrices::

        T: x_scores_
        U: y_scores_
        W: x_weights_
        C: y_weights_
        P: x_loadings_
        Q: y_loadings__

    Are computed such that::

        X = T P.T + Err and Y = U Q.T + Err
        T[:, k] = Xk W[:, k] for k in range(n_components)
        U[:, k] = Yk C[:, k] for k in range(n_components)
        x_rotations_ = W (P.T W)^(-1)
        y_rotations_ = C (Q.T C)^(-1)

    where Xk and Yk are residual matrices at iteration k.

    `Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`

    For each component k, find weights u, v that optimizes:
    ``max corr(Xk u, Yk v) * std(Xk u) std(Yk u)``, such that ``|u| = 1``

    Note that it maximizes both the correlations between the scores and the
    intra-block variances.

    The residual matrix of X (Xk+1) block is obtained by the deflation on
    the current X score: x_score.

    The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current X score. This performs the PLS regression known as PLS2. This
    mode is prediction oriented.

    This implementation provides the same results that 3 PLS packages
    provided in the R language (R-project):

        - "mixOmics" with function pls(X, Y, mode = "regression")
        - "plspm " with function plsreg2(X, Y)
        - "pls" with function oscorespls.fit(X, Y)

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSRegression
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> pls2 = PLSRegression(n_components=2)
    >>> pls2.fit(X, Y)
    ... # doctest: +NORMALIZE_WHITESPACE
    PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
            tol=1e-06)
    >>> Y_pred = pls2.predict(X)

    References
    ----------
    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.

    In french but still a reference:
    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.
    """

    def __init__(self, n_components=2, scale=True,
                 max_iter=500, tol=1e-06, copy=True):
        # PLS2 / PLS1: mode A with asymmetric ("regression") deflation
        # and unnormalized Y weights.
        super(PLSRegression, self).__init__(
            n_components=n_components, scale=scale,
            deflation_mode="regression", mode="A",
            norm_y_weights=False, max_iter=max_iter, tol=tol,
            copy=copy)
class PLSCanonical(_PLS):
    """ PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000].

    This class inherits from PLS with mode="A" and deflation_mode="canonical",
    norm_y_weights=True and algorithm="nipals", but svd should provide similar
    results up to numerical errors.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    scale : boolean, scale data? (default True)

    algorithm : string, "nipals" or "svd"
        The algorithm used to estimate the weights. It will be called
        n_components times, i.e. once for each iteration of the outer loop.

    max_iter : an integer, (default 500)
        the maximum number of iterations of the NIPALS inner loop (used
        only if algorithm="nipals")

    tol : non-negative real, default 1e-06
        the tolerance used in the iterative algorithm

    copy : boolean, default True
        Whether the deflation should be done on a copy. Let the default
        value to True unless you don't care about side effect

    n_components : int, number of components to keep. (default 2).

    Attributes
    ----------
    x_weights_ : array, shape = [p, n_components]
        X block weights vectors.

    y_weights_ : array, shape = [q, n_components]
        Y block weights vectors.

    x_loadings_ : array, shape = [p, n_components]
        X block loadings vectors.

    y_loadings_ : array, shape = [q, n_components]
        Y block loadings vectors.

    x_scores_ : array, shape = [n_samples, n_components]
        X scores.

    y_scores_ : array, shape = [n_samples, n_components]
        Y scores.

    x_rotations_ : array, shape = [p, n_components]
        X block to latents rotations.

    y_rotations_ : array, shape = [q, n_components]
        Y block to latents rotations.

    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component. Not useful if the algorithm provided is "svd".

    Notes
    -----
    Matrices::

        T: x_scores_
        U: y_scores_
        W: x_weights_
        C: y_weights_
        P: x_loadings_
        Q: y_loadings__

    Are computed such that::

        X = T P.T + Err and Y = U Q.T + Err
        T[:, k] = Xk W[:, k] for k in range(n_components)
        U[:, k] = Yk C[:, k] for k in range(n_components)
        x_rotations_ = W (P.T W)^(-1)
        y_rotations_ = C (Q.T C)^(-1)

    where Xk and Yk are residual matrices at iteration k.

    `Slides explaining PLS <http://www.eigenvector.com/Docs/Wise_pls_properties.pdf>`

    For each component k, find weights u, v that optimize::

        max corr(Xk u, Yk v) * std(Xk u) std(Yk u), such that ``|u| = |v| = 1``

    Note that it maximizes both the correlations between the scores and the
    intra-block variances.

    The residual matrix of X (Xk+1) block is obtained by the deflation on the
    current X score: x_score.

    The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical symmetric version of the PLS
    regression. But slightly different than the CCA. This is mostly used
    for modeling.

    This implementation provides the same results that the "plspm" package
    provided in the R language (R-project), using the function plsca(X, Y).
    Results are equal or collinear with the function
    ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    relies in the fact that mixOmics implementation does not exactly implement
    the Wold algorithm since it does not normalize y_weights to one.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSCanonical
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> plsca = PLSCanonical(n_components=2)
    >>> plsca.fit(X, Y)
    ... # doctest: +NORMALIZE_WHITESPACE
    PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
                 scale=True, tol=1e-06)
    >>> X_c, Y_c = plsca.transform(X, Y)

    References
    ----------
    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.

    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.

    See also
    --------
    CCA
    PLSSVD
    """

    def __init__(self, n_components=2, scale=True, algorithm="nipals",
                 max_iter=500, tol=1e-06, copy=True):
        # Canonical PLS: mode A with symmetric ("canonical") deflation and
        # Y weights normalized to unit norm (original Wold algorithm).
        super(PLSCanonical, self).__init__(
            n_components=n_components, scale=scale,
            deflation_mode="canonical", mode="A",
            norm_y_weights=True, algorithm=algorithm,
            max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
    """Partial Least Square SVD.

    Performs a single SVD of the cross-covariance matrix X'Y; no
    iterative deflation is involved.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    n_components : int, default 2
        Number of components to keep.
    scale : boolean, default True
        Whether to scale X and Y.
    copy : boolean, default True
        Whether to copy X and Y, or perform in-place computations.

    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.
    y_weights_ : array, [q, n_components]
        Y block weights vectors.
    x_scores_ : array, [n_samples, n_components]
        X scores.
    y_scores_ : array, [n_samples, n_components]
        Y scores.

    See also
    --------
    PLSCanonical
    CCA
    """

    def __init__(self, n_components=2, scale=True, copy=True):
        self.n_components = n_components
        self.scale = scale
        self.copy = copy

    def fit(self, X, Y):
        # Validate the inputs; copies are taken (unless copy=False) because
        # the centering/scaling below mutates the arrays in place.
        check_consistent_length(X, Y)
        X = check_array(X, dtype=np.float64, copy=self.copy)
        Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
        if Y.ndim == 1:
            Y = Y.reshape(-1, 1)
        if self.n_components > max(Y.shape[1], X.shape[1]):
            raise ValueError("Invalid number of components n_components=%d"
                             " with X of shape %s and Y of shape %s."
                             % (self.n_components, str(X.shape), str(Y.shape)))
        # Center (and optionally scale) both blocks in place.
        X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (
            _center_scale_xy(X, Y, self.scale))
        # Decompose the cross-covariance matrix X'Y.
        cross_cov = np.dot(X.T, Y)
        # The arpack svds solver only works when the number of extracted
        # components is smaller than rank(X) - 1, so extract all components
        # with a full (economy-size) SVD and use arpack otherwise.
        if self.n_components >= np.min(cross_cov.shape):
            U, s, V = linalg.svd(cross_cov, full_matrices=False)
        else:
            U, s, V = arpack.svds(cross_cov, k=self.n_components)
        # Sign convention making the output deterministic.
        U, V = svd_flip(U, V)
        V = V.T
        self.x_scores_ = np.dot(X, U)
        self.y_scores_ = np.dot(Y, V)
        self.x_weights_ = U
        self.y_weights_ = V
        return self

    def transform(self, X, Y=None):
        """Apply the dimension reduction learned on the train data."""
        check_is_fitted(self, 'x_mean_')
        X = check_array(X, dtype=np.float64)
        x_centered = (X - self.x_mean_) / self.x_std_
        x_scores = np.dot(x_centered, self.x_weights_)
        if Y is None:
            return x_scores
        if Y.ndim == 1:
            Y = Y.reshape(-1, 1)
        y_centered = (Y - self.y_mean_) / self.y_std_
        y_scores = np.dot(y_centered, self.y_weights_)
        return x_scores, y_scores

    def fit_transform(self, X, y=None, **fit_params):
        """Learn and apply the dimension reduction on the train data.

        Parameters
        ----------
        X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples in the number of samples and
            p is the number of predictors.
        Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples in the number of samples and
            q is the number of response variables.

        Returns
        -------
        x_scores if Y is not given, (x_scores, y_scores) otherwise.
        """
        return self.fit(X, y, **fit_params).transform(X, y)
| bsd-3-clause |
jniediek/mne-python | examples/preprocessing/plot_rereference_eeg.py | 9 | 2271 | """
=============================
Re-referencing the EEG signal
=============================
Load raw data and apply some EEG referencing schemes.
"""
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from matplotlib import pyplot as plt
print(__doc__)

# Locate the sample dataset on disk.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5

# Load the continuous recording and the stimulus events.
raw = mne.io.read_raw_fif(raw_fname, preload=True)
events = mne.read_events(event_fname)

# Only the EEG (and EOG, for rejection) channels are plotted, to visualize
# the difference between referencing schemes.
picks = mne.pick_types(raw.info, meg=False, eeg=True, eog=True, exclude='bads')

###############################################################################
# Apply different EEG referencing schemes and plot the resulting evokeds
reject = dict(eeg=180e-6, eog=150e-6)
epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,
                     picks=picks, reject=reject)

fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, sharex=True)


def _epoch_and_plot(raw_ref, ax, title):
    """Epoch the re-referenced raw data, average, and plot onto *ax*."""
    evoked = mne.Epochs(raw_ref, **epochs_params).average()
    evoked.plot(axes=ax, titles=dict(eeg=title))


# No reference. This assumes the EEG has already been referenced properly
# and explicitly prevents MNE from adding its default EEG reference.
raw_no_ref, _ = mne.io.set_eeg_reference(raw, [])
_epoch_and_plot(raw_no_ref, ax1, 'EEG Original reference')
del raw_no_ref  # free memory

# Average reference. Normally added by default, here made explicit.
raw_car, _ = mne.io.set_eeg_reference(raw)
_epoch_and_plot(raw_car, ax2, 'EEG Average reference')
del raw_car

# Custom reference: the mean of channels EEG 001 and EEG 002.
raw_custom, _ = mne.io.set_eeg_reference(raw, ['EEG 001', 'EEG 002'])
_epoch_and_plot(raw_custom, ax3, 'EEG Custom reference')
del raw_custom

mne.viz.tight_layout()
| bsd-3-clause |
jzt5132/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt

##############################################################################
# Generate the data
n_samples = 60
n_features = 20

rng = np.random.RandomState(1)
true_prec = make_sparse_spd_matrix(n_features, alpha=.98,
                                   smallest_coef=.4,
                                   largest_coef=.7,
                                   random_state=rng)
true_cov = linalg.inv(true_prec)
# Rescale so the covariance has a unit diagonal (a correlation matrix);
# the precision matrix is rescaled accordingly.
d = np.sqrt(np.diag(true_cov))
true_cov /= d
true_cov /= d[:, np.newaxis]
true_prec *= d
true_prec *= d[:, np.newaxis]
# Draw samples, then standardize each time series.
X = rng.multivariate_normal(np.zeros(n_features), true_cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)

##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples

model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_

lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)

##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)

# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
        ('GraphLasso', cov_), ('True', true_cov)]
vmax = cov_.max()
for plot_id, (name, matrix) in enumerate(covs, start=1):
    plt.subplot(2, 4, plot_id)
    plt.imshow(matrix, interpolation='nearest', vmin=-vmax, vmax=vmax,
               cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.title('%s covariance' % name)

# plot the precisions; zero entries are masked out so sparsity is visible
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
         ('GraphLasso', prec_), ('True', true_prec)]
vmax = .9 * prec_.max()
for plot_id, (name, matrix) in enumerate(precs, start=5):
    ax = plt.subplot(2, 4, plot_id)
    plt.imshow(np.ma.masked_equal(matrix, 0),
               interpolation='nearest', vmin=-vmax, vmax=vmax,
               cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.title('%s precision' % name)
    ax.set_axis_bgcolor('.7')

# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
ggventurini/dualscope123 | dualscope123/main.py | 1 | 40618 | #!/usr/bin/env python
"""
Oscilloscope + spectrum analyser in Python for the NIOS server.
Modified version from the original code by R. Fearick.
Giuseppe Venturini, July 2012-2013
Original copyright notice follows. The same license applies.
------------------------------------------------------------
Copyright (C) 2008, Roger Fearick, University of Cape Town
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
------------------------------------------------------------
Version 0.1
This code provides a two-channel oscilloscope and spectrum analyzer.
Dependencies:
Python 2.6+
numpy -- numerics, fft
PyQt4, PyQwt5 -- gui, graphics
Optional packages:
pyspectrum -- expert mode spectrum calculation
Typically, a modification of the Python path and ld library is necessary,
like this:
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:.
export PYTHONPATH=$PYTHONPATH:.
The code can be adjusted for different sampling rates and chunks lengths.
The interface, based on qwt, uses a familar 'knob based' layout so that it
approximates an analogue scope.
Traces can be averaged to reduce influence of noise.
A cross hair status display permits the reading of values off the screen.
Printing and exporting CSV and PDF files are provided.
FFT options
    - by default we use the periodogram algorithm from pyspectrum [1] - not
in Debian stable but available through pypi and easy_install.
[1] https://www.assembla.com/spaces/PySpectrum/wiki
- If 'pyspectrum' is not available, we fallback to using the FFT
method from numpy to compute the PSD.
- Using numpy to calculate the FFT can be forced setting:
USE_NUMPY_FFT = True
in the following code.
- additionally, it is possible to use matplotlib.psd().
-> you need to modify the sources to do so.
INSTALLING pyspectrum
The package pyspectrum can be installed with:
'pip install spectrum'
"""
import sys
import struct
import subprocess
import time
import os.path
import ConfigParser
import importlib
from PyQt4 import Qt
from PyQt4 import Qwt5 as Qwt
import numpy as np
import numpy.fft as FFT
# part of this package -- csv interface and toolbar icons
from . import csvlib, icons, utils
import dualscope123.probes
from dualscope123.probes import eth_nios
# scope configuration
CHANNELS = 2
DEFAULT_TIMEBASE = 0.01
# channel-selection constants used throughout the GUI classes below
BOTH12 = 0
CH1 = 1
CH2 = 2
scopeheight = 500 #px
scopewidth = 800 #px
SELECTEDCH = BOTH12
# pen widths for the time-domain and FFT traces
TIMEPENWIDTH = 1
FFTPENWIDTH = 2
# status messages
freezeInfo = 'Freeze: Press mouse button and drag'
cursorInfo = 'Cursor Pos: Press mouse button in plot region'
# FFT CONFIG
# Probe for the optional 'spectrum' package; fall back to numpy's FFT when
# it is missing or explicitly disabled via USE_NUMPY_FFT.
USE_NUMPY_FFT = False
try:
    import spectrum
    print "(II) spectrum MODULE FOUND"
    SPECTRUM_MODULE = True
except ImportError:
    print "(WW) PSD: spectrum MODULE NOT FOUND"
    SPECTRUM_MODULE = False
if USE_NUMPY_FFT:
    print "(WW) SPECTRUM MODULE DISABLED in source"
    SPECTRUM_MODULE = False
if not SPECTRUM_MODULE:
    print "(WW) PSD: using FFTs through NUMPY.fftpack"
# utility classes
class LogKnob(Qwt.QwtKnob):
    """
    Provide knob with log scale.

    The knob's internal value is log10 of the displayed value; setRange()
    and setValue() take linear-scale arguments and convert.
    """
    def __init__(self, *args):
        # direct call instead of the deprecated Python 2 apply() builtin
        Qwt.QwtKnob.__init__(self, *args)
        self.setScaleEngine(Qwt.QwtLog10ScaleEngine())

    def setRange(self, minR, maxR, step=.333333):
        """Set the knob range; minR/maxR are in linear units."""
        self.setScale(minR, maxR)
        Qwt.QwtKnob.setRange(self, np.log10(minR), np.log10(maxR), step)

    def setValue(self, val):
        """Set the knob position; val is in linear units."""
        Qwt.QwtKnob.setValue(self, np.log10(val))
class LblKnob:
    """
    Provide knob with a label underneath.

    Wraps either a LogKnob (logscale=1) or a plain QwtKnob, plus a QLabel,
    and forwards the usual range/value calls to the knob.
    """
    def __init__(self, wgt, x, y, name, logscale=0):
        if logscale:
            self.knob = LogKnob(wgt)
        else:
            self.knob = Qwt.QwtKnob(wgt)
        color = Qt.QColor(200, 200, 210)
        self.knob.palette().setColor(Qt.QPalette.Active,
                                     Qt.QPalette.Button,
                                     color)
        self.lbl = Qt.QLabel(name, wgt)
        self.knob.setGeometry(x, y, 140, 100)
        # HACK: names starting with 'o' (the offset knobs) get a narrower
        # knob; a flag parameter would be cleaner.
        if name[0] == 'o':
            self.knob.setKnobWidth(40)
        self.lbl.setGeometry(x, y+90, 140, 15)
        self.lbl.setAlignment(Qt.Qt.AlignCenter)

    def setRange(self, *args):
        # direct call instead of the deprecated Python 2 apply() builtin
        self.knob.setRange(*args)

    def setValue(self, *args):
        self.knob.setValue(*args)

    def setScaleMaxMajor(self, *args):
        self.knob.setScaleMaxMajor(*args)
class Scope(Qwt.QwtPlot):
"""
Oscilloscope display widget
"""
def __init__(self, *args):
apply(Qwt.QwtPlot.__init__, (self,) + args)
self.setTitle('Scope')
self.setCanvasBackground(Qt.Qt.white)
# grid
self.grid = Qwt.QwtPlotGrid()
self.grid.enableXMin(True)
self.grid.setMajPen(Qt.QPen(Qt.Qt.gray, 0, Qt.Qt.SolidLine))
self.grid.attach(self)
# axes
self.enableAxis(Qwt.QwtPlot.yRight)
self.setAxisTitle(Qwt.QwtPlot.xBottom, 'Time [s]')
self.setAxisTitle(Qwt.QwtPlot.yLeft, 'Amplitude []')
self.setAxisMaxMajor(Qwt.QwtPlot.xBottom, 10)
self.setAxisMaxMinor(Qwt.QwtPlot.xBottom, 0)
self.setAxisScaleEngine(Qwt.QwtPlot.yRight, Qwt.QwtLinearScaleEngine())
self.setAxisMaxMajor(Qwt.QwtPlot.yLeft, 10)
self.setAxisMaxMinor(Qwt.QwtPlot.yLeft, 0)
self.setAxisMaxMajor(Qwt.QwtPlot.yRight, 10)
self.setAxisMaxMinor(Qwt.QwtPlot.yRight, 0)
# curves for scope traces: 2 first so 1 is on top
self.curve2 = Qwt.QwtPlotCurve('Trace2')
self.curve2.setSymbol(Qwt.QwtSymbol(Qwt.QwtSymbol.Ellipse,
Qt.QBrush(2),
Qt.QPen(Qt.Qt.darkMagenta),
Qt.QSize(3, 3)))
self.curve2.setPen(Qt.QPen(Qt.Qt.magenta, TIMEPENWIDTH))
self.curve2.setYAxis(Qwt.QwtPlot.yRight)
self.curve2.attach(self)
self.curve1 = Qwt.QwtPlotCurve('Trace1')
self.curve1.setSymbol(Qwt.QwtSymbol(Qwt.QwtSymbol.Ellipse,
Qt.QBrush(2),
Qt.QPen(Qt.Qt.darkBlue),
Qt.QSize(3, 3)))
self.curve1.setPen(Qt.QPen(Qt.Qt.blue, TIMEPENWIDTH))
self.curve1.setYAxis(Qwt.QwtPlot.yLeft)
self.curve1.attach(self)
# default settings
self.triggerval = 0.10
self.triggerCH = None
self.triggerslope = 0
self.maxamp = 100.0
self.maxamp2 = 100.0
self.freeze = 0
self.average = 0
self.autocorrelation = 0
self.avcount = 0
self.datastream = None
self.offset1 = 0.0
self.offset2 = 0.0
self.maxtime = 0.1
# set data
# NumPy: f, g, a and p are arrays!
self.dt = 1.0/samplerate
self.f = np.arange(0.0, 10.0, self.dt)
self.a1 = 0.0*self.f
self.a2 = 0.0*self.f
self.curve1.setData(self.f, self.a1)
self.curve2.setData(self.f, self.a2)
# start self.timerEvent() callbacks running
self.timer_id = self.startTimer(self.maxtime*100+50)
# plot
self.replot()
# convenience methods for knob callbacks
def setMaxAmp(self, val):
self.maxamp = val
def setMaxAmp2(self, val):
self.maxamp2 = val
def setMaxTime(self, val):
self.maxtime = val
def setOffset1(self, val):
self.offset1 = val
def setOffset2(self, val):
self.offset2 = val
def setTriggerLevel(self, val):
self.triggerval = val
def setTriggerCH(self, val):
self.triggerCH = val
def setTriggerSlope(self, val):
self.triggerslope = val
# plot scope traces
def setDisplay(self):
l = len(self.a1)
if SELECTEDCH == BOTH12:
self.curve1.setData(self.f[0:l], self.a1[:l]+self.offset1*self.maxamp)
self.curve2.setData(self.f[0:l], self.a2[:l]+self.offset2*self.maxamp2)
elif SELECTEDCH == CH2:
self.curve1.setData([0.0,0.0], [0.0,0.0])
self.curve2.setData(self.f[0:l], self.a2[:l]+self.offset2*self.maxamp2)
elif SELECTEDCH == CH1:
self.curve1.setData(self.f[0:l], self.a1[:l]+self.offset1*self.maxamp)
self.curve2.setData([0.0,0.0], [0.0,0.0])
self.replot()
def getValue(self, index):
return self.f[index], self.a[index]
def setAverage(self, state):
self.average = state
self.avcount = 0
def setAutoc(self, state):
self.autocorrelation = state
self.avcount = 0
def setFreeze(self, freeze):
self.freeze = freeze
def setDatastream(self, datastream):
self.datastream = datastream
def updateTimer(self):
self.killTimer(self.timer_id)
self.timer_id = self.startTimer(self.maxtime*100 + 50)
# timer callback that does the work
def timerEvent(self,e): # Scope
global fftbuffersize
if self.datastream == None: return
if self.freeze == 1: return
points = int(np.ceil(self.maxtime*samplerate))
if self.triggerCH or self.autocorrelation:
# we read twice as much data to be sure to be able to display data for all time points.
# independently of trigger point location.
read_points = 2*points
else:
read_points = points
fftbuffersize = read_points
if SELECTEDCH == BOTH12:
channel = 12
if verbose:
print "Reading %d frames" % (read_points)
X, Y = self.datastream.read(channel, read_points, verbose)
if X is None or not len(X): return
if len(X) == 0: return
i=0
data_CH1 = X
data_CH2 = Y
elif SELECTEDCH == CH1:
channel = 1
if verbose:
print "Reading %d frames" % (read_points)
X = self.datastream.read(channel, read_points, verbose)
if X is None or not len(X): return
if len(X) == 0: return
i=0
data_CH1 = X
data_CH2 = np.zeros((points,))
if SELECTEDCH == CH2:
channel = 2
if verbose:
print "Reading %d frames" % (read_points)
X = self.datastream.read(channel, read_points, verbose)
if X is None or not len(X): return
data_CH2 = X
data_CH1 = np.zeros((points,))
if self.triggerCH == 1 and (SELECTEDCH == BOTH12 or SELECTEDCH == CH1):
print "Waiting for CH1 trigger..."
if self.triggerslope == 0:
zero_crossings = np.where(np.diff(np.sign(data_CH1[points/2:-points/2] - self.triggerval*self.maxamp)) != 0)[0]
if self.triggerslope == 1:
zero_crossings = np.where(np.diff(np.sign(data_CH1[points/2:-points/2] - self.triggerval*self.maxamp)) > 0)[0]
if self.triggerslope == 2:
zero_crossings = np.where(np.diff(np.sign(data_CH1[points/2:-points/2] - self.triggerval*self.maxamp)) < 0)[0]
if not len(zero_crossings): return
print "Triggering on sample", zero_crossings[0]
imin = zero_crossings[0]
imax = zero_crossings[0] + points
data_CH1 = data_CH1[imin:imax]
elif self.triggerCH == 2 and (SELECTEDCH == BOTH12 or SELECTEDCH == CH2):
print "Waiting for CH2 trigger..."
if self.triggerslope == 0:
zero_crossings = np.where(np.diff(np.sign(data_CH2[points/2:-points/2] - self.triggerval*self.maxamp2)) != 0)[0]
if self.triggerslope == 1:
zero_crossings = np.where(np.diff(np.sign(data_CH2[points/2:-points/2] - self.triggerval*self.maxamp2)) > 0)[0]
if self.triggerslope == 2:
zero_crossings = np.where(np.diff(np.sign(data_CH2[points/2:-points/2] - self.triggerval*self.maxamp2)) < 0)[0]
if not len(zero_crossings): return
print "Triggering on sample", zero_crossings[0]
imin = zero_crossings[0]
imax = zero_crossings[0] + points
data_CH2 = data_CH2[imin:imax]
if self.autocorrelation:
if SELECTEDCH == BOTH12 or SELECTEDCH == CH1:
data_CH1 = utils.autocorrelation(data_CH1[:2*points])[:points]
else:
data_CH1 = np.zeros((points,))
if SELECTEDCH == BOTH12 or SELECTEDCH == CH2:
data_CH2 = utils.autocorrelation(data_CH2[:2*points])[:points]
else:
data_CH2 = np.zeros((points,))
if self.average == 0:
self.a1 = data_CH1
self.a2 = data_CH2
else:
self.avcount += 1
if self.avcount == 1:
self.sumCH1 = np.array(data_CH1, dtype=np.float_)
self.sumCH2 = np.array(data_CH2, dtype=np.float_)
else:
if SELECTEDCH==BOTH12:
assert len(data_CH1) == len(data_CH2)
lp = len(data_CH1)
if len(self.sumCH1) == lp and len(self.sumCH2) == lp:
self.sumCH1 = self.sumCH1[:lp] + np.array(data_CH1[:lp], dtype=np.float_)
self.sumCH2 = self.sumCH2[:lp] + np.array(data_CH2[:lp], dtype=np.float_)
else:
self.sumCH1 = np.array(data_CH1, dtype=np.float_)
self.sumCH2 = np.array(data_CH2, dtype=np.float_)
self.avcount = 1
elif SELECTEDCH == CH1:
lp = len(data_CH1)
if len(self.sumCH1) == lp:
self.sumCH1 = self.sumCH1[:lp] + np.array(data_CH1[:lp], dtype=np.float_)
else:
self.sumCH1 = np.array(data_CH1, dtype=np.float_)
self.avcount = 1
elif SELECTEDCH==CH2:
lp = len(data_CH2)
if len(self.sumCH2) == lp:
self.sumCH2 = self.sumCH2[:lp] + np.array(data_CH2[:lp], dtype=np.float_)
else:
self.sumCH2 = np.array(data_CH2, dtype=np.float_)
self.avcount = 1
self.a1 = self.sumCH1/self.avcount
self.a2 = self.sumCH2/self.avcount
self.setDisplay()
# initial axis settings for the time-domain scope (seconds / amplitude units)
inittime=0.01
initamp=100
class ScopeFrame(Qt.QFrame):
    """
    Oscilloscope widget --- contains controls + display.

    Wires the knobs and combo boxes to the embedded Scope plot.
    """
    def __init__(self, *args):
        # direct call instead of the deprecated Python 2 apply() builtin
        Qt.QFrame.__init__(self, *args)
        # the following: setPal.. doesn't seem to work on Win
        # BUGFIX: QColor was an unbound name (only Qt/Qwt are imported), so
        # this call always raised NameError, silently eaten by a bare
        # except. Use Qt.QColor and only swallow a missing Qt3-style API.
        try:
            self.setPaletteBackgroundColor(Qt.QColor(240, 240, 245))
        except AttributeError:
            pass
        hknobpos = scopewidth + 20
        vknobpos = scopeheight + 30
        self.setFixedSize(scopewidth+150, scopeheight+150)
        self.freezeState = 0
        # trigger channel selection
        self.triggerComboBox = Qt.QComboBox(self)
        self.triggerComboBox.setGeometry(hknobpos+10, 50, 100, 40)
        self.triggerComboBox.addItem("Trigger off")
        self.triggerComboBox.addItem("CH1")
        self.triggerComboBox.addItem("CH2")
        self.triggerComboBox.setCurrentIndex(0)
        # trigger slope selection
        self.triggerSlopeComboBox = Qt.QComboBox(self)
        self.triggerSlopeComboBox.setGeometry(hknobpos+10, 100, 100, 40)
        self.triggerSlopeComboBox.addItem("Any Slope")
        self.triggerSlopeComboBox.addItem("Positive")
        self.triggerSlopeComboBox.addItem("Negative")
        self.triggerSlopeComboBox.setCurrentIndex(0)
        # knobs: trigger level, timebase, per-channel gain and offset
        self.knbLevel = LblKnob(self, hknobpos, 160, "Trigger level (%FS)")
        self.knbTime = LblKnob(self, hknobpos, 300, "Time", 1)
        self.knbSignal = LblKnob(self, 150, vknobpos, "Signal1", 1)
        self.knbSignal2 = LblKnob(self, 450, vknobpos, "Signal2", 1)
        self.knbOffset1 = LblKnob(self, 10, vknobpos, "offset1")
        self.knbOffset2 = LblKnob(self, 310, vknobpos, "offset2")
        self.knbTime.setRange(0.0001, 1.0)
        self.knbTime.setValue(DEFAULT_TIMEBASE)
        self.knbSignal.setRange(1, 1e6, 1)
        self.knbSignal.setValue(100.0)
        self.knbSignal2.setRange(1, 1e6, 1)
        self.knbSignal2.setValue(100.0)
        self.knbOffset2.setRange(-1.0, 1.0, 0.1)
        self.knbOffset2.setValue(0.0)
        self.knbOffset1.setRange(-1.0, 1.0, 0.1)
        self.knbOffset1.setValue(0.0)
        self.knbLevel.setRange(-1.0, 1.0, 0.1)
        self.knbLevel.setValue(0.1)
        self.knbLevel.setScaleMaxMajor(10)
        self.plot = Scope(self)
        self.plot.setGeometry(10, 10, scopewidth, scopeheight)
        # cross-hair picker for reading values off the screen
        self.picker = Qwt.QwtPlotPicker(
            Qwt.QwtPlot.xBottom,
            Qwt.QwtPlot.yLeft,
            Qwt.QwtPicker.PointSelection | Qwt.QwtPicker.DragSelection,
            Qwt.QwtPlotPicker.CrossRubberBand,
            Qwt.QwtPicker.ActiveOnly,  # AlwaysOn,
            self.plot.canvas())
        self.picker.setRubberBandPen(Qt.QPen(Qt.Qt.green))
        self.picker.setTrackerPen(Qt.QPen(Qt.Qt.cyan))
        # wire controls to the plot
        self.connect(self.knbTime.knob, Qt.SIGNAL("valueChanged(double)"),
                     self.setTimebase)
        self.knbTime.setValue(0.01)
        self.connect(self.knbSignal.knob, Qt.SIGNAL("valueChanged(double)"),
                     self.setAmplitude)
        self.connect(self.knbSignal2.knob, Qt.SIGNAL("valueChanged(double)"),
                     self.setAmplitude2)
        self.connect(self.knbLevel.knob, Qt.SIGNAL("valueChanged(double)"),
                     self.setTriggerlevel)
        self.connect(self.knbOffset1.knob, Qt.SIGNAL("valueChanged(double)"),
                     self.plot.setOffset1)
        self.connect(self.knbOffset2.knob, Qt.SIGNAL("valueChanged(double)"),
                     self.plot.setOffset2)
        self.connect(self.triggerComboBox, Qt.SIGNAL('currentIndexChanged(int)'), self.setTriggerCH)
        self.connect(self.triggerSlopeComboBox, Qt.SIGNAL('currentIndexChanged(int)'), self.plot.setTriggerSlope)
        self.knbLevel.setValue(0.1)
        self.plot.setAxisScale(Qwt.QwtPlot.xBottom, 0.0, 10.0*inittime)
        self.plot.setAxisScale(Qwt.QwtPlot.yLeft, -initamp, initamp)
        self.plot.setAxisScale(Qwt.QwtPlot.yRight, -initamp, initamp)
        self.plot.show()

    def _calcKnobVal(self, val):
        """Snap a log-scale knob position to a 1-2-5 sequence value."""
        ival = np.floor(val)
        frac = val - ival
        if frac >= 0.9:
            frac = 1.0
        elif frac >= 0.66:
            frac = np.log10(5.0)
        elif frac >= np.log10(2.0):
            frac = np.log10(2.0)
        else:
            frac = 0.0
        dt = 10**frac*10**ival
        return dt

    def setTimebase(self, val):
        dt = self._calcKnobVal(val)
        self.plot.setAxisScale(Qwt.QwtPlot.xBottom, 0.0, 10.0*dt)
        self.plot.setMaxTime(dt*10.0)
        self.plot.replot()

    def setAmplitude(self, val):
        dt = self._calcKnobVal(val)
        self.plot.setAxisScale(Qwt.QwtPlot.yLeft, -dt, dt)
        self.plot.setMaxAmp(dt)
        self.plot.replot()

    def setAmplitude2(self, val):
        dt = self._calcKnobVal(val)
        self.plot.setAxisScale(Qwt.QwtPlot.yRight, -dt, dt)
        self.plot.setMaxAmp2(dt)
        self.plot.replot()

    def setTriggerlevel(self, val):
        self.plot.setTriggerLevel(val)
        self.plot.setDisplay()

    def setTriggerCH(self, val):
        # combo index 0 is "Trigger off" -> no trigger channel
        if val == 0:
            val = None
        self.plot.setTriggerCH(val)
        self.plot.setDisplay()
#--------------------------------------------------------------------
class FScope(Qwt.QwtPlot):
"""
Power spectrum display widget
"""
def __init__(self, *args):
apply(Qwt.QwtPlot.__init__, (self,) + args)
self.setTitle('Power spectrum');
self.setCanvasBackground(Qt.Qt.white)
# grid
self.grid = Qwt.QwtPlotGrid()
self.grid.enableXMin(True)
self.grid.setMajPen(Qt.QPen(Qt.Qt.gray, 0, Qt.Qt.SolidLine));
self.grid.attach(self)
# axes
self.setAxisTitle(Qwt.QwtPlot.xBottom, 'Frequency [Hz]');
self.setAxisTitle(Qwt.QwtPlot.yLeft, 'Power Spectrum [dBc/Hz]');
self.setAxisMaxMajor(Qwt.QwtPlot.xBottom, 10);
self.setAxisMaxMinor(Qwt.QwtPlot.xBottom, 0);
self.setAxisMaxMajor(Qwt.QwtPlot.yLeft, 10);
self.setAxisMaxMinor(Qwt.QwtPlot.yLeft, 0);
# curves
self.curve2 = Qwt.QwtPlotCurve('PSTrace2')
self.curve2.setPen(Qt.QPen(Qt.Qt.magenta,FFTPENWIDTH))
self.curve2.setYAxis(Qwt.QwtPlot.yLeft)
self.curve2.attach(self)
self.curve1 = Qwt.QwtPlotCurve('PSTrace1')
self.curve1.setPen(Qt.QPen(Qt.Qt.blue,FFTPENWIDTH))
self.curve1.setYAxis(Qwt.QwtPlot.yLeft)
self.curve1.attach(self)
self.triggerval=0.0
self.maxamp=100.0
self.maxamp2=100.0
self.freeze=0
self.average=0
self.avcount=0
self.logy=1
self.datastream=None
self.dt=1.0/samplerate
self.df=1.0/(fftbuffersize*self.dt)
self.f = np.arange(0.0, samplerate, self.df)
self.a1 = 0.0*self.f
self.a2 = 0.0*self.f
self.curve1.setData(self.f, self.a1)
self.curve2.setData(self.f, self.a2)
self.setAxisScale( Qwt.QwtPlot.xBottom, 0.0, 12.5*initfreq)
self.setAxisScale( Qwt.QwtPlot.yLeft, -120.0, 0.0)
self.startTimer(100)
self.replot()
def resetBuffer(self):
self.df=1.0/(fftbuffersize*self.dt)
self.f = np.arange(0.0, samplerate, self.df)
self.a1 = 0.0*self.f
self.a2 = 0.0*self.f
self.curve1.setData(self.curve1, self.f, self.a1)
self.curve1.setData(self.curve1, self.f, self.a2)
def setMaxAmp(self, val):
if val>0.6:
self.setAxisScale( Qwt.QwtPlot.yLeft, -120.0, 0.0)
self.logy=1
else:
self.setAxisScale( Qwt.QwtPlot.yLeft, 0.0, 10.0*val)
self.logy=0
self.maxamp=val
def setMaxTime(self, val):
self.maxtime=val
self.updateTimer()
def setTriggerLevel(self, val):
self.triggerval=val
def setDisplay(self):
n=fftbuffersize/2
if SELECTEDCH==BOTH12:
self.curve1.setData(self.f[0:n], self.a1[:n])
self.curve2.setData(self.f[0:n], self.a2[:n])
elif SELECTEDCH==CH2:
self.curve1.setData([0.0,0.0], [0.0,0.0])
self.curve2.setData(self.f[0:n], self.a2[:n])
elif SELECTEDCH==CH1:
self.curve1.setData(self.f[0:n], self.a1[:n])
self.curve2.setData([0.0,0.0], [0.0,0.0])
self.replot()
def getValue(self, index):
return self.f[index],self.a1[index]
def setAverage(self, state):
self.average = state
self.avcount=0
def setFreeze(self, freeze):
self.freeze = freeze
def setDatastream(self, datastream):
self.datastream = datastream
def timerEvent(self,e): # FFT
global fftbuffersize
if self.datastream == None: return
if self.freeze == 1: return
if SELECTEDCH == BOTH12:
channel = 12
X, Y = self.datastream.read(channel, fftbuffersize, verbose)
if X is None or not len(X): return
data_CH1 = X[:fftbuffersize]
data_CH2 = Y[:fftbuffersize]
elif SELECTEDCH == CH1:
channel = 1
X = self.datastream.read(channel, fftbuffersize, verbose)
if X is None or not len(X): return
data_CH1 = X[:fftbuffersize]
data_CH2 = np.ones((fftbuffersize,))
elif SELECTEDCH == CH2:
channel = 2
X = self.datastream.read(channel, fftbuffersize, verbose)
if X is None or not len(X): return
data_CH2 = X[:fftbuffersize]
data_CH1 = np.ones((fftbuffersize,))
self.df = 1.0/(fftbuffersize*self.dt)
self.setAxisTitle(Qwt.QwtPlot.xBottom, 'Frequency [Hz] - Bin width %g Hz' % (self.df,))
self.f = np.arange(0.0, samplerate, self.df)
if not SPECTRUM_MODULE:
lenX = fftbuffersize
window = np.blackman(lenX)
sumw = np.sum(window*window)
A = FFT.fft(data_CH1*window) #lenX
B = (A*np.conjugate(A)).real
A = FFT.fft(data_CH2*window) #lenX
B2 = (A*np.conjugate(A)).real
sumw *= 2.0 # sym about Nyquist (*4); use rms (/2)
sumw /= self.dt # sample rate
B /= sumw
B2 /= sumw
else:
print "FFT buffer size: %d points" % (fftbuffersize,)
B = spectrum.Periodogram(np.array(data_CH1, dtype=float64), samplerate)
B.sides = 'onesided'
B.run()
B = B.get_converted_psd('onesided')
B2 = spectrum.Periodogram(np.array(data_CH2, dtype=float64), samplerate)
B2.sides = 'onesided'
B2.run()
B2 = B2.get_converted_psd('onesided')
if self.logy:
P1 = np.log10(B)*10.0
P2 = np.log10(B2)*10.0
P1 -= P1.max()
P2 -= P2.max()
else:
P1 = B
P2 = B2
if not self.average:
self.a1 = P1
self.a2 = P2
self.avcount = 0
else:
self.avcount += 1
if self.avcount == 1:
self.sumP1 = P1
self.sumP2 = P2
elif self.sumP1.shape != P1.shape or self.sumP1.shape != P1.shape:
self.avcount = 1
self.sumP1 = P1
self.sumP2 = P2
else:
self.sumP1 += P1
self.sumP2 += P2
self.a1 = self.sumP1/self.avcount
self.a2 = self.sumP2/self.avcount
self.setDisplay()
initfreq = 100.0
class FScopeFrame(Qt.QFrame):
    """
    Power spectrum widget --- contains controls + display.

    Wires the frequency/amplitude knobs to the embedded FScope plot.
    """
    def __init__(self, *args):
        # direct call instead of the deprecated Python 2 apply() builtin
        Qt.QFrame.__init__(self, *args)
        vknobpos = scopeheight + 30
        hknobpos = scopewidth + 10
        # the following: setPal.. doesn't seem to work on Win
        # BUGFIX: QColor was an unbound name (only Qt/Qwt are imported), so
        # this call always raised NameError, silently eaten by a bare
        # except. Use Qt.QColor and only swallow a missing Qt3-style API.
        try:
            self.setPaletteBackgroundColor(Qt.QColor(240, 240, 245))
        except AttributeError:
            pass
        self.setFixedSize(scopewidth+160, scopeheight+160)
        self.freezeState = 0
        self.knbSignal = LblKnob(self, 160, vknobpos, "Signal", 1)
        self.knbTime = LblKnob(self, 310, vknobpos, "Frequency", 1)
        self.knbTime.setRange(1.0, 1250.0)
        self.knbSignal.setRange(100, 1000000)
        self.plot = FScope(self)
        # NOTE(review): non-integer x coordinate; relies on sip coercing the
        # float to int for setGeometry — confirm.
        self.plot.setGeometry(12.5, 10, scopewidth+120, scopeheight)
        # cross-hair picker for reading values off the screen
        self.picker = Qwt.QwtPlotPicker(
            Qwt.QwtPlot.xBottom,
            Qwt.QwtPlot.yLeft,
            Qwt.QwtPicker.PointSelection | Qwt.QwtPicker.DragSelection,
            Qwt.QwtPlotPicker.CrossRubberBand,
            Qwt.QwtPicker.ActiveOnly,  # AlwaysOn,
            self.plot.canvas())
        self.picker.setRubberBandPen(Qt.QPen(Qt.Qt.green))
        self.picker.setTrackerPen(Qt.QPen(Qt.Qt.cyan))
        self.connect(self.knbTime.knob, Qt.SIGNAL("valueChanged(double)"),
                     self.setTimebase)
        self.knbTime.setValue(1000.0)
        self.connect(self.knbSignal.knob, Qt.SIGNAL("valueChanged(double)"),
                     self.setAmplitude)
        self.knbSignal.setValue(1000000)
        self.plot.show()

    def _calcKnobVal(self, val):
        """Snap a log-scale knob position to a 1-2-5 sequence value."""
        ival = np.floor(val)
        frac = val - ival
        if frac >= 0.9:
            frac = 1.0
        elif frac >= 0.66:
            frac = np.log10(5.0)
        elif frac >= np.log10(2.0):
            frac = np.log10(2.0)
        else:
            frac = 0.0
        dt = 10**frac*10**ival
        return dt

    def setTimebase(self, val):
        dt = self._calcKnobVal(val)
        self.plot.setAxisScale(Qwt.QwtPlot.xBottom, 0.0, 12.5*dt)
        self.plot.replot()

    def setAmplitude(self, val):
        minp = self._calcKnobVal(val)
        self.plot.setAxisScale(Qwt.QwtPlot.yLeft, -int(np.log10(minp)*20), 0.0)
        self.plot.replot()
#---------------------------------------------------------------------
class FScopeDemo(Qt.QMainWindow):
    """
    Application container widget
    Contains scope and power spectrum analyser in tabbed windows.
    Enables switching between the two.
    Handles toolbar and status.
    """
    def __init__(self, *args):
        # Python 2 idiom: forward any positional args to QMainWindow.
        apply(Qt.QMainWindow.__init__, (self,) + args)
        # Toolbar toggle states (0/1): frozen, fft-vs-scope tab, averaging,
        # autocorrelation.
        self.freezeState = 0
        self.changeState = 0
        self.averageState = 0
        self.autocState = 0
        # Two panes in a tab widget: time-domain scope and FFT view.
        self.scope = ScopeFrame(self)
        self.current = self.scope  # the pane whose plot is currently live
        self.pwspec = FScopeFrame(self)
        self.pwspec.hide()
        self.stack=Qt.QTabWidget(self)
        self.stack.addTab(self.scope,"scope")
        self.stack.addTab(self.pwspec,"fft")
        self.setCentralWidget(self.stack)
        toolBar = Qt.QToolBar(self)
        self.addToolBar(toolBar)
        sb=self.statusBar()
        sbfont=Qt.QFont("Helvetica",12)
        sb.setFont(sbfont)
        # --- toolbar buttons: freeze, export, mode and processing toggles ---
        self.btnFreeze = Qt.QToolButton(toolBar)
        self.btnFreeze.setText("Freeze")
        self.btnFreeze.setIcon(Qt.QIcon(Qt.QPixmap(icons.stopicon)))
        self.btnFreeze.setCheckable(True)
        self.btnFreeze.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
        toolBar.addWidget(self.btnFreeze)
        self.btnSave = Qt.QToolButton(toolBar)
        self.btnSave.setText("Save CSV")
        self.btnSave.setIcon(Qt.QIcon(Qt.QPixmap(icons.save)))
        self.btnSave.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
        toolBar.addWidget(self.btnSave)
        self.btnPDF = Qt.QToolButton(toolBar)
        self.btnPDF.setText("Export PDF")
        self.btnPDF.setIcon(Qt.QIcon(Qt.QPixmap(icons.pdf)))
        self.btnPDF.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
        toolBar.addWidget(self.btnPDF)
        self.btnPrint = Qt.QToolButton(toolBar)
        self.btnPrint.setText("Print")
        self.btnPrint.setIcon(Qt.QIcon(Qt.QPixmap(icons.print_xpm)))
        self.btnPrint.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
        toolBar.addWidget(self.btnPrint)
        self.btnMode = Qt.QToolButton(toolBar)
        self.btnMode.setText("fft")
        self.btnMode.setIcon(Qt.QIcon(Qt.QPixmap(icons.pwspec)))
        self.btnMode.setCheckable(True)
        self.btnMode.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
        toolBar.addWidget(self.btnMode)
        self.btnAvge = Qt.QToolButton(toolBar)
        self.btnAvge.setText("average")
        self.btnAvge.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
        self.btnAvge.setCheckable(True)
        self.btnAvge.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
        toolBar.addWidget(self.btnAvge)
        self.btnAutoc = Qt.QToolButton(toolBar)
        self.btnAutoc.setText("autocorrelation")
        self.btnAutoc.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
        self.btnAutoc.setCheckable(True)
        self.btnAutoc.setToolButtonStyle(Qt.Qt.ToolButtonTextUnderIcon)
        toolBar.addWidget(self.btnAutoc)
        #self.lstLabl = Qt.QLabel("Buffer:",toolBar)
        #toolBar.addWidget(self.lstLabl)
        #self.lstChan = Qt.QComboBox(toolBar)
        #self.lstChan.insertItem(0,"8192")
        #self.lstChan.insertItem(1,"16k")
        #self.lstChan.insertItem(2,"32k")
        #toolBar.addWidget(self.lstChan)
        # Channel selector: both channels, CH1 only, or CH2 only.
        self.lstLR = Qt.QLabel("Channels:",toolBar)
        toolBar.addWidget(self.lstLR)
        self.lstLRmode = Qt.QComboBox(toolBar)
        self.lstLRmode.insertItem(0,"1&2")
        self.lstLRmode.insertItem(1,"CH1")
        self.lstLRmode.insertItem(2,"CH2")
        toolBar.addWidget(self.lstLRmode)
        # Wire toolbar actions and pickers (old-style PyQt connections).
        self.connect(self.btnPrint, Qt.SIGNAL('clicked()'), self.printPlot)
        self.connect(self.btnSave, Qt.SIGNAL('clicked()'), self.saveData)
        self.connect(self.btnPDF, Qt.SIGNAL('clicked()'), self.printPDF)
        self.connect(self.btnFreeze, Qt.SIGNAL('toggled(bool)'), self.freeze)
        self.connect(self.btnMode, Qt.SIGNAL('toggled(bool)'), self.mode)
        self.connect(self.btnAvge, Qt.SIGNAL('toggled(bool)'), self.average)
        self.connect(self.btnAutoc, Qt.SIGNAL('toggled(bool)'),
                     self.autocorrelation)
        #self.connect(self.lstChan, Qt.SIGNAL('activated(int)'), self.fftsize)
        self.connect(self.lstLRmode, Qt.SIGNAL('activated(int)'), self.channel)
        self.connect(self.scope.picker,
                     Qt.SIGNAL('moved(const QPoint&)'),
                     self.moved)
        self.connect(self.scope.picker,
                     Qt.SIGNAL('appended(const QPoint&)'),
                     self.appended)
        self.connect(self.pwspec.picker,
                     Qt.SIGNAL('moved(const QPoint&)'),
                     self.moved)
        self.connect(self.pwspec.picker,
                     Qt.SIGNAL('appended(const QPoint&)'),
                     self.appended)
        self.connect(self.stack,
                     Qt.SIGNAL('currentChanged(int)'),
                     self.mode)
        self.showInfo(cursorInfo)
        #self.showFullScreen()
        #print self.size()

    def showInfo(self, text):
        # Convenience wrapper: display a message in the status bar.
        self.statusBar().showMessage(text)

    def printPlot(self):
        """Send the currently visible plot to a printer (PostScript default)."""
        printer = Qt.QPrinter(Qt.QPrinter.HighResolution)
        printer.setOutputFileName('scope-plot.ps')
        printer.setCreator('Ethernet Scope')
        printer.setOrientation(Qt.QPrinter.Landscape)
        printer.setColorMode(Qt.QPrinter.Color)
        docName = self.current.plot.title().text()
        if not docName.isEmpty():
            # Newlines are not allowed in document names.
            docName.replace(Qt.QRegExp(Qt.QString.fromLatin1('\n')), self.tr(' -- '))
            printer.setDocName(docName)
        dialog = Qt.QPrintDialog(printer)
        if dialog.exec_():
    #        filter = Qwt.PrintFilter()
    #        if (Qt.QPrinter.GrayScale == printer.colorMode()):
    #            filter.setOptions(
    #                Qwt.QwtPlotPrintFilter.PrintAll
    #                & ~Qwt.QwtPlotPrintFilter.PrintBackground
    #                | Qwt.QwtPlotPrintFilter.PrintFrameWithScales)
            self.current.plot.print_(printer)
        #p = Qt.QPrinter()
        #if p.setup():
        #    self.current.plot.printPlot(p)#, Qwt.QwtFltrDim(200));

    def printPDF(self):
        """Export the currently visible plot to a user-chosen PDF file."""
        fileName = Qt.QFileDialog.getSaveFileName(
            self,
            'Export File Name',
            '',
            'PDF Documents (*.pdf)')
        if not fileName.isEmpty():
            printer = Qt.QPrinter()
            printer.setOutputFormat(Qt.QPrinter.PdfFormat)
            printer.setOrientation(Qt.QPrinter.Landscape)
            printer.setOutputFileName(fileName)
            printer.setCreator('Ethernet Scope')
            self.current.plot.print_(printer)
    #        p = QPrinter()
    #        if p.setup():
    #            self.current.plot.printPlot(p)#, Qwt.QwtFltrDim(200));

    def saveData(self):
        """Dump the current plot's two traces to CSV as TIME/CH1/CH2 columns."""
        fileName = Qt.QFileDialog.getSaveFileName(
            self,
            'Export File Name',
            '',
            'CSV Documents (*.csv)')
        if not fileName.isEmpty():
            # Time axis reconstructed from the sample index / sample rate.
            csvlib.write_csv(fileName,
                             np.vstack((
                                np.arange(self.current.plot.a1.shape[0], dtype=int32)/samplerate,
                                self.current.plot.a1,
                                self.current.plot.a2)),
                             ("TIME", "CH1", "CH2"))

    def channel(self, item):
        # Combo-box callback: select which channel(s) are acquired and reset
        # both panes' running averages (old averages would mix channels).
        global SELECTEDCH
        if item == 1:
            SELECTEDCH = CH1
        elif item == 2:
            SELECTEDCH = CH2
        else:
            SELECTEDCH = BOTH12
        self.scope.plot.avcount = 0
        self.pwspec.plot.avcount = 0

    def freeze(self, on, changeIcon=True):
        # Toggle acquisition on both panes; the button swaps between a
        # "Run" and a "Freeze" appearance.
        if on:
            self.freezeState = 1
            if changeIcon:
                self.btnFreeze.setText("Run")
                self.btnFreeze.setIcon(Qt.QIcon(Qt.QPixmap(icons.goicon)))
        else:
            self.freezeState = 0
            if changeIcon:
                self.btnFreeze.setText("Freeze")
                self.btnFreeze.setIcon(Qt.QIcon(Qt.QPixmap(icons.stopicon)))
        self.scope.plot.setFreeze(self.freezeState)
        self.pwspec.plot.setFreeze(self.freezeState)

    def average(self, on):
        # Toggle trace averaging on both panes; button flips average/single.
        if on:
            self.averageState = 1
            self.btnAvge.setText("single")
            self.btnAvge.setIcon(Qt.QIcon(Qt.QPixmap(icons.single)))
        else:
            self.averageState = 0
            self.btnAvge.setText("average")
            self.btnAvge.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
        self.scope.plot.setAverage(self.averageState)
        self.pwspec.plot.setAverage(self.averageState)

    def autocorrelation(self, on):
        # Toggle autocorrelation display on the time-domain scope only.
        if on:
            self.autocState = 1
            self.btnAutoc.setText("normal")
            self.btnAutoc.setIcon(Qt.QIcon(Qt.QPixmap(icons.single)))
        else:
            self.autocState = 0
            self.btnAutoc.setText("autocorrelation")
            self.btnAutoc.setIcon(Qt.QIcon(Qt.QPixmap(icons.avge)))
        self.scope.plot.setAutoc(self.autocState)

    def mode(self, on):
        # Switch between scope (on=0) and fft (on=1) views. Only the visible
        # pane keeps the datastream so the hidden one stops acquiring.
        if on:
            self.changeState=1
            self.current=self.pwspec
            self.btnMode.setText("scope")
            self.btnMode.setIcon(Qt.QIcon(Qt.QPixmap(icons.scope)))
            self.btnMode.setChecked(True)
        else:
            self.changeState=0
            self.current=self.scope
            self.btnMode.setText("fft")
            self.btnMode.setIcon(Qt.QIcon(Qt.QPixmap(icons.pwspec)))
            self.btnMode.setChecked(False)
        if self.changeState==1:
            self.stack.setCurrentIndex(self.changeState)
            self.scope.plot.setDatastream(None)
            self.pwspec.plot.setDatastream(stream)
        else:
            self.stack.setCurrentIndex(self.changeState)
            self.pwspec.plot.setDatastream(None)
            self.scope.plot.setDatastream(stream)

    def moved(self, e):
        # Picker callback: translate the mouse position back to data
        # coordinates and show time/frequency plus both trace amplitudes.
        if self.changeState==1:
            name='Freq'
        else:
            name='Time'
        frequency = self.current.plot.invTransform(Qwt.QwtPlot.xBottom, e.x())
        amplitude = self.current.plot.invTransform(Qwt.QwtPlot.yLeft, e.y())
        if name=='Time':
            df=self.scope.plot.dt
            i=int(frequency/df)
            ampa=self.scope.plot.a1[i]
            ampb=self.scope.plot.a2[i]
        else:
            df=self.pwspec.plot.df
            i=int(frequency/df)
            ampa=self.pwspec.plot.a1[i]
            ampb=self.pwspec.plot.a2[i]
        self.showInfo('%s=%g, cursor=%g, A=%g, B=%g' %
                       (name,frequency, amplitude,ampa,ampb))

    def appended(self, e):
        # Picker callback for a new selection point.
        print 's'
        # Python semantics: self.pos = e.pos() does not work; force a copy
        self.xpos = e.x()
        self.ypos = e.y()
        self.moved(e)  # fake a mouse move to show the cursor position
def load_cfg():
default = '.audio' # default probe
conf_path = os.path.expanduser('~/.dualscope123')
conf = ConfigParser.ConfigParser()
print "Loaded config file %s" % (conf_path,)
if not os.path.isfile(conf_path):
conf.add_section('probes')
conf.set("probes", "probe", 'audio')
conf.set("DEFAULT", "verbose", 'false')
with open(conf_path, 'w') as fp:
conf.write(fp)
return load_cfg()
else:
conf.read([conf_path])
if not 'probes' in conf.sections() or 'DEFAULT' in conf.sections():
raise ConfigParser.NoSectionError("Malformed config file.")
try:
probe_name = conf.get('probes', 'probe').strip("\"'").strip()
except ConfigParser.NoOptionError:
probe = default[1:]
try:
verbose = conf.get('DEFAULT', 'verbose').strip("\"'").strip()
except ConfigParser.NoOptionError:
verbose = False
try:
probe_module = importlib.import_module("."+probe_name, "dualscope123.probes")
except ImportError:
probe_module = importlib.import_module(default, "dualscope123.probes")
probe_name = default[1:]
if verbose in ('true', 'True', '1', 'on', 'yes', 'YES', 'Yes', 'On'):
print "Loaded probe %s" % probe_name
verbose = True
else:
verbose = False
return probe_module, verbose
def main():
    """Entry point: load the configured probe, open it, and run the Qt UI."""
    # Module-level state read by the plot classes during acquisition.
    global verbose, samplerate, CHUNK, fftbuffersize, stream
    probe, verbose = load_cfg()
    stream = probe.Probe()
    stream.open()
    samplerate = stream.RATE
    CHUNK = stream.CHUNK
    fftbuffersize = CHUNK  # FFT length tracks the probe's chunk size
    app = Qt.QApplication(sys.argv)
    demo = FScopeDemo()
    demo.scope.plot.setDatastream(stream)
    demo.show()
    app.exec_()  # blocks until the UI exits
    stream.close()

if __name__ == '__main__':
    main()
| gpl-3.0 |
eshavlyugin/Preferans | newalgo/tf_train_py2.py | 1 | 14069 | import csv
import argparse
import sklearn
import sklearn.ensemble
import numpy
from sklearn.metrics import accuracy_score
from itertools import count
batch_size = 512     # SGD minibatch size
num_steps = 200000   # number of training iterations
num_hidden1 = 50     # width of the first hidden layer
num_hidden2 = 1      # width of the second hidden layer (only the 3-layer net uses it)
def prepare_train_data(data, label_names, label_name, feature_set, num_labels, use_regression):
    """Dispatch to the regression or one-hot-label data preparation routine."""
    prepare = prepare_train_data_regression if use_regression else prepare_train_data_label
    return prepare(data, label_names, label_name, feature_set, num_labels)
def prepare_train_data_regression(data, label_names, label_name, feature_set, num_labels):
    """Split raw rows into (labels, features) float32 arrays for regression.

    Columns whose name appears in feature_set become the feature matrix;
    columns named label_name become the regression targets (exactly
    num_labels of them are required).
    """
    req_indexes = set()
    val_indexes = set()
    for idx, name in enumerate(label_names):
        if name in feature_set:
            req_indexes.add(idx)
        if name == label_name:
            val_indexes.add(idx)
    assert len(val_indexes) == num_labels, "value indexes count doesn't match the labels count"
    labels = np.array(
        [[float(row[col]) for col in val_indexes] for row in data],
        dtype=np.float32)
    dataset = np.array(
        [[float(cell) for col, cell in enumerate(row) if col in req_indexes] for row in data],
        dtype=np.float32)
    return (labels, dataset)
def prepare_train_data_label(data, label_names, label_name, feature_set, num_labels):
    """Split raw rows into one-hot (labels, features) float32 arrays.

    The unique column named label_name holds a class index in
    [0, num_labels) and is one-hot encoded; columns whose name is in
    feature_set form the feature matrix. "move_reward" columns are indexed
    here for the (currently disabled) reward-weighted variants.
    """
    assert not label_name in feature_set, "prediction labels couldn't be in feature set!"
    label_idx = -1
    req_indexes = set()
    reward_indexes = []
    for idx, name in enumerate(label_names):
        if name in feature_set:
            req_indexes.add(idx)
        if name == label_name:
            assert label_idx == -1, "label " + label_name + " is not unique"
            label_idx = idx
        if name == "move_reward":
            reward_indexes.append(idx)
    assert label_idx != -1, "label " + label_name + " not found"
    reward_indexes_set = set(reward_indexes)  # kept for the reward-weighted variants
    from collections import Counter
    print('Counter:', Counter([entry[label_idx] for entry in data]))
    labels = np.array(
        [[1.0 if i == row[label_idx] else 0.0 for i in range(0, num_labels)] for row in data],
        dtype=np.float32)
    dataset = np.array(
        [[float(cell) for col, cell in enumerate(row) if col in req_indexes] for row in data],
        dtype=np.float32)
    return (labels, dataset)
def correct(predictions, labels):
    """Count rows whose argmax prediction equals the argmax label (as float)."""
    hits = np.argmax(predictions, 1) == np.argmax(labels, 1)
    return 1.0 * np.sum(hits)
def accuracy(predictions, labels):
    """Fraction of rows whose argmax prediction equals the argmax label."""
    n_hits = np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
    return (1.0 * n_hits) / predictions.shape[0]
def load_model_data(model_file):
    """Read a TSV model dump and return (label_names, train, valid, test).

    The first line is a tab-separated header of column names; every other
    line is a row of floats. Rows are split 80/10/10 into train/validation/
    test in file order (no shuffling).
    """
    with open(model_file,'r') as tsv:
        label_names = tsv.readline().strip().split('\t');
        lines = [[float(part) for part in line.strip().split('\t')] for line in tsv]
    total = len(lines)
    train_subset = int(total * 0.8)
    valid_subset = int(total * 0.1)
    test_subset = total - valid_subset - train_subset
    print 'Size = ', train_subset, valid_subset, test_subset
    all_dataset = lines
    train_dataset = np.array(all_dataset[:train_subset], dtype=np.float32)
    valid_dataset = np.array(all_dataset[train_subset:train_subset+valid_subset], dtype=np.float32)
    test_dataset = np.array(all_dataset[train_subset+valid_subset:], dtype=np.float32)
    return (label_names, train_dataset, valid_dataset, test_dataset)
def singlelayer_perceptron(data, weights, biases):
    """Single affine layer: data @ weights + biases (no activation)."""
    affine = tf.matmul(data, weights)
    return affine + biases
def three_layer_perceptron(data, weights, biases, weights2, biases2, weights3, drop):
    """Two sigmoid hidden layers followed by a linear output layer.

    When drop is true, dropout is applied to the input (keep prob 0.9) and
    to both hidden activations (keep prob 0.7).
    """
    if drop:
        data = tf.nn.dropout(data, 0.9)
    hidden1 = tf.nn.sigmoid(tf.add(tf.matmul(data, weights), biases))
    if drop:
        hidden1 = tf.nn.dropout(hidden1, 0.7)
    hidden2 = tf.nn.sigmoid(tf.add(tf.matmul(hidden1, weights2), biases2))
    if drop:
        hidden2 = tf.nn.dropout(hidden2, 0.7)
    return tf.matmul(hidden2, weights3)
def two_layer_perceptron(data, weights, biases, weights2, drop):
    """One sigmoid hidden layer followed by a linear output layer.

    When drop is true, dropout is applied to the input (keep prob 0.9) and
    to the hidden activation (keep prob 0.7).
    """
    if drop:
        data = tf.nn.dropout(data, 0.9)
    hidden = tf.nn.sigmoid(tf.add(tf.matmul(data, weights), biases))
    if drop:
        hidden = tf.nn.dropout(hidden, 0.7)
    return tf.matmul(hidden, weights2)
def multilayer_perceptron(data, weights, biases, weights2, biases2, weights3, drop):
    """Network used throughout training; currently the 2-layer variant.

    The 1- and 3-layer alternatives are kept commented for experimentation;
    biases2/weights3 are only consumed by the 3-layer form.
    """
#    return singlelayer_perceptron(data, weights, biases)
    return two_layer_perceptron(data, weights, biases, weights2, drop)
#    return three_layer_perceptron(data, weights, biases, weights2, biases2, weights3, drop)
def train_predict_model(model_data, label_name, feature_set, num_labels, use_regression):
    """Train a small feed-forward net on one target column and dump its weights.

    model_data is the (label_names, train, valid, test) tuple from
    load_model_data. The learned 2-layer weights are written to
    "<label_name>.tsv" and the final whole-dataset loss to model_error.txt.
    """
    label_names, train_dataset, valid_dataset, test_dataset = model_data
    # Re-slice each split into (targets, features) for this label/feature set.
    train_labels, train_dataset = prepare_train_data(train_dataset, label_names, label_name, feature_set, num_labels, use_regression)
    valid_labels, valid_dataset = prepare_train_data(valid_dataset, label_names, label_name, feature_set, num_labels, use_regression)
    test_labels, test_dataset = prepare_train_data(test_dataset, label_names, label_name, feature_set, num_labels, use_regression)
    num_features = len(train_dataset[0])
    #clf = sklearn.ensemble.RandomForestClassifier(verbose=1, n_estimators=500)
    #clf.fit(train_dataset, train_num_labels)
    #print(clf.predict(test_dataset))
    #print(accuracy_score(test_num_labels,clf.predict(test_dataset)))
    #return
    #sum(test_num_labels == clf.predict(test_labels))
    print('Processing: ', label_name, num_features, num_labels)
    graph = tf.Graph()
    with graph.as_default():
        # Input data.
        # Load the training, validation and test data into constants that are
        # attached to the graph.
        tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, num_features))
        tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
        #tf_valid_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
        # "All data" constants concatenate every split; used only to report
        # the final whole-dataset loss below.
        tf_train_dataset_const = tf.constant(numpy.concatenate((train_dataset, test_dataset, valid_dataset), axis = 0))
        tf_train_labels_const = tf.constant(numpy.concatenate((train_labels, test_labels, valid_labels), axis = 0))
        tf_valid_dataset = tf.constant(valid_dataset)
        tf_valid_labels = tf.constant(valid_labels)
        tf_test_dataset = tf.constant(test_dataset)
        # print num_features, num_hidden, num_labels
        # Parameters for up to three layers; weights3/biases2 are only used
        # by the (disabled) 3-layer variant of multilayer_perceptron.
        weights = tf.Variable(tf.truncated_normal([num_features, num_hidden1]))
        biases = tf.Variable(tf.zeros([num_hidden1]))
        weights2 = tf.Variable(tf.truncated_normal([num_hidden1, num_hidden2]))
        biases2 = tf.Variable(tf.zeros([num_hidden2]))
        weights3 = tf.Variable(tf.truncated_normal([num_hidden2, num_labels]))
        #logits2 = singlelayer_perceptron(tf_train_dataset, weights, biases)
        # Training graph uses dropout (drop=True); evaluation graphs do not.
        logits2 = multilayer_perceptron(tf_train_dataset, weights, biases, weights2, biases2, weights3, True)
        logits_all = multilayer_perceptron(tf_train_dataset_const, weights, biases, weights2, biases2, weights3, False)
        logits_valid = multilayer_perceptron(tf_valid_dataset, weights, biases, weights2, biases2, weights3, False)
        # Plain MSE losses; cross-entropy variants kept below for reference.
        loss = tf.reduce_mean(tf.square(logits2 - tf_train_labels))
        loss_all = tf.reduce_mean(tf.square(logits_all - tf_train_labels_const))
        loss_valid = tf.reduce_mean(tf.square(logits_valid - tf_valid_labels))
        #loss = tf.reduce_mean(tf.mul(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels), 0), tf_train_weights))
    #    loss = tf.reduce_mean(tf.neg(tf.mul(tf_train_labels, tf.nn.softmax(logits=logits2))))
    #    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels))
        #regularizers = (tf.nn.l2_loss(weights) + tf.nn.l2_loss(biases))
        #loss += 1e-5 * regularizers
        # Optimizer.
        # We are going to find the minimum of this loss using gradient descent.
        #optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
        # Predictions for the training, validation, and test data.
        # These are not part of training, but merely here so that we can report
        # accuracy figures as we train.
        train_prediction = tf.nn.softmax(logits2)
        #valid_prediction = tf.nn.softmax(singlelayer_perceptron(tf_valid_dataset, weights, biases))
        #test_prediction = tf.nn.softmax(singlelayer_perceptron(tf_test_dataset, weights, biases))
        #train_all_prediction = tf.nn.softmax(singlelayer_perceptron(tf_train_dataset_const, weights, biases))
        #valid_prediction = tf.reduce_mean(tf.square(tf.nn.softmax(multilayer_perceptron(tf_valid_dataset, weights, biases, weights2, biases2)), tf_valid_labels))
        test_prediction = tf.nn.softmax(multilayer_perceptron(tf_test_dataset, weights, biases, weights2, biases2, weights3, False))
        valid_prediction = tf.nn.softmax(multilayer_perceptron(tf_valid_dataset, weights, biases, weights2, biases2, weights3, False))
        #train_all_prediction = tf.nn.softmax(multilayer_perceptron(tf_train_dataset_const, weights, biases, weights2, biases2))
    with tf.Session(graph=graph, config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as session:
        # This is a one-time operation which ensures the parameters get initialized as
        # we described in the graph: random weights for the matrix, zeros for the
        # biases.
        tf.initialize_all_variables().run()
        print('Initialized')
        for step in range(num_steps):
            # Run the computations. We tell .run() that we want to run the optimizer,
            # and get the loss value and the training predictions returned as numpy
            # arrays.
            offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
            # Generate a minibatch.
            batch_data = train_dataset[offset:(offset + batch_size), :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
            _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict = feed_dict)
            if (step % 100 == 0):
                print('Loss at step %d: %f' % (step, l))
                print('Validation loss: %f' % loss_valid.eval())
                #print('Train accuracy: %f' % accuracy(predictions, batch_labels))
                #print('Training accuracy: %.1f%%' % accuracy(predictions, batch_labels))
                # Calling .eval() on valid_prediction is basically like calling run(), but
                # just to get that one numpy array. Note that it recomputes all its graph
                # dependencies.
                #print('Validation accuracy: %f' % accuracy(valid_prediction.eval(), valid_labels))
        #print('Test accuracy: %f' % accuracy(test_prediction.eval(), test_labels))
        # Snapshot the learned parameters so they can be serialized below.
        weights_eval = weights.eval()
        biases_eval = biases.eval()
        weights2_eval = weights2.eval()
        biases2_eval = biases2.eval()
        weights3_eval = weights3.eval()
        # Dump the 2-layer net as TSV: header, layer sizes, biases, then the
        # two weight matrices row by row.
        trained_weights_file = label_name + ".tsv"
        with open(trained_weights_file, 'w') as f:
            f.write('2_layer_nn\n')
            f.write(str(num_features) + " " + str(num_hidden1) + " " + "1" + "\n")
            f.write('\t'.join([str(i) for i in biases_eval]) + '\n')
            for row in weights_eval:
                f.write('\t'.join([str(i) for i in row]) + '\n')
            for row in weights2_eval:
                f.write('\t'.join([str(i) for i in row]) + '\n')
        with open("model_error.txt", 'w') as f:
            f.write('%f\n' % (loss_all.eval()))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog='Tensorflow model trainer', add_help=True)
    parser.add_argument('--play_open', action='store_true', help='do train move predictor model')
    parser.add_argument('--play_close', action='store_true', help='do train move predictor model')
    parser.add_argument('--prob', action='store_true', help='do train probabilities predictor model')
    args = parser.parse_args()
    if args.play_open or args.prob or args.play_close:
        # Heavy imports deferred until we know a model will actually be trained.
        import tensorflow as tf
        import numpy as np
        data = load_model_data('model.tsv')
    if args.play_open:
        train_predict_model(data, 'expected_score', set(['open_cards', 'common_cards']), 1, True)
        #train_predict_model(data, 'expected_score_3', set(['open_cards', 'common_cards']), 2, False)
        #train_predict_model(data, 'expected_score_6', set(['open_cards', 'common_cards']), 2, False)
    if args.play_close:
        train_predict_model(data, 'correct_move_close', set(['close_cards', 'common_cards']), 32, False)
    if args.prob:
        for i in range(0, 32):
            # BUGFIX: a missing comma made 'common_cards' 'move' concatenate
            # into the single feature name 'common_cardsmove', silently
            # dropping both feature groups from the set.
            train_predict_model(data, 'card' + str(i), set(['close_cards', 'common_cards', 'move']), 4, False)
| gpl-2.0 |
solenoid-bandits/leviosa | controller.py | 1 | 1687 | import numpy as np
from matplotlib import pyplot as plt
from net import Net
class Controller(object):
    """Trivial proportional controller: the output is the raw position error."""

    def __init__(self):
        pass

    def current(self, pos, target, dt):
        """Return the control current for the given position/target (dt unused)."""
        error = target - pos
        return error * 1.0  # proportional term with unit gain
class PIDController(Controller):
    """Classic PID controller operating on an externally computed error.

    current(err, dt) returns k_p*err + k_i*integral(err) + k_d*d(err)/dt.
    A zero dt leaves all state untouched and returns the previous output.
    """

    def __init__(self, k_p=1.0, k_i=0.0, k_d=0.0, t=0.0):
        super(PIDController, self).__init__()
        self.k_p = k_p   # proportional gain
        self.k_i = k_i   # integral gain
        self.k_d = k_d   # derivative gain
        self.e_i = 0     # accumulated integral of the error
        self.e_d = 0     # error from the previous call (for the derivative)
        self.res = 0.0   # last computed output

    def current(self, err, dt):
        """Advance the controller by one step of length dt; return its output."""
        if dt == 0:
            # No time elapsed: derivative is undefined, so repeat the last output.
            return self.res
        self.e_i += err * dt
        proportional = self.k_p * err
        integral = self.k_i * self.e_i
        derivative = self.k_d * (err - self.e_d) / dt
        self.res = proportional + integral + derivative
        self.e_d = err  # remember last error
        return self.res
class GradientController(Controller):
    """Controller skeleton meant to learn its policy by gradient descent.

    Only the network is constructed; current() is not implemented yet.
    """

    def __init__(self):
        # BUGFIX: the module imports `Net` (from net import Net) but this
        # constructor called the undefined name `NeuralNet`, raising a
        # NameError on instantiation.
        self.net = Net()

    def current(self, pos, target, t):
        # TODO: backprop through the net and return its output.
        pass
if __name__ == "__main__":
    # Simulate a PID-controlled plant and plot position/current over time.
    T_START = 0.0
    T_END = 200 * np.pi
    T_N = 10000
    ctrl = PIDController(1.0, 0.1, 0.15)
    target_pos = 1.0
    pos = 0.0
    ts = np.linspace(T_START, T_END, T_N)
    cs = []
    ps = []
    for i in range(T_N):
        # Guard i == 0: ts[i] - ts[i-1] would otherwise wrap to ts[0] - ts[-1].
        dt = ts[i] - ts[i-1] if i > 0 else 0.0
        # BUGFIX: PIDController.current takes (error, dt); the old call
        # passed three positionals (pos, target, dt) -- a TypeError -- and
        # used the unguarded time difference, which is hugely negative at
        # i == 0. Feed it the position error and the guarded dt instead.
        c = ctrl.current(target_pos - pos, dt)
        pos -= np.sin(c)
        ps.append(pos)
        cs.append(c)
    plt.plot(ts, ps)
    plt.plot(ts, [target_pos for _ in ts])
    plt.plot(ts, cs)
    plt.legend(['position','target','current'])
    ax = plt.gca()
    plt.show()
| mit |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
    # (cost matrix, optimal assignment cost) pairs; includes rectangular
    # variants and a degenerate 2x0 matrix.
    cases = [
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         850),
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         452),
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         18),
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         15),
        ([[], []],
         0),
    ]

    for raw, expected_total in cases:
        cost_matrix = np.array(raw)
        # Sum the entries selected by the Hungarian algorithm.
        total_cost = sum(cost_matrix[r, c] for r, c in _hungarian(cost_matrix))
        assert expected_total == total_cost
        # The optimum must be invariant under transposition; index pairs
        # come back as (col, row) for the transposed problem.
        transposed_cost = sum(cost_matrix[r, c]
                              for c, r in _hungarian(cost_matrix.T))
        assert expected_total == transposed_cost
| mit |
devanshdalal/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 28 | 24487 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_in
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
    # Test stopping conditions of gradient descent.
    class ObjectiveSmallGradient:
        # Objective whose error decays by 0.1 per call with a tiny gradient.
        def __init__(self):
            self.it = -1

        def __call__(self, _):
            self.it += 1
            return (10 - self.it) / 10.0, np.array([1e-5])

    def flat_function(_):
        # Constant objective: never makes progress.
        return 0.0, np.ones(1)

    def run_descent(objective, **overrides):
        # Run _gradient_descent with permissive defaults (overridable per
        # case), capturing its verbose output; returns (error, it, stdout).
        kwargs = dict(n_iter=100, n_iter_without_progress=100, momentum=0.0,
                      learning_rate=0.0, min_gain=0.0, min_grad_norm=0.0,
                      min_error_diff=0.0, verbose=2)
        kwargs.update(overrides)
        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            _, error, it = _gradient_descent(objective, np.zeros(1), 0,
                                             **kwargs)
        finally:
            out = sys.stdout.getvalue()
            sys.stdout.close()
            sys.stdout = old_stdout
        return error, it, out

    # Gradient norm
    error, it, out = run_descent(ObjectiveSmallGradient(), min_grad_norm=1e-5)
    assert_equal(error, 1.0)
    assert_equal(it, 0)
    assert("gradient norm" in out)

    # Error difference
    error, it, out = run_descent(ObjectiveSmallGradient(), min_error_diff=0.2)
    assert_equal(error, 0.9)
    assert_equal(it, 1)
    assert("error difference" in out)

    # Maximum number of iterations without improvement
    error, it, out = run_descent(flat_function, n_iter_without_progress=10,
                                 min_error_diff=-1.0)
    assert_equal(error, 0.0)
    assert_equal(it, 11)
    assert("did not make any progress" in out)

    # Maximum number of iterations
    error, it, out = run_descent(ObjectiveSmallGradient(), n_iter=11)
    assert_equal(error, 0.0)
    assert_equal(it, 10)
    assert("Iteration 10" in out)
def test_binary_search():
    # The binary search should find per-point Gaussians whose mean
    # perplexity matches the requested one.
    rng = check_random_state(0)
    distances = rng.randn(50, 2).astype(np.float32)
    # Build a non-negative symmetric "distance" matrix with zero diagonal.
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    desired_perplexity = 25.0
    P = _binary_search_perplexity(distances, None, desired_perplexity,
                                  verbose=0)
    P = np.maximum(P, np.finfo(np.double).eps)
    perplexities = [np.exp(-np.sum(row * np.log(row))) for row in P]
    mean_perplexity = np.mean(perplexities)
    assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
    # The nearest-neighbors perplexity search should reproduce the
    # exhaustive result when every point is used as a neighbor.
    n_samples = 500
    desired_perplexity = 25.0
    rng = check_random_state(0)
    distances = rng.randn(n_samples, 2).astype(np.float32)
    # Distances shouldn't be negative
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    P1 = _binary_search_perplexity(distances, None, desired_perplexity,
                                   verbose=0)

    # Using all points as neighbors must give an identical result.
    all_nn = np.argsort(distances, axis=1)[:, :n_samples].astype(np.int64)
    P2 = _binary_search_perplexity(distances, all_nn,
                                   desired_perplexity, verbose=0)
    assert_array_almost_equal(P1, P2, decimal=4)

    # With fewer neighbors, the largest P_ij entries should still agree.
    for k_float in np.linspace(80, n_samples, 10):
        k = int(k_float)
        topn = k * 10  # check the top 10 * k entries out of k * k entries
        neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
        P2k = _binary_search_perplexity(distances, neighbors_nn,
                                        desired_perplexity, verbose=0)
        idx = np.argsort(P1.ravel())[::-1]
        P1top = P1.ravel()[idx][:topn]
        P2top = P2k.ravel()[idx][:topn]
        assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
# The Barnes-Hut approximation uses a different method to estimate
# P_ij using only a number of nearest neighbors instead of all
# points (so that k = 3 * perplexity). As a result we set the
# perplexity=5, so that the number of neighbors is 5%.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [200, 250, 300]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
m = "'init' must be 'pca', 'random', or a numpy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
# Skip num points should make it such that the Barnes_hut gradient
# is not calculated for indices below skip_num_point.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
_barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
grad_bh, 0.5, 2, 1, skip_num_points=0)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
# Test will hang if it doesn't complete.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
# Make sure translating between 1D and N-D indices are preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
def test_n_iter_without_progress():
# Make sure that the parameter n_iter_without_progress is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=2, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert_in("did not make any progress during the "
"last 2 episodes. Finished.", out)
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split('\n')
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
# When the computation is Finished just an old gradient norm value
# is repeated that we do not need to store
if 'Finished' in line:
break
start_grad_norm = line.find('gradient norm')
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace('gradient norm = ', '')
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = \
len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
# The gradient norm can be smaller than min_grad_norm at most once,
# because in the moment it becomes smaller the optimization stops
assert_less_equal(n_smaller_gradient_norms, 1)
| bsd-3-clause |
pymedusa/Medusa | ext/dateutil/parser/_parser.py | 8 | 58804 | # -*- coding: utf-8 -*-
"""
This module offers a generic date/time string parser which is able to parse
most known formats to represent a date and/or time.
This module attempts to be forgiving with regards to unlikely input formats,
returning a datetime object even for dates which are ambiguous. If an element
of a date/time stamp is omitted, the following rules are applied:
- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour
on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
specified.
- If a time zone is omitted, a timezone-naive datetime is returned.
If any other elements are missing, they are taken from the
:class:`datetime.datetime` object passed to the parameter ``default``. If this
results in a day number exceeding the valid number of days per month, the
value falls back to the end of the month.
Additional resources about date/time string formats can be found below:
- `A summary of the international standard date and time notation
<http://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
- `W3C Date and Time Formats <http://www.w3.org/TR/NOTE-datetime>`_
- `Time Formats (Planetary Rings Node) <https://pds-rings.seti.org:443/tools/time_formats.html>`_
- `CPAN ParseDate module
<http://search.cpan.org/~muir/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
- `Java SimpleDateFormat Class
<https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
"""
from __future__ import unicode_literals
import datetime
import re
import string
import time
import warnings
from calendar import monthrange
from io import StringIO
import six
from six import integer_types, text_type
from decimal import Decimal
from warnings import warn
from .. import relativedelta
from .. import tz
__all__ = ["parse", "parserinfo", "ParserError"]
# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth
# making public and/or figuring out if there is something we can
# take off their plate.
class _timelex(object):
# Fractional seconds are sometimes split by a comma
_split_decimal = re.compile("([.,])")
def __init__(self, instream):
if six.PY2:
# In Python 2, we can't duck type properly because unicode has
# a 'decode' function, and we'd be double-decoding
if isinstance(instream, (bytes, bytearray)):
instream = instream.decode()
else:
if getattr(instream, 'decode', None) is not None:
instream = instream.decode()
if isinstance(instream, text_type):
instream = StringIO(instream)
elif getattr(instream, 'read', None) is None:
raise TypeError('Parser must be a string or character stream, not '
'{itype}'.format(itype=instream.__class__.__name__))
self.instream = instream
self.charstack = []
self.tokenstack = []
self.eof = False
def get_token(self):
"""
This function breaks the time string into lexical units (tokens), which
can be parsed by the parser. Lexical units are demarcated by changes in
the character set, so any continuous string of letters is considered
one unit, any continuous string of numbers is considered one unit.
The main complication arises from the fact that dots ('.') can be used
both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
"4:30:21.447"). As such, it is necessary to read the full context of
any dot-separated strings before breaking it into tokens; as such, this
function maintains a "token stack", for when the ambiguous context
demands that multiple tokens be parsed at once.
"""
if self.tokenstack:
return self.tokenstack.pop(0)
seenletters = False
token = None
state = None
while not self.eof:
# We only realize that we've reached the end of a token when we
# find a character that's not part of the current token - since
# that character may be part of the next token, it's stored in the
# charstack.
if self.charstack:
nextchar = self.charstack.pop(0)
else:
nextchar = self.instream.read(1)
while nextchar == '\x00':
nextchar = self.instream.read(1)
if not nextchar:
self.eof = True
break
elif not state:
# First character of the token - determines if we're starting
# to parse a word, a number or something else.
token = nextchar
if self.isword(nextchar):
state = 'a'
elif self.isnum(nextchar):
state = '0'
elif self.isspace(nextchar):
token = ' '
break # emit token
else:
break # emit token
elif state == 'a':
# If we've already started reading a word, we keep reading
# letters until we find something that's not part of a word.
seenletters = True
if self.isword(nextchar):
token += nextchar
elif nextchar == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0':
# If we've already started reading a number, we keep reading
# numbers until we find something that doesn't fit.
if self.isnum(nextchar):
token += nextchar
elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == 'a.':
# If we've seen some letters and a dot separator, continue
# parsing, and the tokens will be broken up later.
seenletters = True
if nextchar == '.' or self.isword(nextchar):
token += nextchar
elif self.isnum(nextchar) and token[-1] == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0.':
# If we've seen at least one dot separator, keep going, we'll
# break up the tokens later.
if nextchar == '.' or self.isnum(nextchar):
token += nextchar
elif self.isword(nextchar) and token[-1] == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
token[-1] in '.,')):
l = self._split_decimal.split(token)
token = l[0]
for tok in l[1:]:
if tok:
self.tokenstack.append(tok)
if state == '0.' and token.count('.') == 0:
token = token.replace(',', '.')
return token
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token is None:
raise StopIteration
return token
def next(self):
return self.__next__() # Python 2.x support
@classmethod
def split(cls, s):
return list(cls(s))
@classmethod
def isword(cls, nextchar):
""" Whether or not the next character is part of a word """
return nextchar.isalpha()
@classmethod
def isnum(cls, nextchar):
""" Whether the next character is part of a number """
return nextchar.isdigit()
@classmethod
def isspace(cls, nextchar):
""" Whether the next character is whitespace """
return nextchar.isspace()
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __len__(self):
return (sum(getattr(self, attr) is not None
for attr in self.__slots__))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo(object):
"""
Class which handles what inputs are accepted. Subclass this to customize
the language and acceptable values for each parameter.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. Default is ``False``.
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
Default is ``False``.
"""
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"), # TODO: "Tues"
("Wed", "Wednesday"),
("Thu", "Thursday"), # TODO: "Thurs"
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
("Feb", "February"), # TODO: "Febr"
("Mar", "March"),
("Apr", "April"),
("May", "May"),
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "Sept", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
HMS = [("h", "hour", "hours"),
("m", "minute", "minutes"),
("s", "second", "seconds")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z", "z"]
PERTAIN = ["of"]
TZOFFSET = {}
# TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate",
# "Anno Domini", "Year of Our Lord"]
def __init__(self, dayfirst=False, yearfirst=False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
self._hms = self._convert(self.HMS)
self._ampm = self._convert(self.AMPM)
self._utczone = self._convert(self.UTCZONE)
self._pertain = self._convert(self.PERTAIN)
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year // 100 * 100
def _convert(self, lst):
dct = {}
for i, v in enumerate(lst):
if isinstance(v, tuple):
for v in v:
dct[v.lower()] = i
else:
dct[v.lower()] = i
return dct
def jump(self, name):
return name.lower() in self._jump
def weekday(self, name):
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None
def month(self, name):
try:
return self._months[name.lower()] + 1
except KeyError:
pass
return None
def hms(self, name):
try:
return self._hms[name.lower()]
except KeyError:
return None
def ampm(self, name):
try:
return self._ampm[name.lower()]
except KeyError:
return None
def pertain(self, name):
return name.lower() in self._pertain
def utczone(self, name):
return name.lower() in self._utczone
def tzoffset(self, name):
if name in self._utczone:
return 0
return self.TZOFFSET.get(name)
def convertyear(self, year, century_specified=False):
"""
Converts two-digit years to year within [-50, 49]
range of self._year (current local time)
"""
# Function contract is that the year is always positive
assert year >= 0
if year < 100 and not century_specified:
# assume current century to start
year += self._century
if year >= self._year + 50: # if too far in future
year -= 100
elif year < self._year - 50: # if too far in past
year += 100
return year
def validate(self, res):
# move to info
if res.year is not None:
res.year = self.convertyear(res.year, res.century_specified)
if ((res.tzoffset == 0 and not res.tzname) or
(res.tzname == 'Z' or res.tzname == 'z')):
res.tzname = "UTC"
res.tzoffset = 0
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
res.tzoffset = 0
return True
class _ymd(list):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.century_specified = False
self.dstridx = None
self.mstridx = None
self.ystridx = None
@property
def has_year(self):
return self.ystridx is not None
@property
def has_month(self):
return self.mstridx is not None
@property
def has_day(self):
return self.dstridx is not None
def could_be_day(self, value):
if self.has_day:
return False
elif not self.has_month:
return 1 <= value <= 31
elif not self.has_year:
# Be permissive, assume leap year
month = self[self.mstridx]
return 1 <= value <= monthrange(2000, month)[1]
else:
month = self[self.mstridx]
year = self[self.ystridx]
return 1 <= value <= monthrange(year, month)[1]
def append(self, val, label=None):
if hasattr(val, '__len__'):
if val.isdigit() and len(val) > 2:
self.century_specified = True
if label not in [None, 'Y']: # pragma: no cover
raise ValueError(label)
label = 'Y'
elif val > 100:
self.century_specified = True
if label not in [None, 'Y']: # pragma: no cover
raise ValueError(label)
label = 'Y'
super(self.__class__, self).append(int(val))
if label == 'M':
if self.has_month:
raise ValueError('Month is already set')
self.mstridx = len(self) - 1
elif label == 'D':
if self.has_day:
raise ValueError('Day is already set')
self.dstridx = len(self) - 1
elif label == 'Y':
if self.has_year:
raise ValueError('Year is already set')
self.ystridx = len(self) - 1
def _resolve_from_stridxs(self, strids):
"""
Try to resolve the identities of year/month/day elements using
ystridx, mstridx, and dstridx, if enough of these are specified.
"""
if len(self) == 3 and len(strids) == 2:
# we can back out the remaining stridx value
missing = [x for x in range(3) if x not in strids.values()]
key = [x for x in ['y', 'm', 'd'] if x not in strids]
assert len(missing) == len(key) == 1
key = key[0]
val = missing[0]
strids[key] = val
assert len(self) == len(strids) # otherwise this should not be called
out = {key: self[strids[key]] for key in strids}
return (out.get('y'), out.get('m'), out.get('d'))
def resolve_ymd(self, yearfirst, dayfirst):
len_ymd = len(self)
year, month, day = (None, None, None)
strids = (('y', self.ystridx),
('m', self.mstridx),
('d', self.dstridx))
strids = {key: val for key, val in strids if val is not None}
if (len(self) == len(strids) > 0 or
(len(self) == 3 and len(strids) == 2)):
return self._resolve_from_stridxs(strids)
mstridx = self.mstridx
if len_ymd > 3:
raise ValueError("More than three YMD values")
elif len_ymd == 1 or (mstridx is not None and len_ymd == 2):
# One member, or two members with a month string
if mstridx is not None:
month = self[mstridx]
# since mstridx is 0 or 1, self[mstridx-1] always
# looks up the other element
other = self[mstridx - 1]
else:
other = self[0]
if len_ymd > 1 or mstridx is None:
if other > 31:
year = other
else:
day = other
elif len_ymd == 2:
# Two members with numbers
if self[0] > 31:
# 99-01
year, month = self
elif self[1] > 31:
# 01-99
month, year = self
elif dayfirst and self[1] <= 12:
# 13-01
day, month = self
else:
# 01-13
month, day = self
elif len_ymd == 3:
# Three members
if mstridx == 0:
if self[1] > 31:
# Apr-2003-25
month, year, day = self
else:
month, day, year = self
elif mstridx == 1:
if self[0] > 31 or (yearfirst and self[2] <= 31):
# 99-Jan-01
year, month, day = self
else:
# 01-Jan-01
# Give precedence to day-first, since
# two-digit years is usually hand-written.
day, month, year = self
elif mstridx == 2:
# WTF!?
if self[1] > 31:
# 01-99-Jan
day, year, month = self
else:
# 99-01-Jan
year, day, month = self
else:
if (self[0] > 31 or
self.ystridx == 0 or
(yearfirst and self[1] <= 12 and self[2] <= 31)):
# 99-01-01
if dayfirst and self[2] <= 12:
year, day, month = self
else:
year, month, day = self
elif self[0] > 12 or (dayfirst and self[1] <= 12):
# 13-01-01
day, month, year = self
else:
# 01-13-01
month, day, year = self
return year, month, day
class parser(object):
def __init__(self, info=None):
self.info = info or parserinfo()
def parse(self, timestr, default=None,
ignoretz=False, tzinfos=None, **kwargs):
"""
Parse the date/time string into a :class:`datetime.datetime` object.
:param timestr:
Any date/time string using the supported formats.
:param default:
The default datetime object, if this is a datetime object and not
``None``, elements specified in ``timestr`` replace elements in the
default object.
:param ignoretz:
If set ``True``, time zones in parsed strings are ignored and a
naive :class:`datetime.datetime` object is returned.
:param tzinfos:
Additional time zone names / aliases which may be present in the
string. This argument maps time zone names (and optionally offsets
from those time zones) to time zones. This parameter can be a
dictionary with timezone aliases mapping time zone names to time
zones or a function taking two parameters (``tzname`` and
``tzoffset``) and returning a time zone.
The timezones to which the names are mapped can be an integer
offset from UTC in seconds or a :class:`tzinfo` object.
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from dateutil.parser import parse
>>> from dateutil.tz import gettz
>>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21,
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
This parameter is ignored if ``ignoretz`` is set.
:param \\*\\*kwargs:
Keyword arguments as passed to ``_parse()``.
:return:
Returns a :class:`datetime.datetime` object or, if the
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
first element being a :class:`datetime.datetime` object, the second
a tuple containing the fuzzy tokens.
:raises ParserError:
Raised for invalid or unknown string format, if the provided
:class:`tzinfo` is not in a valid format, or if an invalid date
would be created.
:raises TypeError:
Raised for non-string or character stream input.
:raises OverflowError:
Raised if the parsed date exceeds the largest valid C integer on
your system.
"""
if default is None:
default = datetime.datetime.now().replace(hour=0, minute=0,
second=0, microsecond=0)
res, skipped_tokens = self._parse(timestr, **kwargs)
if res is None:
raise ParserError("Unknown string format: %s", timestr)
if len(res) == 0:
raise ParserError("String does not contain a date: %s", timestr)
try:
ret = self._build_naive(res, default)
except ValueError as e:
six.raise_from(ParserError(e.args[0] + ": %s", timestr), e)
if not ignoretz:
ret = self._build_tzaware(ret, res, tzinfos)
if kwargs.get('fuzzy_with_tokens', False):
return ret, skipped_tokens
else:
return ret
class _result(_resultbase):
__slots__ = ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset", "ampm","any_unused_tokens"]
    def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
               fuzzy_with_tokens=False):
        """
        Private method which performs the heavy lifting of parsing, called from
        ``parse()``, which passes on its ``kwargs`` to this function.

        :param timestr:
            The string to parse.
        :param dayfirst:
            Whether to interpret the first value in an ambiguous 3-integer date
            (e.g. 01/05/09) as the day (``True``) or month (``False``). If
            ``yearfirst`` is set to ``True``, this distinguishes between YDM
            and YMD. If set to ``None``, this value is retrieved from the
            current :class:`parserinfo` object (which itself defaults to
            ``False``).
        :param yearfirst:
            Whether to interpret the first value in an ambiguous 3-integer date
            (e.g. 01/05/09) as the year. If ``True``, the first number is taken
            to be the year, otherwise the last number is taken to be the year.
            If this is set to ``None``, the value is retrieved from the current
            :class:`parserinfo` object (which itself defaults to ``False``).
        :param fuzzy:
            Whether to allow fuzzy parsing, allowing for string like "Today is
            January 1, 2047 at 8:21:00AM".
        :param fuzzy_with_tokens:
            If ``True``, ``fuzzy`` is automatically set to True, and the parser
            will return a tuple where the first element is the parsed
            :class:`datetime.datetime` datetimestamp and the second element is
            a tuple containing the portions of the string which were ignored:

            .. doctest::

                >>> from dateutil.parser import parse
                >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
                (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))

        :return:
            A two-element tuple ``(res, skipped_tokens)``.  ``res`` is a
            ``_result`` with whichever components could be extracted, or
            ``None`` when the string is unparseable or fails
            ``info.validate``.  ``skipped_tokens`` is a tuple of the
            ignored substrings when ``fuzzy_with_tokens`` is set,
            otherwise ``None``.
        """
        if fuzzy_with_tokens:
            fuzzy = True

        info = self.info

        # Fall back to the parserinfo defaults for the ambiguous-date flags.
        if dayfirst is None:
            dayfirst = info.dayfirst

        if yearfirst is None:
            yearfirst = info.yearfirst

        res = self._result()
        l = _timelex.split(timestr)         # Splits the timestr into tokens

        # Indices of tokens ignored under fuzzy parsing; recombined into
        # substrings at the end when fuzzy_with_tokens is requested.
        skipped_idxs = []

        # year/month/day list
        ymd = _ymd()

        len_l = len(l)
        i = 0
        # Any IndexError/ValueError escaping the scan below is treated as
        # "string could not be parsed" (see the except clause at the bottom).
        try:
            while i < len_l:

                # Check if it's a number
                value_repr = l[i]
                try:
                    value = float(value_repr)
                except ValueError:
                    value = None

                if value is not None:
                    # Numeric token
                    i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy)

                # Check weekday
                elif info.weekday(l[i]) is not None:
                    value = info.weekday(l[i])
                    res.weekday = value

                # Check month name
                elif info.month(l[i]) is not None:
                    value = info.month(l[i])
                    ymd.append(value, 'M')

                    if i + 1 < len_l:
                        if l[i + 1] in ('-', '/'):
                            # Jan-01[-99]
                            sep = l[i + 1]
                            ymd.append(l[i + 2])

                            if i + 3 < len_l and l[i + 3] == sep:
                                # Jan-01-99
                                ymd.append(l[i + 4])
                                i += 2

                            i += 2

                        elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and
                              info.pertain(l[i + 2])):
                            # Jan of 01
                            # In this case, 01 is clearly year
                            if l[i + 4].isdigit():
                                # Convert it here to become unambiguous
                                value = int(l[i + 4])
                                year = str(info.convertyear(value))
                                ymd.append(year, 'Y')
                            else:
                                # Wrong guess
                                pass
                                # TODO: not hit in tests
                            i += 4

                # Check am/pm
                elif info.ampm(l[i]) is not None:
                    value = info.ampm(l[i])
                    val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy)

                    if val_is_ampm:
                        res.hour = self._adjust_ampm(res.hour, value)
                        res.ampm = value

                    elif fuzzy:
                        skipped_idxs.append(i)

                # Check for a timezone name
                elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]):
                    res.tzname = l[i]
                    res.tzoffset = info.tzoffset(res.tzname)

                    # Check for something like GMT+3, or BRST+3. Notice
                    # that it doesn't mean "I am 3 hours after GMT", but
                    # "my time +3 is GMT". If found, we reverse the
                    # logic so that timezone parsing code will get it
                    # right.
                    if i + 1 < len_l and l[i + 1] in ('+', '-'):
                        l[i + 1] = ('+', '-')[l[i + 1] == '+']
                        res.tzoffset = None
                        if info.utczone(res.tzname):
                            # With something like GMT+3, the timezone
                            # is *not* GMT.
                            res.tzname = None

                # Check for a numbered timezone
                elif res.hour is not None and l[i] in ('+', '-'):
                    signal = (-1, 1)[l[i] == '+']
                    len_li = len(l[i + 1])

                    # TODO: check that l[i + 1] is integer?
                    if len_li == 4:
                        # -0300
                        hour_offset = int(l[i + 1][:2])
                        min_offset = int(l[i + 1][2:])
                    elif i + 2 < len_l and l[i + 2] == ':':
                        # -03:00
                        hour_offset = int(l[i + 1])
                        min_offset = int(l[i + 3])  # TODO: Check that l[i+3] is minute-like?
                        i += 2
                    elif len_li <= 2:
                        # -[0]3
                        hour_offset = int(l[i + 1][:2])
                        min_offset = 0
                    else:
                        raise ValueError(timestr)

                    res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60)

                    # Look for a timezone name between parenthesis
                    if (i + 5 < len_l and
                            info.jump(l[i + 2]) and l[i + 3] == '(' and
                            l[i + 5] == ')' and
                            3 <= len(l[i + 4]) and
                            self._could_be_tzname(res.hour, res.tzname,
                                                  None, l[i + 4])):
                        # -0300 (BRST)
                        res.tzname = l[i + 4]
                        i += 4

                    i += 1

                # Check jumps
                elif not (info.jump(l[i]) or fuzzy):
                    raise ValueError(timestr)

                else:
                    skipped_idxs.append(i)
                i += 1

            # Process year/month/day
            year, month, day = ymd.resolve_ymd(yearfirst, dayfirst)

            res.century_specified = ymd.century_specified
            res.year = year
            res.month = month
            res.day = day

        except (IndexError, ValueError):
            return None, None

        if not info.validate(res):
            return None, None

        if fuzzy_with_tokens:
            skipped_tokens = self._recombine_skipped(l, skipped_idxs)
            return res, tuple(skipped_tokens)
        else:
            return res, None
    def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy):
        """Consume the numeric token at ``tokens[idx]`` (plus any tokens it
        implies, such as separators, h/m/s labels or AM/PM markers), storing
        what was recognised into ``ymd`` and/or ``res``.

        :return:
            The index of the last token consumed; the caller resumes the
            scan at the position after it.
        :raises ValueError:
            If the token cannot be interpreted and ``fuzzy`` is not set.
        """
        # Token is a number
        value_repr = tokens[idx]
        try:
            value = self._to_decimal(value_repr)
        except Exception as e:
            six.raise_from(ValueError('Unknown numeric token'), e)

        len_li = len(value_repr)

        len_l = len(tokens)

        # A bare 2- or 4-digit number after a complete date, not followed by
        # ':' or an h/m/s label, is the time part of e.g. "19990101T23[59]".
        if (len(ymd) == 3 and len_li in (2, 4) and
                res.hour is None and
                (idx + 1 >= len_l or
                 (tokens[idx + 1] != ':' and
                  info.hms(tokens[idx + 1]) is None))):
            # 19990101T23[59]
            s = tokens[idx]
            res.hour = int(s[:2])

            if len_li == 4:
                res.minute = int(s[2:])

        elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6):
            # YYMMDD or HHMMSS[.ss]
            s = tokens[idx]

            # Without a date so far and no fraction, treat as a date.
            if not ymd and '.' not in tokens[idx]:
                ymd.append(s[:2])
                ymd.append(s[2:4])
                ymd.append(s[4:])
            else:
                # 19990101T235959[.59]

                # TODO: Check if res attributes already set.
                res.hour = int(s[:2])
                res.minute = int(s[2:4])
                res.second, res.microsecond = self._parsems(s[4:])

        elif len_li in (8, 12, 14):
            # YYYYMMDD
            s = tokens[idx]
            ymd.append(s[:4], 'Y')
            ymd.append(s[4:6])
            ymd.append(s[6:8])

            if len_li > 8:
                res.hour = int(s[8:10])
                res.minute = int(s[10:12])

                if len_li > 12:
                    res.second = int(s[12:])

        elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None:
            # HH[ ]h or MM[ ]m or SS[.ss][ ]s
            hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True)
            (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx)
            if hms is not None:
                # TODO: checking that hour/minute/second are not
                # already set?
                self._assign_hms(res, value_repr, hms)

        elif idx + 2 < len_l and tokens[idx + 1] == ':':
            # HH:MM[:SS[.ss]]
            res.hour = int(value)
            value = self._to_decimal(tokens[idx + 2])  # TODO: try/except for this?
            (res.minute, res.second) = self._parse_min_sec(value)

            if idx + 4 < len_l and tokens[idx + 3] == ':':
                res.second, res.microsecond = self._parsems(tokens[idx + 4])

                idx += 2

            idx += 2

        elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'):
            # Separated date, e.g. "01-01[-01]" or "01-Jan[-01]".
            sep = tokens[idx + 1]
            ymd.append(value_repr)

            if idx + 2 < len_l and not info.jump(tokens[idx + 2]):
                if tokens[idx + 2].isdigit():
                    # 01-01[-01]
                    ymd.append(tokens[idx + 2])
                else:
                    # 01-Jan[-01]
                    value = info.month(tokens[idx + 2])

                    if value is not None:
                        ymd.append(value, 'M')
                    else:
                        raise ValueError()

                if idx + 3 < len_l and tokens[idx + 3] == sep:
                    # We have three members
                    value = info.month(tokens[idx + 4])

                    if value is not None:
                        ymd.append(value, 'M')
                    else:
                        ymd.append(tokens[idx + 4])
                    idx += 2

                idx += 1
            idx += 1

        elif idx + 1 >= len_l or info.jump(tokens[idx + 1]):
            if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None:
                # 12 am
                hour = int(value)
                res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2]))
                idx += 1
            else:
                # Year, month or day
                ymd.append(value)
            idx += 1

        elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24):
            # 12am
            hour = int(value)
            res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1]))
            idx += 1

        elif ymd.could_be_day(value):
            ymd.append(value)

        elif not fuzzy:
            raise ValueError()

        return idx
def _find_hms_idx(self, idx, tokens, info, allow_jump):
len_l = len(tokens)
if idx+1 < len_l and info.hms(tokens[idx+1]) is not None:
# There is an "h", "m", or "s" label following this token. We take
# assign the upcoming label to the current token.
# e.g. the "12" in 12h"
hms_idx = idx + 1
elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and
info.hms(tokens[idx+2]) is not None):
# There is a space and then an "h", "m", or "s" label.
# e.g. the "12" in "12 h"
hms_idx = idx + 2
elif idx > 0 and info.hms(tokens[idx-1]) is not None:
# There is a "h", "m", or "s" preceding this token. Since neither
# of the previous cases was hit, there is no label following this
# token, so we use the previous label.
# e.g. the "04" in "12h04"
hms_idx = idx-1
elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and
info.hms(tokens[idx-2]) is not None):
# If we are looking at the final token, we allow for a
# backward-looking check to skip over a space.
# TODO: Are we sure this is the right condition here?
hms_idx = idx - 2
else:
hms_idx = None
return hms_idx
def _assign_hms(self, res, value_repr, hms):
# See GH issue #427, fixing float rounding
value = self._to_decimal(value_repr)
if hms == 0:
# Hour
res.hour = int(value)
if value % 1:
res.minute = int(60*(value % 1))
elif hms == 1:
(res.minute, res.second) = self._parse_min_sec(value)
elif hms == 2:
(res.second, res.microsecond) = self._parsems(value_repr)
def _could_be_tzname(self, hour, tzname, tzoffset, token):
return (hour is not None and
tzname is None and
tzoffset is None and
len(token) <= 5 and
(all(x in string.ascii_uppercase for x in token)
or token in self.info.UTCZONE))
def _ampm_valid(self, hour, ampm, fuzzy):
"""
For fuzzy parsing, 'a' or 'am' (both valid English words)
may erroneously trigger the AM/PM flag. Deal with that
here.
"""
val_is_ampm = True
# If there's already an AM/PM flag, this one isn't one.
if fuzzy and ampm is not None:
val_is_ampm = False
# If AM/PM is found and hour is not, raise a ValueError
if hour is None:
if fuzzy:
val_is_ampm = False
else:
raise ValueError('No hour specified with AM or PM flag.')
elif not 0 <= hour <= 12:
# If AM/PM is found, it's a 12 hour clock, so raise
# an error for invalid range
if fuzzy:
val_is_ampm = False
else:
raise ValueError('Invalid hour specified for 12-hour clock.')
return val_is_ampm
def _adjust_ampm(self, hour, ampm):
if hour < 12 and ampm == 1:
hour += 12
elif hour == 12 and ampm == 0:
hour = 0
return hour
def _parse_min_sec(self, value):
# TODO: Every usage of this function sets res.second to the return
# value. Are there any cases where second will be returned as None and
# we *don't* want to set res.second = None?
minute = int(value)
second = None
sec_remainder = value % 1
if sec_remainder:
second = int(60 * sec_remainder)
return (minute, second)
def _parse_hms(self, idx, tokens, info, hms_idx):
# TODO: Is this going to admit a lot of false-positives for when we
# just happen to have digits and "h", "m" or "s" characters in non-date
# text? I guess hex hashes won't have that problem, but there's plenty
# of random junk out there.
if hms_idx is None:
hms = None
new_idx = idx
elif hms_idx > idx:
hms = info.hms(tokens[hms_idx])
new_idx = hms_idx
else:
# Looking backwards, increment one.
hms = info.hms(tokens[hms_idx]) + 1
new_idx = idx
return (new_idx, hms)
# ------------------------------------------------------------------
# Handling for individual tokens. These are kept as methods instead
# of functions for the sake of customizability via subclassing.
def _parsems(self, value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
def _to_decimal(self, val):
try:
decimal_value = Decimal(val)
# See GH 662, edge case, infinite value should not be converted
# via `_to_decimal`
if not decimal_value.is_finite():
raise ValueError("Converted decimal value is infinite or NaN")
except Exception as e:
msg = "Could not convert %s to decimal" % val
six.raise_from(ValueError(msg), e)
else:
return decimal_value
# ------------------------------------------------------------------
# Post-Parsing construction of datetime output. These are kept as
# methods instead of functions for the sake of customizability via
# subclassing.
def _build_tzinfo(self, tzinfos, tzname, tzoffset):
if callable(tzinfos):
tzdata = tzinfos(tzname, tzoffset)
else:
tzdata = tzinfos.get(tzname)
# handle case where tzinfo is paased an options that returns None
# eg tzinfos = {'BRST' : None}
if isinstance(tzdata, datetime.tzinfo) or tzdata is None:
tzinfo = tzdata
elif isinstance(tzdata, text_type):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, integer_types):
tzinfo = tz.tzoffset(tzname, tzdata)
else:
raise TypeError("Offset must be tzinfo subclass, tz string, "
"or int offset.")
return tzinfo
    def _build_tzaware(self, naive, res, tzinfos):
        """Attach timezone information from ``res``/``tzinfos`` to the
        *naive* datetime and return the (possibly aware) result.

        The elif chain's order is significant: an explicit ``tzinfos``
        entry wins, then a name matching the local zone, then a plain
        UTC/fixed offset; an unrecognised name falls through naive with
        an :class:`UnknownTimezoneWarning`.
        """
        if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)):
            tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset)
            aware = naive.replace(tzinfo=tzinfo)
            aware = self._assign_tzname(aware, res.tzname)

        elif res.tzname and res.tzname in time.tzname:
            aware = naive.replace(tzinfo=tz.tzlocal())

            # Handle ambiguous local datetime
            aware = self._assign_tzname(aware, res.tzname)

            # This is mostly relevant for winter GMT zones parsed in the UK
            if (aware.tzname() != res.tzname and
                    res.tzname in self.info.UTCZONE):
                aware = aware.replace(tzinfo=tz.UTC)

        elif res.tzoffset == 0:
            aware = naive.replace(tzinfo=tz.UTC)

        elif res.tzoffset:
            aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))

        elif not res.tzname and not res.tzoffset:
            # i.e. no timezone information was found.
            aware = naive

        elif res.tzname:
            # tz-like string was parsed but we don't know what to do
            # with it
            warnings.warn("tzname {tzname} identified but not understood. "
                          "Pass `tzinfos` argument in order to correctly "
                          "return a timezone-aware datetime. In a future "
                          "version, this will raise an "
                          "exception.".format(tzname=res.tzname),
                          category=UnknownTimezoneWarning)
            aware = naive

        return aware
def _build_naive(self, res, default):
repl = {}
for attr in ("year", "month", "day", "hour",
"minute", "second", "microsecond"):
value = getattr(res, attr)
if value is not None:
repl[attr] = value
if 'day' not in repl:
# If the default day exceeds the last day of the month, fall back
# to the end of the month.
cyear = default.year if res.year is None else res.year
cmonth = default.month if res.month is None else res.month
cday = default.day if res.day is None else res.day
if cday > monthrange(cyear, cmonth)[1]:
repl['day'] = monthrange(cyear, cmonth)[1]
naive = default.replace(**repl)
if res.weekday is not None and not res.day:
naive = naive + relativedelta.relativedelta(weekday=res.weekday)
return naive
def _assign_tzname(self, dt, tzname):
if dt.tzname() != tzname:
new_dt = tz.enfold(dt, fold=1)
if new_dt.tzname() == tzname:
return new_dt
return dt
def _recombine_skipped(self, tokens, skipped_idxs):
"""
>>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"]
>>> skipped_idxs = [0, 1, 2, 5]
>>> _recombine_skipped(tokens, skipped_idxs)
["foo bar", "baz"]
"""
skipped_tokens = []
for i, idx in enumerate(sorted(skipped_idxs)):
if i > 0 and idx - 1 == skipped_idxs[i - 1]:
skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx]
else:
skipped_tokens.append(tokens[idx])
return skipped_tokens
# Module-level parser instance backing the top-level parse() function.
DEFAULTPARSER = parser()
def parse(timestr, parserinfo=None, **kwargs):
    """
    Parse a string in one of the supported formats, using the
    ``parserinfo`` parameters.

    :param timestr:
        A string containing a date/time stamp.

    :param parserinfo:
        A :class:`parserinfo` object containing parameters for the parser.
        If ``None``, the default arguments to the :class:`parserinfo`
        constructor are used.

    The ``**kwargs`` parameter takes the following keyword arguments:

    :param default:
        The default datetime object, if this is a datetime object and not
        ``None``, elements specified in ``timestr`` replace elements in the
        default object.

    :param ignoretz:
        If set ``True``, time zones in parsed strings are ignored and a naive
        :class:`datetime` object is returned.

    :param tzinfos:
        Additional time zone names / aliases which may be present in the
        string. This argument maps time zone names (and optionally offsets
        from those time zones) to time zones. This parameter can be a
        dictionary with timezone aliases mapping time zone names to time
        zones or a function taking two parameters (``tzname`` and
        ``tzoffset``) and returning a time zone.

        The timezones to which the names are mapped can be an integer
        offset from UTC in seconds or a :class:`tzinfo` object.

        .. doctest::
           :options: +NORMALIZE_WHITESPACE

            >>> from dateutil.parser import parse
            >>> from dateutil.tz import gettz
            >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
            >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
            >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
            datetime.datetime(2012, 1, 19, 17, 21,
                              tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))

        This parameter is ignored if ``ignoretz`` is set.

    :param dayfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the day (``True``) or month (``False``). If
        ``yearfirst`` is set to ``True``, this distinguishes between YDM and
        YMD. If set to ``None``, this value is retrieved from the current
        :class:`parserinfo` object (which itself defaults to ``False``).

    :param yearfirst:
        Whether to interpret the first value in an ambiguous 3-integer date
        (e.g. 01/05/09) as the year. If ``True``, the first number is taken to
        be the year, otherwise the last number is taken to be the year. If
        this is set to ``None``, the value is retrieved from the current
        :class:`parserinfo` object (which itself defaults to ``False``).

    :param fuzzy:
        Whether to allow fuzzy parsing, allowing for string like "Today is
        January 1, 2047 at 8:21:00AM".

    :param fuzzy_with_tokens:
        If ``True``, ``fuzzy`` is automatically set to True, and the parser
        will return a tuple where the first element is the parsed
        :class:`datetime.datetime` datetimestamp and the second element is
        a tuple containing the portions of the string which were ignored:

        .. doctest::

            >>> from dateutil.parser import parse
            >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
            (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))

    :return:
        Returns a :class:`datetime.datetime` object or, if the
        ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
        first element being a :class:`datetime.datetime` object, the second
        a tuple containing the fuzzy tokens.

    :raises ParserError:
        (A subclass of :exc:`ValueError`.)  Raised for invalid or unknown
        string format, if the provided :class:`tzinfo` is not in a valid
        format, or if an invalid date would be created.

    :raises OverflowError:
        Raised if the parsed date exceeds the largest valid C integer on
        your system.
    """
    # Only build a fresh parser when a custom parserinfo is supplied;
    # otherwise reuse the shared module-level instance.
    if parserinfo:
        return parser(parserinfo).parse(timestr, **kwargs)
    else:
        return DEFAULTPARSER.parse(timestr, **kwargs)
class _tzparser(object):
    """Parser for TZ-environment-variable-style timezone strings
    (e.g. ``"EST5EDT,M3.2.0/2,M11.1.0/2"``), used via :func:`_parsetz`.

    ``parse`` returns a ``_result`` describing the standard/DST
    abbreviations, their offsets, and the DST transition rules, or
    ``None`` when the string cannot be understood.
    """

    class _result(_resultbase):
        # Overall result: zone abbreviations/offsets plus the DST start
        # and end transition rules.
        __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
                     "start", "end"]

        class _attr(_resultbase):
            # One DST transition rule: month/week/weekday, a Julian day
            # (jyday), or a zero-based year day (yday), plus the switch time.
            __slots__ = ["month", "week", "weekday",
                         "yday", "jyday", "day", "time"]

        def __repr__(self):
            return self._repr("")

        def __init__(self):
            _resultbase.__init__(self)
            self.start = self._attr()
            self.end = self._attr()

    def parse(self, tzstr):
        """Parse *tzstr*; return a populated ``_result`` or ``None`` on any
        failure (IndexError/ValueError/AssertionError are all swallowed and
        reported as ``None``).
        """
        res = self._result()
        l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x]
        used_idxs = list()
        try:
            len_l = len(l)

            i = 0
            while i < len_l:
                # BRST+3[BRDT[+2]]
                j = i
                while j < len_l and not [x for x in l[j]
                                         if x in "0123456789:,-+"]:
                    j += 1
                if j != i:
                    if not res.stdabbr:
                        offattr = "stdoffset"
                        res.stdabbr = "".join(l[i:j])
                    else:
                        offattr = "dstoffset"
                        res.dstabbr = "".join(l[i:j])

                    # NOTE(review): this marks indices 0..j-1 as used (not
                    # i..j-1); preserved as-is to match existing behavior.
                    for ii in range(j):
                        used_idxs.append(ii)
                    i = j
                    if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
                                       "0123456789")):
                        if l[i] in ('+', '-'):
                            # Yes, that's right.  See the TZ variable
                            # documentation.
                            signal = (1, -1)[l[i] == '+']
                            used_idxs.append(i)
                            i += 1
                        else:
                            signal = -1
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            setattr(res, offattr, (int(l[i][:2]) * 3600 +
                                                   int(l[i][2:]) * 60) * signal)
                        elif i + 1 < len_l and l[i + 1] == ':':
                            # -03:00
                            setattr(res, offattr,
                                    (int(l[i]) * 3600 +
                                     int(l[i + 2]) * 60) * signal)
                            used_idxs.append(i)
                            i += 2
                        elif len_li <= 2:
                            # -[0]3
                            setattr(res, offattr,
                                    int(l[i][:2]) * 3600 * signal)
                        else:
                            return None
                        used_idxs.append(i)
                        i += 1
                    if res.dstabbr:
                        break
                else:
                    break

            if i < len_l:
                # Normalize ';' separators to ',' before the rule section.
                for j in range(i, len_l):
                    if l[j] == ';':
                        l[j] = ','

                assert l[i] == ','

                i += 1

            if i >= len_l:
                pass
            elif (8 <= l.count(',') <= 9 and
                  not [y for x in l[i:] if x != ','
                       for y in x if y not in "0123456789+-"]):
                # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
                for x in (res.start, res.end):
                    x.month = int(l[i])
                    used_idxs.append(i)
                    i += 2
                    if l[i] == '-':
                        value = int(l[i + 1]) * -1
                        used_idxs.append(i)
                        i += 1
                    else:
                        value = int(l[i])
                    used_idxs.append(i)
                    i += 2
                    if value:
                        x.week = value
                        x.weekday = (int(l[i]) - 1) % 7
                    else:
                        x.day = int(l[i])
                    used_idxs.append(i)
                    i += 2
                    x.time = int(l[i])
                    used_idxs.append(i)
                    i += 2
                if i < len_l:
                    if l[i] in ('-', '+'):
                        signal = (-1, 1)[l[i] == "+"]
                        used_idxs.append(i)
                        i += 1
                    else:
                        signal = 1
                    used_idxs.append(i)
                    res.dstoffset = (res.stdoffset + int(l[i]) * signal)

                # This was a made-up format that is not in normal use
                warn(('Parsed time zone "%s"' % tzstr) +
                     'is in a non-standard dateutil-specific format, which ' +
                     'is now deprecated; support for parsing this format ' +
                     'will be removed in future versions. It is recommended ' +
                     'that you switch to a standard format like the GNU ' +
                     'TZ variable format.', tz.DeprecatedTzFormatWarning)
            elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
                  not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
                                                     '.', '-', ':')
                       for y in x if y not in "0123456789"]):
                for x in (res.start, res.end):
                    if l[i] == 'J':
                        # non-leap year day (1 based)
                        used_idxs.append(i)
                        i += 1
                        x.jyday = int(l[i])
                    elif l[i] == 'M':
                        # month[-.]week[-.]weekday
                        used_idxs.append(i)
                        i += 1
                        x.month = int(l[i])
                        used_idxs.append(i)
                        i += 1
                        assert l[i] in ('-', '.')
                        used_idxs.append(i)
                        i += 1
                        x.week = int(l[i])
                        if x.week == 5:
                            # Week 5 means "last such weekday of the month".
                            x.week = -1
                        used_idxs.append(i)
                        i += 1
                        assert l[i] in ('-', '.')
                        used_idxs.append(i)
                        i += 1
                        x.weekday = (int(l[i]) - 1) % 7
                    else:
                        # year day (zero based)
                        x.yday = int(l[i]) + 1

                    used_idxs.append(i)
                    i += 1

                    if i < len_l and l[i] == '/':
                        used_idxs.append(i)
                        i += 1
                        # start time
                        len_li = len(l[i])
                        if len_li == 4:
                            # -0300
                            x.time = (int(l[i][:2]) * 3600 +
                                      int(l[i][2:]) * 60)
                        elif i + 1 < len_l and l[i + 1] == ':':
                            # -03:00
                            x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
                            used_idxs.append(i)
                            i += 2
                            if i + 1 < len_l and l[i + 1] == ':':
                                used_idxs.append(i)
                                i += 2
                                x.time += int(l[i])
                        elif len_li <= 2:
                            # -[0]3
                            x.time = (int(l[i][:2]) * 3600)
                        else:
                            return None
                        used_idxs.append(i)
                        i += 1

                    assert i == len_l or l[i] == ','

                    i += 1

                assert i >= len_l

        except (IndexError, ValueError, AssertionError):
            return None

        unused_idxs = set(range(len_l)).difference(used_idxs)
        res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"})
        return res
# Module-level _tzparser instance backing _parsetz().
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
    """Parse a TZ-style timezone string with the shared module-level
    :class:`_tzparser`; returns its ``_result``, or ``None`` on failure."""
    return DEFAULTTZPARSER.parse(tzstr)
class ParserError(ValueError):
    """Exception raised when a datetime string cannot be parsed.

    ``args`` holds a printf-style format string followed by its arguments;
    the two are combined lazily in ``__str__``.
    """

    def __str__(self):
        try:
            return self.args[0] % self.args[1:]
        except (TypeError, IndexError):
            # No args, or args that don't fit the format string: fall back
            # to the standard ValueError rendering.
            return super(ParserError, self).__str__()

    def __repr__(self):
        return "{0}({1})".format(self.__class__.__name__, str(self))
class UnknownTimezoneWarning(RuntimeWarning):
    """Emitted (via ``warnings.warn`` in ``parser._build_tzaware``) when the
    parser finds a timezone name it cannot convert into a tzinfo."""
# vim:ts=4:sw=4:et
| gpl-3.0 |
tengerye/orthogonal-denoising-autoencoder | TensorFlow/demo.py | 2 | 2335 | import matplotlib
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import scipy.io
from sklearn.cross_decomposition import CCA
from orthAE import OrthdAE
# Generate toy data for multi-view learning, following the paper
# "Factorized Latent Spaces with Structured Sparsity".
t = np.arange(-1, 1, 0.02)
x = np.sin(2*np.pi*t)  # shared latent signal (common to both views)
x_noise = 0.02*np.sin(3.6*np.pi*t)  # correlated noise added to both views
# private latent signals (one per view)
z1 = np.cos(np.pi*np.pi*t)
z2 = np.cos(5*np.pi*t)
##########################################################
# Fig.2.(a): shared signal (blue), private signal (green), noise (red)
# for each of the two views.
f, axarr = plt.subplots(2, sharex=True)
axarr[0].plot(t, x, color='blue')
axarr[0].plot(t, z1, color='green')
axarr[0].plot(t, x_noise, color='red')
axarr[0].set_title('Fig.2.(a)')
axarr[1].plot(t, x, color='blue')
axarr[1].plot(t, z2, color='green')
axarr[1].plot(t, x_noise, color='red')
plt.show()
##########################################################
# Build each 20-dimensional observed view by randomly mixing its
# (shared, private) latent pair.
m1 = np.vstack((x, z1));
m2 = np.vstack((x, z2));
m1 = np.random.rand(20, 2).dot(m1)
m2 = np.random.rand(20, 2).dot(m2)
# Equivalent alternatives kept for reference:
# m1 = np.matmul(np.random.rand(20, 2), m1)
# m2 = np.matmul(np.random.rand(20, 2), m2)
# m1 = np.dot(np.random.rand(20, 2), m1)
# m2 = np.dot(np.random.rand(20, 2), m2)
# add gaussian noise with mean=0, standard deviation=0.01
m1 = m1 + np.random.randn(*m1.shape)*0.01;
m2 = m2 + np.random.randn(*m2.shape)*0.01;
# Append the correlated noise channel as a 21st row of each view.
m1 = np.vstack((m1, x_noise))
m2 = np.vstack((m2, x_noise))
##########################################################
# Fig.2.(b): the observed (mixed) 21-channel signals for each view.
f, axarr = plt.subplots(2, sharex=True)
axarr[0].plot(t, m1.transpose())
axarr[0].set_title('Fig.2.(b)')
axarr[1].plot(t, m2.transpose())
plt.show()
##########################################################
# Fig.3: classical CCA baseline on the two views.
cca = CCA(n_components=3)
cca.fit(m1.T, m2.T)
X_c = cca.transform(m1.T)
fig, ax = plt.subplots()
ax.set_title('Fig.2.(c)')
# set_color_cycle is the deprecated matplotlib API; set_prop_cycle replaces it.
# ax.set_color_cycle(['blue', 'green', 'red'])
ax.set_prop_cycle('color', ['blue', 'red', 'green'])
ax.plot(X_c)
# ax.plot(Y_c)
plt.show()
##########################################################
# Orthogonal denoising autoencoder (TensorFlow implementation) on the
# stacked 42-row data.  NOTE(review): presumably [1, 1, 1] are the sizes
# of the (shared, private-1, private-2) latent parts — confirm in OrthdAE.
x2 = np.concatenate((m1, m2,))
# y2 = trivial_denoising(x2)
# print('shape of y2=', np.shape(y2))
odae = OrthdAE([21, 21], [1, 1, 1])
odae.train(x2.T, max_iter=1000000)
result = odae.transform(x2.T)
plt.plot(result)
plt.show()
| apache-2.0 |
zfrenchee/pandas | pandas/tests/indexes/period/test_period.py | 1 | 26492 | import pytest
import numpy as np
import pandas as pd
import pandas.util._test_decorators as td
from pandas.util import testing as tm
from pandas import (PeriodIndex, period_range, notna, DatetimeIndex, NaT,
Index, Period, Int64Index, Series, DataFrame, date_range,
offsets)
from ..datetimelike import DatetimeLike
class TestPeriodIndex(DatetimeLike):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def setup_method(self, method):
self.indices = dict(index=tm.makePeriodIndex(10),
index_dec=period_range('20130101', periods=10,
freq='D')[::-1])
self.setup_indices()
    def create_index(self):
        # Fixture hook used by the shared DatetimeLike base tests.
        return period_range('20130101', periods=5, freq='D')
def test_astype_conversion(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
result = idx.astype(object)
expected = Index([Period('2016-05-16', freq='D')] +
[Period(NaT, freq='D')] * 3, dtype='object')
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([16937] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index(str(x) for x in idx)
tm.assert_index_equal(result, expected)
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
tm.assert_index_equal(result, Index(idx.asi8))
tm.assert_numpy_array_equal(result.values, idx.asi8)
@pytest.mark.parametrize('dtype', [
float, 'timedelta64', 'timedelta64[ns]'])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
msg = 'Cannot cast PeriodIndex to dtype'
with tm.assert_raises_regex(TypeError, msg):
idx.astype(dtype)
    def test_pickle_compat_construction(self):
        # Intentionally a no-op override of the shared base-class test.
        # NOTE(review): the reason is not stated here — presumably the
        # generic no-argument construction check does not apply to
        # PeriodIndex; confirm against the DatetimeLike base class.
        pass
def test_pickle_round_trip(self):
for freq in ['D', 'M', 'A']:
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq=freq)
result = tm.round_trip_pickle(idx)
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = self.create_index()
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([NaT] + i[1:].tolist(), freq='D')
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = self.create_index()
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_repeat(self):
# GH10183
idx = pd.period_range('2000-01-01', periods=3, freq='D')
res = idx.repeat(3)
exp = PeriodIndex(idx.values.repeat(3), freq='D')
tm.assert_index_equal(res, exp)
assert res.freqstr == 'D'
def test_fillna_period(self):
# GH 11343
idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], freq='H')
exp = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H')
tm.assert_index_equal(
idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna('x'), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'),
pd.Period('2011-01-01', freq='D'),
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
tm.assert_index_equal(idx.fillna(
pd.Period('2011-01-01', freq='D')), exp)
def test_no_millisecond_field(self):
with pytest.raises(AttributeError):
DatetimeIndex.millisecond
with pytest.raises(AttributeError):
DatetimeIndex([]).millisecond
def test_difference_freq(self):
# GH14323: difference of Period MUST preserve frequency
# but the ability to union results must be preserved
index = period_range("20160920", "20160925", freq="D")
other = period_range("20160921", "20160924", freq="D")
expected = PeriodIndex(["20160920", "20160925"], freq='D')
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = period_range("20160922", "20160925", freq="D")
idx_diff = index.difference(other)
expected = PeriodIndex(["20160920", "20160921"], freq='D')
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
def test_hash_error(self):
index = period_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
assert isinstance(series, Series)
def test_shallow_copy_empty(self):
# GH13067
idx = PeriodIndex([], freq='M')
result = idx._shallow_copy()
expected = idx
tm.assert_index_equal(result, expected)
def test_dtype_str(self):
pi = pd.PeriodIndex([], freq='M')
assert pi.dtype_str == 'period[M]'
assert pi.dtype_str == str(pi.dtype)
pi = pd.PeriodIndex([], freq='3M')
assert pi.dtype_str == 'period[3M]'
assert pi.dtype_str == str(pi.dtype)
def test_view_asi8(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
# values/get_values return object-dtype arrays of Period scalars (NaT
# preserved), while the private _values holds the underlying int64
# ordinals (NaT stored as iNaT, the int64 minimum).
def test_values(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
def test_period_index_length(self):
    """Length, freq and endpoints of PeriodIndex built via start/end/periods."""
    pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
    assert len(pi) == 9
    pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
    assert len(pi) == 4 * 9
    pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
    assert len(pi) == 12 * 9

    start = Period('02-Apr-2005', 'B')
    i1 = PeriodIndex(start=start, periods=20)
    assert len(i1) == 20
    assert i1.freq == start.freq
    assert i1[0] == start

    end_intv = Period('2006-12-31', 'W')
    i1 = PeriodIndex(end=end_intv, periods=10)
    assert len(i1) == 10
    assert i1.freq == end_intv.freq
    assert i1[-1] == end_intv

    # Equivalent spellings of a weekly frequency produce identical indexes.
    end_intv = Period('2006-12-31', '1w')
    i2 = PeriodIndex(end=end_intv, periods=10)
    assert len(i1) == len(i2)
    assert (i1 == i2).all()
    assert i1.freq == i2.freq

    end_intv = Period('2006-12-31', ('w', 1))
    i2 = PeriodIndex(end=end_intv, periods=10)
    assert len(i1) == len(i2)
    assert (i1 == i2).all()
    assert i1.freq == i2.freq

    # Idiom fix: use pytest.raises (already used below in this test)
    # instead of the try / raise AssertionError / except ValueError pattern.
    # Mixed freq for start and end is not allowed.
    with pytest.raises(ValueError):
        PeriodIndex(start=start, end=end_intv)

    end_intv = Period('2005-05-01', 'B')
    i1 = PeriodIndex(start=start, end=end_intv)

    # Must specify periods if either start or end is missing.
    with pytest.raises(ValueError):
        PeriodIndex(start=start)

    # infer freq from first element
    i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
    assert len(i2) == 2
    assert i2[0] == end_intv

    i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
    assert len(i2) == 2
    assert i2[0] == end_intv

    # Mixed freq should fail
    vals = [end_intv, Period('2006-12-31', 'w')]
    pytest.raises(ValueError, PeriodIndex, vals)
    vals = np.array(vals)
    pytest.raises(ValueError, PeriodIndex, vals)
# Exercise every datetime field accessor (via _check_all_fields) across
# all supported frequencies from annual down to secondly.
# NOTE(review): the D and B ranges pass an end date ('6/1/2001') earlier
# than the start ('12/1/2001') -- confirm this is intentional.
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self._check_all_fields(i1)
# Helper: for each field accessor, verify the index-level accessor and
# the Series .dt accessor both agree elementwise with the corresponding
# Period scalar attribute.  The .dt branch is skipped for empty indexes.
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'dayofyear',
'quarter', 'qyear', 'days_in_month']
periods = list(periodindex)
s = pd.Series(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert len(periodindex) == len(field_idx)
for x, val in zip(periods, field_idx):
assert getattr(x, field) == val
if len(s) == 0:
continue
field_s = getattr(s.dt, field)
assert len(periodindex) == len(field_s)
for x, val in zip(periods, field_s):
assert getattr(x, field) == val
def test_period_set_index_reindex(self):
    """GH 6631: set_index accepts a PeriodIndex, and can be replaced."""
    frame = DataFrame(np.random.random(6))
    monthly = period_range('2011/01/01', periods=6, freq='M')
    yearly = period_range('2013', periods=6, freq='A')
    frame = frame.set_index(monthly)
    tm.assert_index_equal(frame.index, monthly)
    frame = frame.set_index(yearly)
    tm.assert_index_equal(frame.index, yearly)
# factorize returns (codes, uniques).  With sort=True the uniques are
# ordered chronologically; by default they keep first-seen order.
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# Already sorted, so sort=True gives the same result.
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
# sort=True: uniques are the chronological exp_idx from above.
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# Default: uniques in first-appearance order.
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# astype(object) and the matplotlib-repr hook both produce object-dtype
# arrays of Period scalars, with NaT preserved.
def test_astype_object(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.astype(object).values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
# is_() checks view-identity (shared underlying data), not equality:
# views of an index satisfy is_(), while copies, slices, asfreq results
# and arithmetic results do not.
def test_is_(self):
create_index = lambda: PeriodIndex(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
assert index.is_(index)
assert not index.is_(create_index())
assert index.is_(index.view())
# is_ survives arbitrarily chained views.
assert index.is_(index.view().view().view().view().view())
assert index.view().is_(index)
ind2 = index.view()
# Renaming the base does not break the view relationship.
index.name = "Apple"
assert ind2.is_(index)
assert not index.is_(index[:])
assert not index.is_(index.asfreq('M'))
assert not index.is_(index.asfreq('A'))
assert not index.is_(index - 2)
# NOTE(review): even a no-op shift (index - 0) yields a new object, so
# is_() is False -- confirm this is the intended contract.
assert not index.is_(index - 0)
def test_comp_period(self):
    """Elementwise comparison against a scalar Period matches ndarray."""
    idx = period_range('2007-01', periods=20, freq='M')
    expected = idx.values < idx.values[10]
    tm.assert_numpy_array_equal(idx < idx[10], expected)
def test_contains(self):
    """Membership is frequency-sensitive: only a matching-freq Period hits."""
    rng = period_range('2007-01', freq='M', periods=10)
    assert Period('2007-01', freq='M') in rng
    # Idiom fix: "x not in y" instead of "not x in y".
    assert Period('2007-01', freq='D') not in rng
    assert Period('2007-01', freq='2M') not in rng
def test_contains_nat(self):
    """gh-13582: NaT-like values are members only when the index holds NaT."""
    nat_likes = [pd.NaT, None, float('nan'), np.nan]
    clean = period_range('2007-01', freq='M', periods=10)
    for value in nat_likes:
        assert value not in clean
    with_nat = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
    for value in nat_likes:
        assert value in with_nat
def test_periods_number_check(self):
    # period_range must reject this argument combination with ValueError
    # ('B' lands in the third positional slot, which is not a freq slot).
    with pytest.raises(ValueError):
        period_range('2011-1-1', '2012-1-1', 'B')
def test_start_time(self):
    """start_time of monthly periods lines up with month-start dates."""
    pi = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
    expected = date_range('2016-01-01', end='2016-05-31', freq='MS')
    tm.assert_index_equal(pi.start_time, expected)
def test_end_time(self):
    """end_time of monthly periods lines up with month-end dates."""
    pi = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
    expected = date_range('2016-01-01', end='2016-05-31', freq='M')
    tm.assert_index_equal(pi.end_time, expected)
# Scalar lookup on a PeriodIndex with duplicates returns all matching
# rows, whether or not the index is monotonic.
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
tm.assert_series_equal(result, expected)
# Assigning through the slice writes back into ts (view semantics),
# as asserted on the next line.
result[:] = 1
assert (ts[1:3] == 1).all()
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
tm.assert_series_equal(result, expected)
# unique()/nunique() on a PeriodIndex with duplicates.
# NOTE(review): tz= on a PeriodIndex constructor is unusual -- the
# expected values suggest it has no effect on uniqueness; confirm.
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN',
tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN',
tz='US/Eastern')
tm.assert_index_equal(idx.unique(), expected)
assert idx.nunique() == 3
# GH8083: shift(1) moves every period one step forward.  Relies on the
# base class's create_index(); per the expected values it is a daily
# index starting 2013-01-01.
def test_shift_gh8083(self):
# test shift for PeriodIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'], freq='D')
tm.assert_index_equal(result, expected)
# shift(n) moves by n whole periods of the index's own frequency, in
# both directions, for annual, monthly and daily frequencies; shift(0)
# is a no-op.
def test_shift(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
tm.assert_index_equal(pi1.shift(0), pi1)
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert len(pi1) == len(pi2)
tm.assert_index_equal(pi1.shift(-1), pi2)
def test_shift_nat(self):
    """Shifting skips NaT entries and preserves the index name."""
    idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                      freq='M', name='idx')
    shifted = idx.shift(1)
    expected = PeriodIndex(['2011-02', '2011-03', 'NaT', '2011-05'],
                           freq='M', name='idx')
    tm.assert_index_equal(shifted, expected)
    assert shifted.name == expected.name
# Delegates to the shared ndarray-compat property checks from the base
# test class; skipped on 32-bit builds (see the td.skip_if_32bit marker).
@td.skip_if_32bit
def test_ndarray_compat_properties(self):
super(TestPeriodIndex, self).test_ndarray_compat_properties()
def test_shift_ndarray(self):
    """shift accepts a per-element offset array; NaT stays NaT."""
    base = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                       freq='M', name='idx')
    result = base.shift(np.array([1, 2, 3, 4]))
    expected = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'],
                           freq='M', name='idx')
    tm.assert_index_equal(result, expected)
    # Mixed positive and negative offsets are applied elementwise.
    result = base.shift(np.array([1, -2, 3, -4]))
    expected = PeriodIndex(['2011-02', '2010-12', 'NaT', '2010-12'],
                           freq='M', name='idx')
    tm.assert_index_equal(result, expected)
def test_negative_ordinals(self):
    """Ordinals at or below zero are legal; list and array inputs agree."""
    Period(ordinal=-1000, freq='A')
    Period(ordinal=0, freq='A')
    from_list = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
    from_array = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
    tm.assert_index_equal(from_list, from_array)
# Field accessors (.year, .month) return -1 at NaT positions and
# propagate the index name.
# NOTE(review): freq='D' with month-resolution strings looks odd --
# confirm the daily frequency is intended here.
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2012-03', '2012-04'], freq='D', name='name')
exp = Index([2011, 2011, -1, 2012, 2012], dtype=np.int64, name='name')
tm.assert_index_equal(idx.year, exp)
exp = Index([1, 2, -1, 3, 4], dtype=np.int64, name='name')
tm.assert_index_equal(idx.month, exp)
# Quarter-string indexing: the index is built from '2Q05'-style labels
# but looked up with the reversed '05Q4' spelling (see Todo below).
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
# Todo: fix these accessors!
assert s['05Q4'] == s[2]
def test_numpy_repeat(self):
    """np.repeat works elementwise on a PeriodIndex; axis is unsupported."""
    index = period_range('20010101', periods=2)
    expected = PeriodIndex([Period('2001-01-01'), Period('2001-01-01'),
                            Period('2001-01-02'), Period('2001-01-02')])
    tm.assert_index_equal(np.repeat(index, 2), expected)
    tm.assert_raises_regex(ValueError,
                           "the 'axis' parameter is not supported",
                           np.repeat, index, 2, axis=1)
# A '2M' frequency steps two months at a time; PeriodIndex and
# period_range (with end or with periods) must all agree and expose
# freq as offsets.MonthEnd(2).
def test_pindex_multiples(self):
pi = PeriodIndex(start='1/1/11', end='12/31/11', freq='2M')
expected = PeriodIndex(['2011-01', '2011-03', '2011-05', '2011-07',
'2011-09', '2011-11'], freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', end='12/31/11', freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
pi = period_range(start='1/1/11', periods=6, freq='2M')
tm.assert_index_equal(pi, expected)
assert pi.freq == offsets.MonthEnd(2)
assert pi.freqstr == '2M'
def test_iteration(self):
    """Iterating yields Period scalars carrying the index frequency."""
    index = PeriodIndex(start='1/1/10', periods=4, freq='B')
    first = list(index)[0]
    assert isinstance(first, Period)
    assert first.freq == index.freq
# is_full: True when the monotonic index has no gaps between
# consecutive values (duplicates allowed); raises ValueError when the
# index is not monotonic.  An empty slice is trivially full.
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2006, 2007], freq='A')
assert index.is_full
index = PeriodIndex([2005, 2005, 2007], freq='A')
assert not index.is_full
index = PeriodIndex([2005, 2005, 2006], freq='A')
assert index.is_full
index = PeriodIndex([2006, 2005, 2005], freq='A')
pytest.raises(ValueError, getattr, index, 'is_full')
assert index[:0].is_full
def test_with_multi_index(self):
    """#1705: a PeriodIndex can serve as one level of a MultiIndex."""
    stamps = date_range('1/1/2012', periods=4, freq='12H')
    level_arrays = [stamps.to_period(freq='D'), stamps.hour]
    s = Series([0, 1, 2, 3], level_arrays)
    assert isinstance(s.index.levels[0], PeriodIndex)
    assert isinstance(s.index.values[0][0], Period)
def test_convert_array_of_periods(self):
    """pd.Index infers a PeriodIndex from a list of Period scalars."""
    rng = period_range('1/1/2000', periods=20, freq='D')
    assert isinstance(pd.Index(list(rng)), PeriodIndex)
# #1815: concatenating two period-converted Series keeps a PeriodIndex
# and preserves the first series' leading label.
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_pickle_freq(self):
    """GH2891: a pickle round trip preserves the frequency."""
    rng = period_range('1/1/2011', '1/1/2012', freq='M')
    unpickled = tm.round_trip_pickle(rng)
    assert unpickled.freq == offsets.MonthEnd()
    assert unpickled.freqstr == 'M'
def test_map(self):
    """map over Period scalars (dict-like map is covered elsewhere)."""
    pi = PeriodIndex([2005, 2007, 2009], freq='A')
    expected = Index([p.ordinal for p in pi])
    tm.assert_index_equal(pi.map(lambda p: p.ordinal), expected)
@pytest.mark.parametrize('how', ['outer', 'inner', 'left', 'right'])
def test_join_self(self, how):
    """Joining an index with itself returns the same object, for any how."""
    index = period_range('1/1/2000', periods=10)
    assert index.join(index, how=how) is index
def test_insert(self):
    """GH 18295: inserting any missing-value flavor yields NaT."""
    expected = PeriodIndex(
        ['2017Q1', pd.NaT, '2017Q2', '2017Q3', '2017Q4'], freq='Q')
    for missing in (np.nan, pd.NaT, None):
        base = period_range('2017Q1', periods=4, freq='Q')
        tm.assert_index_equal(base.insert(1, missing), expected)
| bsd-3-clause |
gweidner/incubator-systemml | scripts/perftest/python/google_docs/update.py | 15 | 4666 | #!/usr/bin/env python3
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import sys
import os.path
import argparse
import pandas as pd
from oauth2client.service_account import ServiceAccountCredentials
import gspread
# Update data to google sheets
def parse_data(file_path):
    """
    Parse a perf-test output CSV into (key, time) series.

    The first row (banner) and the last row (footer) of the file are
    skipped; the rest is read as CSV.

    Returns a pair of pandas Series: a composite key built from the
    algorithm name, run type, intercept, matrix type and data shape,
    plus the matching run times in seconds.
    """
    # Skip the banner at the top and the footer at the bottom.
    frame = pd.read_csv(file_path, sep=',', skiprows=1, skipfooter=1,
                        engine='python')
    # The column holds values like "INFO:root:algorithm:<name>"; keep
    # only the trailing <name> component.
    algo_names = frame['INFO:root:algorithm'].apply(lambda v: v.split(':')[-1])
    parts = [algo_names, frame['run_type'], frame['intercept'],
             frame['matrix_type'], frame['data_shape']]
    composite_key = parts[0]
    for part in parts[1:]:
        composite_key = composite_key + '_' + part
    return composite_key, frame['time_sec']
def auth(path, sheet_name):
    """
    Authorize against the Google Sheets API and open a worksheet.

    path: location of the service-account JSON key file.
    sheet_name: worksheet name inside the "Perf" spreadsheet.
    Returns the gspread worksheet object.
    """
    scopes = ['https://spreadsheets.google.com/feeds']
    credentials = ServiceAccountCredentials.from_json_keyfile_name(path, scopes)
    client = gspread.authorize(credentials)
    # All perf results live in the spreadsheet named "Perf".
    return client.open("Perf").worksheet(sheet_name)
def insert_pair(algo, time, start_col, tag, worksheet=None):
    """
    Insert an (algorithm, time) column pair into the spreadsheet.

    algo: series of composite algorithm keys.
    time: series of run times matching ``algo``.
    start_col: 1-based column index for the algorithm column; the time
        column is written immediately to its right.
    tag: tag appended to the headers (``algo_<tag>`` / ``time_<tag>``).
    worksheet: gspread worksheet to write to.  Defaults to the
        module-level ``sheet`` created in the ``__main__`` block,
        preserving the original reliance on that global while letting
        callers pass the sheet explicitly.
    """
    target = sheet if worksheet is None else worksheet
    insert_values(target, algo, start_col, 'algo_{}'.format(tag))
    insert_values(target, time, start_col + 1, 'time_{}'.format(tag))
    print('Writing Complete')
def insert_values(sheet, key, col_num, header):
    """
    Write one column of data to the worksheet.

    sheet: gspread worksheet (anything exposing ``update_cell``).
    key: iterable of cell values written below the header.
    col_num: 1-based column index to write into.
    header: value placed in row 1 of the column.
    """
    # Row 1 holds the column header; data starts at row 2.
    sheet.update_cell(1, col_num, header)
    row = 2
    for value in key:
        sheet.update_cell(row, col_num, value)
        row += 1
def get_dim(sheet):
    """
    Return the (rows, cols) dimensions of the data already in the sheet.

    The row count excludes the header row (``get_all_records`` returns
    one dict per data row); the column count is taken from the first
    record, or 0 when there are no records.
    """
    try:
        records = sheet.get_all_records()
    except Exception:
        # Fix: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.  An empty/new worksheet makes
        # gspread raise; treat it as one empty row, as before.
        records = [[]]
    row = len(records)
    # Fix: guard against an empty record list, which previously raised
    # IndexError on records[0].
    col = len(records[0]) if records else 0
    return row, col
def row_append(data_frame, file):
    """
    Combine ``data_frame`` with the CSV stored at ``file``, side by side.

    Returns a new DataFrame whose columns are those of ``data_frame``
    followed by the columns read from the CSV.
    """
    existing = pd.read_csv(file)
    return pd.concat([data_frame, existing], axis=1)
# Example Usage
# ./update.py --file ../temp/test.out --exec-mode singlenode --auth client_json.json --tag 3.0
# NOTE(review): the usage example says --exec-mode but the argument
# defined below is --exec-type -- confirm which spelling is current.
if __name__ == '__main__':
# Supported SystemML backends; doubles as the choices for --exec-type.
execution_mode = ['hybrid_spark', 'singlenode']
cparser = argparse.ArgumentParser(description='System-ML Update / Stat Script')
cparser.add_argument('--file', help='Location of the current perf test outputs',
required=True, metavar='')
cparser.add_argument('--exec-type', help='Backend Type', choices=execution_mode,
required=True, metavar='')
cparser.add_argument('--tag', help='Tagging header value',
required=True, metavar='')
cparser.add_argument('--auth', help='Location to read auth file', metavar='')
cparser.add_argument('--append', help='Location to append the outputs', metavar='')
args = cparser.parse_args()
# At least one output destination (Google Sheets or local CSV) is needed.
if args.auth is None and args.append is None:
sys.exit('Both --auth and --append cannot be empty')
algo, time = parse_data(args.file)
# Local CSV path: build a tagged frame and append it column-wise to an
# existing file, or create the file if it does not exist yet.
if args.append is not None:
schema_df = {'algo_{}'.format(args.tag): algo,
'time_{}'.format(args.tag): time}
data_frame = pd.DataFrame(schema_df)
if os.path.isfile(args.append):
append_data = row_append(data_frame, args.append)
append_data.to_csv(args.append, sep=',', index=False)
else:
data_frame.to_csv(args.append, sep=',', index=False)
# Google Sheets path: authenticate, find the first free column, and
# write the (algo, time) pair there.
if args.auth is not None:
# Read data from file and write to google docs
algo, time = parse_data(args.file)
# Authenticate and get sheet dimensions
sheet = auth(args.auth, args.exec_type)
row, col = get_dim(sheet)
insert_pair(algo, time, col + 1, args.tag)
rhyolight/nupic.research | htmresearch/support/sp_paper_utils.py | 4 | 12897 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
from htmresearch.frameworks.sp_paper.sp_metrics import (
calculateInputOverlapMat, percentOverlap
)
from nupic.bindings.math import GetNTAReal
# Numeric dtypes used throughout this module: NuPIC's real type (as
# reported by GetNTAReal) and the unsigned int type expected by the SP
# bindings.
realDType = GetNTAReal()
uintType = "uint32"
def plotPermInfo(permInfo):
  """
  Plot synapse-permanence statistics over time on stacked axes.

  permInfo: dict with per-step sequences under the keys
    'numConnectedSyn', 'numNonConnectedSyn', 'avgPermConnectedSyn'
    and 'avgPermNonConnectedSyn'.
  """
  fig, ax = plt.subplots(5, 1, sharex=True)
  ax[0].plot(permInfo['numConnectedSyn'])
  ax[0].set_title('connected syn #')
  ax[1].plot(permInfo['numNonConnectedSyn'])
  # Bug fix: this title was previously set on ax[0], overwriting the
  # 'connected syn #' title and leaving ax[1] untitled.
  ax[1].set_title('non-connected syn #')
  ax[2].plot(permInfo['avgPermConnectedSyn'])
  ax[2].set_title('perm connected')
  ax[3].plot(permInfo['avgPermNonConnectedSyn'])
  ax[3].set_title('perm unconnected')
def plotAccuracyVsNoise(noiseLevelList, predictionAccuracy):
  """
  Line plot of prediction accuracy as a function of input noise level.
  """
  plt.figure()
  plt.plot(noiseLevelList, predictionAccuracy, '-o')
  plt.xlabel('Noise level')
  plt.ylabel('Prediction Accuracy')
  # Accuracy is a fraction; leave a little headroom above 1.0.
  plt.ylim([0, 1.05])
# Plot the evolution of SP metrics (stability, entropy, noise
# robustness, synapse formation/removal) over epochs on five stacked
# axes; optionally save the figure to fileName.  The first entry of the
# stability and synapse-change traces is blanked to NaN, presumably
# because there is no previous epoch to compare against.
def plotSPstatsOverTime(metrics, fileName=None):
fig, axs = plt.subplots(nrows=5, ncols=1, sharex=True)
metrics['stability'][0] = float('nan')
metrics['numNewSyn'][0] = float('nan')
metrics['numRemoveSyn'][0] = float('nan')
axs[0].plot(metrics['stability'])
axs[0].set_ylabel('Stability')
axs[1].plot(metrics['entropy'])
maxEntropy = metrics['maxEntropy']
# Draw the median of the max-entropy trace as a flat reference line.
maxEntropy = np.ones(len(maxEntropy)) * np.median(maxEntropy)
axs[1].plot(maxEntropy, 'k--')
axs[1].set_ylabel('Entropy (bits)')
if len(metrics['noiseRobustness']) > 0:
axs[2].plot(metrics['noiseRobustness'])
axs[2].set_ylabel('Noise Robustness')
axs[3].plot(metrics['numNewSyn'])
axs[3].set_ylabel('Synapses Formation')
axs[4].plot(metrics['numRemoveSyn'])
axs[4].set_ylabel('Synapse Removal')
axs[4].set_xlim([0, len(metrics['numRemoveSyn'])])
axs[4].set_xlabel('epochs')
if fileName is not None:
plt.savefig(fileName)
return axs
# Show the 2D receptive fields (connected synapses reshaped to Nx x Ny)
# of 16 randomly chosen SP columns as a 4x4 grid of inverted grayscale
# images (connected synapses appear dark).
def plotReceptiveFields2D(sp, Nx, Ny):
inputSize = Nx * Ny
numColumns = np.product(sp.getColumnDimensions())
nrows = 4
ncols = 4
fig, ax = plt.subplots(nrows, ncols)
for r in range(nrows):
for c in range(ncols):
colID = np.random.randint(numColumns)
connectedSynapses = np.zeros((inputSize,), dtype=uintType)
sp.getConnectedSynapses(colID, connectedSynapses)
receptiveField = np.reshape(connectedSynapses, (Nx, Ny))
ax[r, c].imshow(1-receptiveField, interpolation="nearest", cmap='gray')
# ax[r, c].set_title('col {}'.format(colID))
ax[r, c].set_xticks([])
ax[r, c].set_yticks([])
def plotReceptiveFields(sp, nDim1=8, nDim2=8):
  """
  Plot 2D receptive fields for 16 randomly selected columns
  :param sp:
  :return:
  """
# Like plotReceptiveFields2D, but titles each panel with the column id
# and uses non-inverted grayscale.
columnNumber = np.product(sp.getColumnDimensions())
fig, ax = plt.subplots(nrows=4, ncols=4)
for rowI in range(4):
for colI in range(4):
col = np.random.randint(columnNumber)
connectedSynapses = np.zeros((nDim1*nDim2,), dtype=uintType)
sp.getConnectedSynapses(col, connectedSynapses)
receptiveField = connectedSynapses.reshape((nDim1, nDim2))
ax[rowI, colI].imshow(receptiveField, cmap='gray')
ax[rowI, colI].set_title("col: {}".format(col))
# Scatter the receptive-field centers over the input plane, colored by
# each column's connected-synapse count; returns the figure.
# RFcenters: (numColumns, 2) array of x/y centers.
# connectedCounts: per-column connected-synapse counts (color values).
# inputDims: (nX, nY) extent of the input space used for axis limits.
def plotReceptiveFieldCenter(RFcenters, connectedCounts, inputDims,
minConnection=None, maxConnection=None):
nX, nY = inputDims
import matplotlib.cm as cm
cmap = cm.get_cmap('jet')
# Default the color scale to the observed count range.
if minConnection is None:
minConnection = np.min(connectedCounts)
if maxConnection is None:
maxConnection = np.max(connectedCounts)
fig = plt.figure()
sc = plt.scatter(RFcenters[:, 0], RFcenters[:, 1],
vmin=minConnection, vmax=maxConnection,
c=connectedCounts, cmap=cmap)
plt.colorbar(sc)
plt.axis('equal')
plt.xlim([-1, nX + 1])
plt.ylim([-1, nY + 1])
return fig
def plotBoostTrace(sp, inputVectors, columnIndex):
  """
  Plot boostfactor for a selected column
  Note that learning is ON for SP here
  :param sp: sp instance
  :param inputVectors: input data
  :param columnIndex: index for the column of interest
  """
numInputVector, inputSize = inputVectors.shape
columnNumber = np.prod(sp.getColumnDimensions())
# Record per-step boost factors and (min) active duty cycles for all
# columns while presenting each input once with learning enabled.
boostFactorsTrace = np.zeros((columnNumber, numInputVector))
activeDutyCycleTrace = np.zeros((columnNumber, numInputVector))
minActiveDutyCycleTrace = np.zeros((columnNumber, numInputVector))
for i in range(numInputVector):
outputColumns = np.zeros(sp.getColumnDimensions(), dtype=uintType)
inputVector = copy.deepcopy(inputVectors[i][:])
sp.compute(inputVector, True, outputColumns)
boostFactors = np.zeros((columnNumber, ), dtype=realDType)
sp.getBoostFactors(boostFactors)
boostFactorsTrace[:, i] = boostFactors
activeDutyCycle = np.zeros((columnNumber, ), dtype=realDType)
sp.getActiveDutyCycles(activeDutyCycle)
activeDutyCycleTrace[:, i] = activeDutyCycle
minActiveDutyCycle = np.zeros((columnNumber, ), dtype=realDType)
sp.getMinActiveDutyCycles(minActiveDutyCycle)
minActiveDutyCycleTrace[:, i] = minActiveDutyCycle
# Two stacked plots for the selected column: boost factor on top,
# active vs. minimum-active duty cycle below.
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(boostFactorsTrace[columnIndex, :])
plt.ylabel('Boost Factor')
plt.subplot(2, 1, 2)
plt.plot(activeDutyCycleTrace[columnIndex, :])
plt.plot(minActiveDutyCycleTrace[columnIndex, :])
plt.xlabel(' Time ')
plt.ylabel('Active Duty Cycle')
# Visualize how much each SP column's receptive field overlaps the
# input vectors: a curve of per-column overlaps sorted (descending,
# averaged over columns) and a heatmap of the first 100 columns.
def analyzeReceptiveFieldSparseInputs(inputVectors, sp):
numColumns = np.product(sp.getColumnDimensions())
overlapMat = calculateInputOverlapMat(inputVectors, sp)
sortedOverlapMat = np.zeros(overlapMat.shape)
for c in range(numColumns):
sortedOverlapMat[c, :] = np.sort(overlapMat[c, :])
# np.sort is ascending; flip to show largest overlaps first.
avgSortedOverlaps = np.flipud(np.mean(sortedOverlapMat, 0))
plt.figure()
plt.plot(avgSortedOverlaps, '-o')
plt.xlabel('sorted input vector #')
plt.ylabel('percent overlap')
plt.figure()
plt.imshow(overlapMat[:100, :], interpolation="nearest", cmap="magma")
plt.xlabel("Input Vector #")
plt.ylabel("SP Column #")
plt.colorbar()
plt.title('percent overlap')
# For a two-sensor setup (input split into two halves), compute each
# column's receptive-field overlap against each half's input vectors
# separately, then plot the average sorted-overlap curves and heatmaps
# of the first 100 columns for both halves.
def analyzeReceptiveFieldCorrelatedInputs(
inputVectors, sp, params, inputVectors1, inputVectors2):
columnNumber = np.prod(sp.getColumnDimensions())
numInputVector, inputSize = inputVectors.shape
numInputVector1 = params['numInputVectorPerSensor']
numInputVector2 = params['numInputVectorPerSensor']
w = params['numActiveInputBits']
# The two sensors each own half of the input bits.
inputSize1 = int(params['inputSize']/2)
inputSize2 = int(params['inputSize']/2)
connectedCounts = np.zeros((columnNumber,), dtype=uintType)
sp.getConnectedCounts(connectedCounts)
numColumns = np.product(sp.getColumnDimensions())
overlapMat1 = np.zeros((numColumns, inputVectors1.shape[0]))
overlapMat2 = np.zeros((numColumns, inputVectors2.shape[0]))
numColumns = np.product(sp.getColumnDimensions())
numInputVector, inputSize = inputVectors.shape
for c in range(numColumns):
connectedSynapses = np.zeros((inputSize,), dtype=uintType)
sp.getConnectedSynapses(c, connectedSynapses)
# First half of the synapses vs. sensor-1 inputs, second half vs.
# sensor-2 inputs.
for i in range(inputVectors1.shape[0]):
overlapMat1[c, i] = percentOverlap(connectedSynapses[:inputSize1],
inputVectors1[i, :inputSize1])
for i in range(inputVectors2.shape[0]):
overlapMat2[c, i] = percentOverlap(connectedSynapses[inputSize1:],
inputVectors2[i, :inputSize2])
sortedOverlapMat1 = np.zeros(overlapMat1.shape)
sortedOverlapMat2 = np.zeros(overlapMat2.shape)
for c in range(numColumns):
sortedOverlapMat1[c, :] = np.sort(overlapMat1[c, :])
sortedOverlapMat2[c, :] = np.sort(overlapMat2[c, :])
fig, ax = plt.subplots(nrows=2, ncols=2)
ax[0, 0].plot(np.mean(sortedOverlapMat1, 0), '-o')
ax[0, 1].plot(np.mean(sortedOverlapMat2, 0), '-o')
fig, ax = plt.subplots(nrows=1, ncols=2)
ax[0].imshow(overlapMat1[:100, :], interpolation="nearest", cmap="magma")
ax[0].set_xlabel('# Input 1')
ax[0].set_ylabel('SP Column #')
ax[1].imshow(overlapMat2[:100, :], interpolation="nearest", cmap="magma")
ax[1].set_xlabel('# Input 2')
ax[1].set_ylabel('SP Column #')
# Run the spatial pooler over a batch of input SDRs.
# sp: spatial pooler instance (NuPIC SP API: compute, getBoostFactors).
# inputVectors: (numInputVector, inputSize) array of binary inputs.
# learn: when True, SP learning is on and boost factors are averaged
#   over the batch; when False the returned boost factors are all ones.
# sdrOrders: optional presentation order (indices into inputVectors);
#   defaults to sequential order.
# verbose: >0 prints progress every 200 vectors (Python 2 print).
# Returns (outputColumns, avgBoostFactors).
def runSPOnBatch(sp, inputVectors, learn, sdrOrders=None, verbose=0):
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
if sdrOrders is None:
sdrOrders = range(numInputVector)
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
if learn:
avgBoostFactors = np.zeros((numColumns,), dtype=realDType)
else:
avgBoostFactors = np.ones((numColumns,), dtype=realDType)
for i in range(numInputVector):
# Output rows are stored at the input's original index, not the
# presentation position.
sp.compute(inputVectors[sdrOrders[i]][:], learn, outputColumns[sdrOrders[i]][:])
if learn:
boostFactors = np.zeros((numColumns,), dtype=realDType)
sp.getBoostFactors(boostFactors)
avgBoostFactors += boostFactors
if verbose > 0:
if i % 200 == 0:
print "{} % finished".format(100 * float(i) / float(numInputVector))
if learn:
avgBoostFactors = avgBoostFactors/numInputVector
return outputColumns, avgBoostFactors
def runDiscriminationTest(sp, inputVectors, numPairs=100):
  """
  For two random input vectors, store their SP outputs. Create a merged version
  of the two vectors and get its SP output.
  Compare the overlap of the "merged input" SP output with the two stored SP
  outputs.
  Compute the average overlap between all pairs of vectors.
  """
# Learning is disabled (compute(..., False, ...)) so the SP state is
# not modified by this test.  Prints summary stats (Python 2 print).
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
sparsity = np.zeros(numPairs)
overlaps = np.zeros(numPairs)
mergedOverlaps = np.zeros(numPairs)
for a in range(numPairs):
outputColumns = np.zeros((3, numColumns), dtype=uintType)
i = np.random.randint(numInputVector)
j = np.random.randint(numInputVector)
sp.compute(inputVectors[i][:], False, outputColumns[0][:])
sp.compute(inputVectors[j][:], False, outputColumns[1][:])
# The merged input is the elementwise OR of the two vectors.
mergedInput = inputVectors[j][:].copy()
mergedInput[inputVectors[i][:].nonzero()[0]] = 1
sp.compute(mergedInput, False, outputColumns[2][:])
overlaps[a] = outputColumns[1].dot(outputColumns[0])
mergedOverlaps[a] = (outputColumns[1].dot(outputColumns[2]) +
outputColumns[0].dot(outputColumns[2]))/2.0
sparsity[a] = (outputColumns[1].sum() + outputColumns[0].sum())/2.0
print "Mean/stdev overlap:",overlaps.mean(),overlaps.std()
print "Mean/stdev merged overlap:",mergedOverlaps.mean(),mergedOverlaps.std()
print "Mean number of active columns:",sparsity.mean()
def createDirectories(expName):
  """
  Create the results/figures directory tree for one experiment.

  Missing directories are created; existing ones are left untouched.
  """
  resultDirs = ['traces', 'InputCoverage', 'classification',
                'input_output_overlap']
  figureDirs = ['InputCoverage', 'exampleRFs', 'ResponseToTestInputs',
                'RFcenters', 'avgInputs', 'inputOverlaps']
  paths = ['./results/{}/{}/'.format(sub, expName) for sub in resultDirs]
  paths += ['./figures/{}/{}/'.format(sub, expName) for sub in figureDirs]
  for path in paths:
    if not os.path.exists(path):
      os.makedirs(path)
# Collect the connected-synapse matrix of a spatial pooler.
# Returns a float32 array of shape (numColumns, numInputs) where entry
# (c, i) is nonzero when column c has a connected synapse to input i.
def getConnectedSyns(sp):
numInputs = sp.getNumInputs()
numColumns = np.prod(sp.getColumnDimensions())
connectedSyns = np.zeros((numColumns, numInputs), dtype=uintType)
for columnIndex in range(numColumns):
sp.getConnectedSynapses(columnIndex, connectedSyns[columnIndex, :])
connectedSyns = connectedSyns.astype('float32')
return connectedSyns
altairpearl/scikit-learn | examples/model_selection/plot_validation_curve.py | 141 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np

from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve

digits = load_digits()
X, y = digits.data, digits.target

# Sweep the SVM's RBF kernel width over five log-spaced values.
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
    SVC(), X, y, param_name="gamma", param_range=param_range,
    cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)

plt.title("Validation Curve with SVM")
# Fix: raw string -- "\g" is an invalid escape in a normal string
# literal (DeprecationWarning on modern Python); the TeX label needs
# the literal backslash.
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
# Shaded bands show +/- one standard deviation across the CV folds.
plt.semilogx(param_range, train_scores_mean, label="Training score",
             color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
                 train_scores_mean + train_scores_std, alpha=0.2,
                 color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
             color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
                 test_scores_mean + test_scores_std, alpha=0.2,
                 color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
KAPPS-/vincent | tests/test_vega.py | 9 | 32992 | # -*- coding: utf-8 -*-
'''
Test Vincent.vega
-----------------
'''
from datetime import datetime, timedelta
from itertools import product
import time
import json
from vincent.charts import Line
from vincent.core import (grammar, GrammarClass, GrammarDict, KeyedList,
LoadError, ValidationError)
from vincent.visualization import Visualization
from vincent.data import Data
from vincent.transforms import Transform
from vincent.properties import PropertySet
from vincent.scales import DataRef, Scale
from vincent.marks import ValueRef, MarkProperties, MarkRef, Mark
from vincent.axes import AxisProperties, Axis
from vincent.legends import LegendProperties, Legend
import nose.tools as nt
import pandas as pd
import numpy as np
# Factories that build a length-l test sequence for each supported value
# type; keyed by the type names used in the data-loading tests below.
sequences = {
    'int': range,
    'float': lambda l: list(map(float, list(range(l)))),
    'char': lambda l: list(map(chr, list(range(97, 97 + l)))),
    'datetime': lambda l: [datetime.now() + timedelta(days=i)
                           for i in range(l)],
    'Timestamp': lambda l: pd.date_range('1/2/2000', periods=l),
    'numpy float': lambda l: list(map(np.float32, list(range(l)))),
    'numpy int': lambda l: list(map(np.int32, list(range(l))))}
def test_keyed_list():
"""Test keyed list implementation"""
class TestKey(object):
"""Test object for Keyed List"""
def __init__(self, name=None):
self.name = name
key_list = KeyedList(attr_name='name')
# Basic usage
test_key = TestKey(name='test')
key_list.append(test_key)
nt.assert_equal(test_key, key_list['test'])
# Bad key
with nt.assert_raises(KeyError) as err:
key_list['test_1']
nt.assert_equal(err.exception.args[0], ' "test_1" is an invalid key')
# Repeated keys
test_key_1 = TestKey(name='test')
key_list.append(test_key_1)
with nt.assert_raises(ValidationError) as err:
key_list['test']
nt.assert_equal(err.expected, ValidationError)
nt.assert_equal(err.exception.args[0], 'duplicate keys found')
# Setting keys
key_list.pop(-1)
test_key_2 = TestKey(name='test_2')
key_list['test_2'] = test_key_2
nt.assert_equal(key_list['test_2'], test_key_2)
mirror_key_2 = TestKey(name='test_2')
key_list['test_2'] = mirror_key_2
nt.assert_equal(key_list['test_2'], mirror_key_2)
key_list[0] = mirror_key_2
nt.assert_equal(key_list[0], mirror_key_2)
# Keysetting errors
test_key_3 = TestKey(name='test_3')
with nt.assert_raises(ValidationError) as err:
key_list['test_4'] = test_key_3
nt.assert_equal(err.expected, ValidationError)
nt.assert_equal(err.exception.args[0],
"key must be equal to 'name' attribute")
key_list = KeyedList(attr_name='type')
test_key_4 = TestKey(name='test_key_4')
with nt.assert_raises(ValidationError) as err:
key_list['test_key_4'] = test_key_4
nt.assert_equal(err.expected, ValidationError)
nt.assert_equal(err.exception.args[0], 'object must have type attribute')
def test_grammar():
"""Grammar decorator behaves correctly."""
validator_fail = False
class DummyType(object):
pass
class TestGrammarClass(object):
def __init__(self):
self.grammar = GrammarDict()
@grammar
def test_grammar(value):
if validator_fail:
raise ValueError('validator failed')
@grammar(grammar_type=DummyType)
def test_grammar_with_type(value):
if validator_fail:
raise ValueError('validator failed')
@grammar(grammar_name='a name')
def test_grammar_with_name(value):
if validator_fail:
raise ValueError('validator failed')
test = TestGrammarClass()
nt.assert_is_none(test.test_grammar)
nt.assert_dict_equal(test.grammar, {})
test.test_grammar = 'testing'
nt.assert_equal(test.test_grammar, 'testing')
nt.assert_dict_equal(test.grammar, {'test_grammar': 'testing'})
del test.test_grammar
nt.assert_is_none(test.test_grammar)
nt.assert_dict_equal(test.grammar, {})
validator_fail = True
nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test,
'test_grammar', 'testing')
# grammar with type checking
test = TestGrammarClass()
validator_fail = False
dummy = DummyType()
test.test_grammar_with_type = dummy
nt.assert_equal(test.test_grammar_with_type, dummy)
nt.assert_dict_equal(test.grammar, {'test_grammar_with_type': dummy})
nt.assert_raises_regexp(ValueError, 'must be DummyType', setattr, test,
'test_grammar_with_type', 'testing')
validator_fail = True
nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test,
'test_grammar_with_type', dummy)
# grammar with field name
test = TestGrammarClass()
validator_fail = False
test.test_grammar_with_name = 'testing'
nt.assert_equal(test.test_grammar_with_name, 'testing')
nt.assert_dict_equal(test.grammar, {'a name': 'testing'})
validator_fail = True
nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test,
'test_grammar_with_name', 'testing')
def test_grammar_dict():
    """Test Vincent Grammar Dict"""
    g_dict = GrammarDict()
    test = Visualization()
    # Expected grammar, both as a plain dict and as its key-sorted JSON form.
    test_dict = {'axes': [], 'data': [], 'marks': [],
                 'scales': [], 'legends': []}
    test_str = ('{"axes": [], "data": [], "legends": [], '
                '"marks": [], "scales": []}')
    nt.assert_equal(test.grammar(), test_dict)
    print(json.dumps(test.grammar, sort_keys=True))
    nt.assert_equal(json.dumps(test.grammar, sort_keys=True),
                    test_str)
    # The dict's encoder must serialize a grammared object to its grammar.
    nt.assert_equal(g_dict.encoder(test), test.grammar)
def assert_grammar_typechecking(grammar_types, test_obj):
    """Assert that the grammar fields of a test object are correctly
    type-checked.

    `grammar_types` should be a list of (name, type) pairs, and `test_obj`
    should be an instance of the object to test.
    """
    # Sentinel type that no grammar field should ever accept.
    class BadType(object):
        pass

    for name, objects in grammar_types:
        for obj in objects:
            # A fresh instance of an accepted type must round-trip through
            # the attribute unchanged.
            tmp_obj = obj()
            setattr(test_obj, name, tmp_obj)
            nt.assert_equal(getattr(test_obj, name), tmp_obj)
            # A BadType value must be rejected with a message naming both
            # the field and the expected type...
            bad_obj = BadType()
            nt.assert_raises_regexp(ValueError, name + '.*' + obj.__name__,
                                    setattr, test_obj, name, bad_obj)
            # ...and the rejected assignment must not clobber the old value.
            nt.assert_equal(getattr(test_obj, name), tmp_obj)
def assert_manual_typechecking(bad_grammar, test_obj):
    """Check attrs that use the ``_assert_is_type`` helper for typechecking.

    Every ``(attribute, bad_value)`` pair in *bad_grammar* must make the
    corresponding ``setattr`` on *test_obj* raise a ValueError.
    """
    for attr_name, bad_value in bad_grammar:
        with nt.assert_raises(ValueError) as caught:
            setattr(test_obj, attr_name, bad_value)
        nt.assert_equal(caught.expected, ValueError)
def assert_grammar_validation(grammar_errors, test_obj):
    """Check that grammar validators raise the expected errors.

    Each entry of *grammar_errors* is ``(attribute, bad_value, error_type,
    message)``; assigning the value must raise that error with exactly that
    message as its first argument.
    """
    for attr_name, bad_value, error_type, message in grammar_errors:
        with nt.assert_raises(error_type) as caught:
            setattr(test_obj, attr_name, bad_value)
        nt.assert_equal(caught.exception.args[0], message)
class TestGrammarClass(object):
"""Test GrammarClass's built-in methods that aren't tested elsewhere"""
def test_bad_init(self):
"""Test bad initialization"""
nt.assert_raises(ValueError, GrammarClass, width=50)
def test_validation(self):
"""Test validation of grammar"""
test = Visualization()
test.axes.append({'bad axes': 'ShouldRaiseError'})
with nt.assert_raises(ValidationError) as err:
test.validate()
nt.assert_equal(err.exception.args[0],
'invalid contents: axes[0] must be Axis')
class TestVisualization(object):
"""Test the Visualization Class"""
def test_grammar_typechecking(self):
"""Visualization fields are correctly type checked"""
grammar_types = [('name', [str]),
('width', [int]),
('height', [int]),
('data', [list, KeyedList]),
('scales', [list, KeyedList]),
('axes', [list, KeyedList]),
('marks', [list, KeyedList])]
assert_grammar_typechecking(grammar_types, Visualization())
def test_validation_checking(self):
"""Visualization fields are grammar-checked"""
grammar_errors = [('width', -1, ValueError,
'width cannot be negative'),
('height', -1, ValueError,
'height cannot be negative'),
('viewport', [1], ValueError,
'viewport must have 2 dimensions'),
('viewport', [-1, -1], ValueError,
'viewport dimensions cannot be negative'),
('padding', {'top': 2}, ValueError,
('Padding must have keys "top", "left", "right",'
' "bottom".')),
('padding',
{'top': 1, 'left': 1, 'right': 1, 'bottom': -1},
ValueError, 'Padding cannot be negative.'),
('padding', -1, ValueError,
'Padding cannot be negative.')]
assert_grammar_validation(grammar_errors, Visualization())
def test_manual_typecheck(self):
"""Test manual typechecking for elements like marks"""
test_attr = [('data', [1]), ('scales', [1]),
('axes', [1]), ('marks', [1]),
('legends', [1])]
assert_manual_typechecking(test_attr, Visualization())
def test_validation(self):
"""Test Visualization validation"""
test_obj = Visualization()
with nt.assert_raises(ValidationError) as err:
test_obj.validate()
nt.assert_equal(err.exception.args[0],
'data must be defined for valid visualization')
test_obj.data = [Data(name='test'), Data(name='test')]
with nt.assert_raises(ValidationError) as err:
test_obj.validate()
nt.assert_equal(err.exception.args[0],
'data has duplicate names')
def test_axis_labeling(self):
"""Test convenience method for axis label setting"""
# With Axes already in place
test_obj = Visualization()
test_obj.axes.extend([Axis(type='x'), Axis(type='y')])
test_obj.axis_titles(x="test1", y="test2")
nt.assert_equals(test_obj.axes['x'].title, 'test1')
nt.assert_equals(test_obj.axes['y'].title, 'test2')
# With no Axes already defined
del test_obj.axes[0]
del test_obj.axes[0]
test_obj.axis_titles(x="test1", y="test2")
nt.assert_equals(test_obj.axes['x'].title, 'test1')
nt.assert_equals(test_obj.axes['y'].title, 'test2')
def test_axis_properties(self):
test_vis = Visualization()
with nt.assert_raises(ValueError) as err:
test_vis.x_axis_properties(title_size=20, label_angle=30)
nt.assert_equals(err.exception.args[0],
'This Visualization has no axes!')
test_vis.axes = [Axis(scale='x'), Axis(scale='y')]
test_vis.x_axis_properties(title_size=20, title_offset=10,
label_angle=30, color='#000')
test_vis.y_axis_properties(title_size=20, title_offset=10,
label_angle=30, color='#000')
def check_axis_colors():
for axis in test_vis.axes:
props = axis.properties
for prop in [props.title.fill, props.labels.fill]:
nt.assert_equals(getattr(prop, 'value'), '#000')
for prop in [props.axis.stroke, props.major_ticks.stroke,
props.minor_ticks.stroke, props.ticks.stroke]:
nt.assert_equals(getattr(prop, 'value'), '#000')
for axis in test_vis.axes:
props = axis.properties
nt.assert_equals(props.labels.angle.value, 30)
nt.assert_equals(props.title.font_size.value, 20)
nt.assert_equals(props.title.dy.value, 10)
check_axis_colors()
test_vis.axes = [Axis(scale='x'), Axis(scale='y')]
test_vis.common_axis_properties(color='#000')
for axis in test_vis.axes:
check_axis_colors()
def test_legends(self):
test_vis = Visualization()
test_vis.legend(title='Test', text_color='#000')
nt.assert_equals(test_vis.legends[0].title, 'Test')
nt.assert_equals(test_vis.legends[0].properties.labels.fill.value,
'#000')
nt.assert_equals(test_vis.legends[0].properties.title.fill.value,
'#000')
def test_colors(self):
test_vis = Line([1, 2, 3])
rng = ['foo', 'bar']
test_vis.colors(range_=rng)
nt.assert_equals(test_vis.scales['color'].range, rng)
def test_to_json(self):
"""Test JSON to string"""
pretty = '''{
"marks": [],
"axes": [],
"data": [],
"scales": [],
"legends": []
}'''
test = Visualization()
actual, tested = json.loads(pretty), json.loads(test.to_json())
nt.assert_dict_equal(actual, tested)
class TestData(object):
"""Test the Data class"""
def test_grammar_typechecking(self):
"""Data fields are correctly type-checked"""
grammar_types = [
('name', [str]),
('url', [str]),
('values', [list]),
('source', [str]),
('transform', [list])]
assert_grammar_typechecking(grammar_types, Data('name'))
def test_validate(self):
"""Test Data name validation"""
test_obj = Data()
del test_obj.name
nt.assert_raises(ValidationError, test_obj.validate)
def test_serialize(self):
"""Objects are serialized to JSON-compatible objects"""
def epoch(obj):
"""Convert to JS Epoch time"""
return int(time.mktime(obj.timetuple())) * 1000
types = [('test', str, 'test'),
(pd.Timestamp('2013-06-08'), int,
epoch(pd.Timestamp('2013-06-08'))),
(datetime.utcnow(), int, epoch(datetime.utcnow())),
(1, int, 1),
(1.0, float, 1.0),
(np.float32(1), float, 1.0),
(np.int32(1), int, 1),
(np.float64(1), float, 1.0),
(np.int64(1), int, 1)]
for puts, pytype, gets in types:
nt.assert_equal(Data.serialize(puts), gets)
class BadType(object):
"""Bad object for type warning"""
test_obj = BadType()
with nt.assert_raises(LoadError) as err:
Data.serialize(test_obj)
nt.assert_equals(err.exception.args[0],
'cannot serialize index of type BadType')
def test_pandas_series_loading(self):
"""Pandas Series objects are correctly loaded"""
# Test valid series types
name = ['_x', ' name']
length = [0, 1, 2]
index_key = [None, 'ix', 1]
index_types = ['int', 'char', 'datetime', 'Timestamp']
value_key = [None, 'x', 1]
value_types = ['int', 'char', 'datetime', 'Timestamp', 'float',
'numpy float', 'numpy int']
series_info = product(name, length, index_key, index_types,
value_key, value_types)
for n, l, ikey, itype, vkey, vtype in series_info:
index = sequences[itype](l)
series = pd.Series(sequences[vtype](l), index=index, name=n,)
vkey = series.name or vkey
expected = [{'idx': Data.serialize(i), 'col': vkey,
'val': Data.serialize(v)}
for i, v in zip(index, series)]
data = Data.from_pandas(series, name=n, series_key=vkey)
nt.assert_list_equal(expected, data.values)
nt.assert_equal(n, data.name)
data.to_json()
# Missing a name
series = pd.Series(np.random.randn(10))
data = Data.from_pandas(series)
nt.assert_equal(data.name, 'table')
def test_pandas_dataframe_loading(self):
# Simple columns/key_on tests
df = pd.DataFrame({'one': [1, 2, 3], 'two': [6, 7, 8],
'three': [11, 12, 13], 'four': [17, 18, 19]})
get_all = [{'col': 'four', 'idx': 0, 'val': 17},
{'col': 'one', 'idx': 0, 'val': 1},
{'col': 'three', 'idx': 0, 'val': 11},
{'col': 'two', 'idx': 0, 'val': 6},
{'col': 'four', 'idx': 1, 'val': 18},
{'col': 'one', 'idx': 1, 'val': 2},
{'col': 'three', 'idx': 1, 'val': 12},
{'col': 'two', 'idx': 1, 'val': 7},
{'col': 'four', 'idx': 2, 'val': 19},
{'col': 'one', 'idx': 2, 'val': 3},
{'col': 'three', 'idx': 2, 'val': 13},
{'col': 'two', 'idx': 2, 'val': 8}]
get1 = [{'col': 'one', 'idx': 0, 'val': 1},
{'col': 'one', 'idx': 1, 'val': 2},
{'col': 'one', 'idx': 2, 'val': 3}]
get2 = [{'col': 'one', 'idx': 0, 'val': 1},
{'col': 'two', 'idx': 0, 'val': 6},
{'col': 'one', 'idx': 1, 'val': 2},
{'col': 'two', 'idx': 1, 'val': 7},
{'col': 'one', 'idx': 2, 'val': 3},
{'col': 'two', 'idx': 2, 'val': 8}]
getkey2 = [{'col': 'one', 'idx': 6, 'val': 1},
{'col': 'one', 'idx': 7, 'val': 2},
{'col': 'one', 'idx': 8, 'val': 3}]
getkey3 = [{'col': 'one', 'idx': 11, 'val': 1},
{'col': 'two', 'idx': 11, 'val': 6},
{'col': 'one', 'idx': 12, 'val': 2},
{'col': 'two', 'idx': 12, 'val': 7},
{'col': 'one', 'idx': 13, 'val': 3},
{'col': 'two', 'idx': 13, 'val': 8}]
val_all = Data.from_pandas(df)
val1 = Data.from_pandas(df, columns=['one'])
val2 = Data.from_pandas(df, columns=['one', 'two'])
key2 = Data.from_pandas(df, columns=['one'], key_on='two')
key3 = Data.from_pandas(df, columns=['one', 'two'], key_on='three')
nt.assert_list_equal(val_all.values, get_all)
nt.assert_list_equal(val1.values, get1)
nt.assert_list_equal(val2.values, get2)
nt.assert_list_equal(key2.values, getkey2)
nt.assert_list_equal(key3.values, getkey3)
# Missing a name
dataframe = pd.DataFrame(np.random.randn(10, 3))
data = Data.from_pandas(dataframe)
nt.assert_equal(data.name, 'table')
# Bad obj
nt.assert_raises(ValueError, Data.from_pandas, {})
def test_numpy_loading(self):
"""Numpy ndarray objects are correctly loaded"""
test_data = np.random.randn(6, 3)
index = range(test_data.shape[0])
columns = ['a', 'b', 'c']
data = Data.from_numpy(test_data, name='name', columns=columns)
ikey = Data._default_index_key
expected_values = [
{ikey: i, 'a': row[0], 'b': row[1], 'c': row[2]}
for i, row in zip(index, test_data.tolist())]
nt.assert_list_equal(expected_values, data.values)
nt.assert_equal('name', data.name)
index_key = 'akey'
data = Data.from_numpy(test_data, name='name', columns=columns,
index_key=index_key)
expected_values = [
{index_key: i, 'a': row[0], 'b': row[1], 'c': row[2]}
for i, row in zip(index, test_data.tolist())]
nt.assert_list_equal(expected_values, data.values)
index = ['a', 'b', 'c', 'd', 'e', 'f']
data = Data.from_numpy(test_data, name='name', index=index,
columns=columns)
expected_values = [
{ikey: i, 'a': row[0], 'b': row[1], 'c': row[2]}
for i, row in zip(index, test_data.tolist())]
nt.assert_list_equal(expected_values, data.values)
# Bad loads
with nt.assert_raises(LoadError) as err:
Data.from_numpy(test_data, 'test', columns, index=range(4))
nt.assert_equal(err.expected, LoadError)
columns = ['a', 'b']
with nt.assert_raises(LoadError) as err:
Data.from_numpy(test_data, 'test', columns, index)
nt.assert_equal(err.expected, LoadError)
def test_from_mult_iters(self):
"""Test set of iterables"""
test1 = Data.from_mult_iters(x=[0, 1, 2], y=[3, 4, 5], z=[7, 8, 9],
idx='x')
test2 = Data.from_mult_iters(fruit=['apples', 'oranges', 'grapes'],
count=[12, 16, 54], idx='fruit')
values1 = [{'col': 'y', 'idx': 0, 'val': 3},
{'col': 'y', 'idx': 1, 'val': 4},
{'col': 'y', 'idx': 2, 'val': 5},
{'col': 'z', 'idx': 0, 'val': 7},
{'col': 'z', 'idx': 1, 'val': 8},
{'col': 'z', 'idx': 2, 'val': 9}]
values2 = [{'col': 'count', 'idx': 'apples', 'val': 12},
{'col': 'count', 'idx': 'oranges', 'val': 16},
{'col': 'count', 'idx': 'grapes', 'val': 54}]
nt.assert_list_equal(test1.values, values1)
nt.assert_list_equal(test2.values, values2)
# Iter errors
nt.assert_raises(ValueError, Data.from_mult_iters, x=[0], y=[1, 2])
def test_from_iter(self):
"""Test data from single iterable"""
test_list = Data.from_iter([10, 20, 30])
test_dict = Data.from_iter({
'apples': 10, 'bananas': 20, 'oranges': 30})
get1 = [{'col': 'data', 'idx': 0, 'val': 10},
{'col': 'data', 'idx': 1, 'val': 20},
{'col': 'data', 'idx': 2, 'val': 30}]
get2 = [{'col': 'data', 'idx': 'apples', 'val': 10},
{'col': 'data', 'idx': 'bananas', 'val': 20},
{'col': 'data', 'idx': 'oranges', 'val': 30}]
nt.assert_list_equal(test_list.values, get1)
nt.assert_list_equal(test_dict.values, get2)
def test_serialize_error(self):
"""Test serialization error"""
class badType(object):
"""I am a bad actor"""
broken = badType()
nt.assert_raises(LoadError, Data.serialize, broken)
def test_keypairs(self):
Data.keypairs([0, 10, 20, 30, 40])
Data.keypairs(((0, 1), (0, 2), (0, 3)))
Data.keypairs({'A': 10, 'B': 20, 'C': 30, 'D': 40, 'E': 50})
class TestTransform(object):
"""Test the Transform class"""
def test_grammar_typechecking(self):
"""Transform field typechecking"""
grammar_types = [
('fields', [list]), ('from_', [str]),
('as_', [list]), ('keys', [list]), ('sort', [str]),
('test', [str]), ('field', [str]), ('expr', [str]),
('by', [str, list]), ('value', [str]), ('median', [bool]),
('with_', [str]), ('key', [str]), ('with_key', [str]),
('links', [str]), ('size', [list]), ('iterations', [int]),
('charge', [int, str]), ('link_distance', [int, str]),
('link_strength', [int, str]), ('friction', [int, float]),
('theta', [int, float]), ('gravity', [int, float]),
('alpha', [int, float]), ('point', [str]),
('height', [str])]
assert_grammar_typechecking(grammar_types, Transform())
class TestValueRef(object):
"""Test the ValueRef class"""
def test_grammar_typechecking(self):
"""ValueRef fields are correctly type-checked"""
grammar_types = [
('value', [str]),
('value', [int]),
('value', [float]),
('field', [str]),
('scale', [str]),
('mult', [int]),
('mult', [float]),
('offset', [int]),
('offset', [float]),
('band', [bool])]
assert_grammar_typechecking(grammar_types, ValueRef())
def test_json_serialization(self):
"""ValueRef JSON is correctly serialized"""
vref = ValueRef()
nt.assert_equal(json.dumps({}), vref.to_json(pretty_print=False))
props = {
'value': 'test-value',
'band': True}
vref = ValueRef(**props)
nt.assert_equal(json.dumps(props, sort_keys=True),
vref.to_json(pretty_print=False))
props = {
'value': 'test-value',
'field': 'test-field',
'scale': 'test-scale',
'mult': 1.2,
'offset': 4,
'band': True}
vref = ValueRef(**props)
nt.assert_equal(json.dumps(props, sort_keys=True),
vref.to_json(pretty_print=False))
class TestPropertySet(object):
"""Test the PropertySet Class"""
def test_grammar_typechecking(self):
"""PropertySet fields are correctly type-checked"""
# All fields must be ValueRef for Mark properties
fields = [
'x', 'x2', 'width', 'y', 'y2', 'height', 'opacity', 'fill',
'fill_opacity', 'stroke', 'stroke_width', 'stroke_opacity',
'size', 'shape', 'path', 'inner_radius', 'outer_radius',
'start_angle', 'end_angle', 'interpolate', 'tension', 'url',
'align', 'baseline', 'text', 'dx', 'dy', 'angle', 'font',
'font_size', 'font_weight', 'font_style']
grammar_types = [(f, [ValueRef]) for f in fields]
assert_grammar_typechecking(grammar_types, PropertySet())
def test_validation_checking(self):
"""ValueRef fields are grammar-checked"""
grammar_errors = [('fill_opacity', ValueRef(value=-1), ValueError,
'fill_opacity must be between 0 and 1'),
('fill_opacity', ValueRef(value=2), ValueError,
'fill_opacity must be between 0 and 1'),
('stroke_width', ValueRef(value=-1), ValueError,
'stroke width cannot be negative'),
('stroke_opacity', ValueRef(value=-1), ValueError,
'stroke_opacity must be between 0 and 1'),
('stroke_opacity', ValueRef(value=2), ValueError,
'stroke_opacity must be between 0 and 1'),
('size', ValueRef(value=-1), ValueError,
'size cannot be negative')]
assert_grammar_validation(grammar_errors, PropertySet())
bad_shape = ValueRef(value="BadShape")
nt.assert_raises(ValueError, PropertySet, shape=bad_shape)
def test_manual_typecheck(self):
"""Test manual typechecking for elements like marks"""
test_attr = [('fill', ValueRef(value=1)),
('fill_opacity', ValueRef(value='str')),
('stroke', ValueRef(value=1)),
('stroke_width', ValueRef(value='str')),
('stroke_opacity', ValueRef(value='str')),
('size', ValueRef(value='str')),
('shape', ValueRef(value=1)),
('path', ValueRef(value=1))]
assert_manual_typechecking(test_attr, PropertySet())
class TestMarkProperties(object):
"""Test the MarkProperty Class"""
def test_grammar_typechecking(self):
"""Test grammar of MarkProperty"""
fields = ['enter', 'exit', 'update', 'hover']
grammar_types = [(f, [PropertySet]) for f in fields]
assert_grammar_typechecking(grammar_types, MarkProperties())
class TestMarkRef(object):
"""Test the MarkRef Class"""
def test_grammar_typechecking(self):
"""Test grammar of MarkRef"""
grammar_types = [('data', [str]), ('transform', [list])]
assert_grammar_typechecking(grammar_types, MarkRef())
class TestMark(object):
"""Test Mark Class"""
def test_grammar_typechecking(self):
"""Test grammar of Mark"""
grammar_types = [('name', [str]), ('description', [str]),
('from_', [MarkRef]),
('properties', [MarkProperties]), ('key', [str]),
('key', [str]), ('delay', [ValueRef]),
('ease', [str]), ('marks', [list]),
('scales', [list, KeyedList])]
assert_grammar_typechecking(grammar_types, Mark())
def test_validation_checking(self):
"""Mark fields are grammar checked"""
nt.assert_raises(ValueError, Mark, type='panda')
class TestDataRef(object):
"""Test DataRef class"""
def test_grammar_typechecking(self):
"""Test grammar of DataRef"""
grammar_types = [('data', [str]), ('field', [str])]
assert_grammar_typechecking(grammar_types, DataRef())
class TestScale(object):
"""Test Scale class"""
def test_grammar_typechecking(self):
"""Test grammar of Scale"""
grammar_types = [('name', [str]), ('type', [str]),
('domain', [list, DataRef]),
('domain_min', [float, int, DataRef]),
('domain_max', [float, int, DataRef]),
('range', [list, str]),
('range_min', [float, int, DataRef]),
('range_max', [float, int, DataRef]),
('reverse', [bool]), ('round', [bool]),
('points', [bool]), ('clamp', [bool]),
('nice', [bool, str]),
('exponent', [float, int]),
('zero', [bool])]
assert_grammar_typechecking(grammar_types, Scale())
class TestAxisProperties(object):
"""Test AxisProperties Class"""
def test_grammar_typechecking(self):
"""Test grammar of AxisProperties"""
grammar_types = [('major_ticks', [PropertySet]),
('minor_ticks', [PropertySet]),
('labels', [PropertySet]),
('axis', [PropertySet])]
assert_grammar_typechecking(grammar_types, AxisProperties())
class TestAxis(object):
"""Test Axis Class"""
def test_grammar_typechecking(self):
"""Test grammar of Axis"""
grammar_types = [('title', [str]),
('title_offset', [int]),
('grid', [bool]),
('scale', [str]),
('orient', [str]), ('format', [str]),
('ticks', [int]), ('values', [list]),
('subdivide', [int, float]),
('tick_padding', [int]), ('tick_size', [int]),
('tick_size_major', [int]),
('tick_size_minor', [int]),
('tick_size_end', [int]),
('offset', [int]),
('properties', [AxisProperties])]
assert_grammar_typechecking(grammar_types, Axis())
def test_validation_checking(self):
"""Axis fields are grammar checked"""
nt.assert_raises(ValueError, Axis, type='panda')
class TestLegendProperties(object):
"""Test LegendProperties class"""
def test_grammar_typechecking(self):
"""Test grammar of LegendProperties"""
grammar_types = [('title', [PropertySet]),
('labels', [PropertySet]),
('symbols', [PropertySet]),
('gradient', [PropertySet]),
('legend', [PropertySet])]
assert_grammar_typechecking(grammar_types, LegendProperties())
class TestLegend(object):
"""Test Legend Class"""
def test_grammar_typechecking(self):
"""Test grammar of Legend"""
grammar_types = [('size', [str]),
('shape', [str]),
('fill', [str]),
('stroke', [str]),
('title', [str]),
('format', [str]),
('values', [list]),
('properties', [LegendProperties])]
assert_grammar_typechecking(grammar_types, Legend())
def test_validation_checking(self):
"""Legend fields are grammar checked"""
nt.assert_raises(ValueError, Legend, orient='center')
| mit |
sumspr/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
    """Brute-force KDE reference used to validate ``KernelDensity``.

    Evaluates the density of the sample *X* at the query points *Y* for
    the given *kernel* name and bandwidth *h* by summing over all pairwise
    Euclidean distances.
    """
    # Pairwise distances, shape (len(Y), len(X)).
    dist = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    # Per-point normalization for this kernel/bandwidth/dimensionality.
    norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
    if kernel == 'gaussian':
        raw = np.exp(-0.5 * (dist * dist) / (h * h)).sum(-1)
    elif kernel == 'tophat':
        raw = (dist < h).sum(-1)
    elif kernel == 'epanechnikov':
        raw = ((1.0 - (dist * dist) / (h * h)) * (dist < h)).sum(-1)
    elif kernel == 'exponential':
        raw = (np.exp(-dist / h)).sum(-1)
    elif kernel == 'linear':
        raw = ((1 - dist / h) * (dist < h)).sum(-1)
    elif kernel == 'cosine':
        raw = (np.cos(0.5 * np.pi * dist / h) * (dist < h)).sum(-1)
    else:
        raise ValueError('kernel not recognized')
    return norm * raw
def test_kernel_density(n_samples=100, n_features=3):
    """Generator test: KernelDensity matches the brute-force reference
    for every kernel/bandwidth/tolerance combination."""
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_features)
    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for bandwidth in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)

            def check_results(kernel, bandwidth, atol, rtol):
                kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
                                    atol=atol, rtol=rtol)
                log_dens = kde.fit(X).score_samples(Y)
                assert_allclose(np.exp(log_dens), dens_true,
                                atol=atol, rtol=max(1E-7, rtol))
                # score() is the total log-likelihood, so exp(score) must
                # equal the product of the pointwise densities.
                assert_allclose(np.exp(kde.score(Y)),
                                np.prod(dens_true),
                                atol=atol, rtol=max(1E-7, rtol))

            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    # NOTE(review): breadth_first is never forwarded to the
                    # yielded check, so each case is yielded twice with
                    # identical arguments — confirm whether check_results
                    # was meant to take a breadth_first parameter.
                    for breadth_first in (True, False):
                        yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
    """Placeholder: KernelDensity.score coverage is not implemented yet."""
    pass
    #FIXME
    #np.random.seed(0)
    #X = np.random.random((n_samples, n_features))
    #Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
    """Invalid constructor arguments must raise ValueError."""
    bad_kwargs = [dict(algorithm='blah'),
                  dict(bandwidth=0),
                  dict(kernel='blah'),
                  dict(metric='blah'),
                  dict(algorithm='kd_tree', metric='blah')]
    for kwargs in bad_kwargs:
        assert_raises(ValueError, KernelDensity, **kwargs)
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
aditipawde/TimeTable1 | TimeTable1/main_rajeshree.py | 1 | 6024 | import dataAccessSQLAlchemy as da
import pandas as pd
import random
import numpy as np
def isSlotAvailable(req_all, timetable_np, c, r_day, r_slot, r_lecnumber, req_id):
    """Count how many consecutive sub-slots starting at (r_day, r_slot) can
    take requirement *req_id*.

    NOTE(review): the function accumulates SlotsAvailable but never returns
    it, so every caller receives None — a ``return`` statement appears to be
    missing.
    NOTE(review): ``req_all[req_id, 'eachSlot']`` indexes what looks like a
    pandas DataFrame with ndarray syntax — presumably ``.loc`` was intended;
    confirm against the data-access layer.
    """
    #If slot is of duration 1
    SlotsAvailable = 0
    for i in range(req_all[req_id, 'eachSlot']): #Fetching how many lectures do we require to slot
        if(np.isnan(np.sum(timetable_np[c, r_day, r_slot+i, r_lecnumber]))): # Check if that slot is empty, this way of using np.isnan is the fastest way of doing so
            req = req_all.loc[req_all.index == req_id]
            # NOTE(review): this rebinds the parameter c (used above as a
            # timetable index) to a category letter — verify intent.
            if(req['category']=='T'): c='L'
            else: c='T'
            req_list= timetable_np[c, r_day, r_slot+i, :]
            #Fetch the requirement records of the selected values
            if(c in req_all[np.array(req_list), 'category'] or np.isnan(np.sum(timetable_np[c, r_day, r_slot+i, :]))): #Allow only if there is another lecture of same type, or no lecture at all
                SlotsAvailable=SlotsAvailable+1
def teacher_overlap(timetable):
    """Return the teacher-clash cost of a timetable.

    For every (day, slot) pair, a teacher scheduled k times in that slot
    (across all classes and sub-slots) contributes k*(k-1) to the cost.

    Relies on the module-level globals ``n_days``, ``n_slots`` and
    ``req_all``.  The per-slot debug ``print`` of the raw array has been
    removed.
    """
    teacher_cost = 0
    for day in range(n_days):
        for slot in range(n_slots):
            # collect every teacher booked anywhere in this time slot
            teacher_list = []
            for cell in timetable[:, day, slot, :].ravel():
                if not np.isnan(cell):
                    req = req_all.loc[req_all.index == cell]
                    teacher_list.append(req.iloc[0]['teacherId'])
            # every duplicate appearance of a teacher adds to the cost
            for teacher_id in teacher_list:
                if teacher_id is not None:
                    teacher_cost = teacher_cost + teacher_list.count(teacher_id) - 1
    return teacher_cost
def class_batch_overlap(timetable):
    """Return the overlap cost for theory classes and lab batches.

    For every (class, day, slot) cell: two theory lectures of the same
    class clash, and a lab batch clashes with any co-scheduled batch that
    is not listed for it in the ``batchCanOverlap`` table.

    Relies on the module-level globals ``n_classes``, ``n_days``,
    ``n_slots``, ``req_all`` and ``f_batch_can_overlap``.

    Note: the previous version was flagged ``#incorrect`` by its author —
    the inner loop counted duplicates of ``batch_id`` instead of checking
    each co-scheduled ``batch`` against the allowed-overlap table, and it
    printed debug output on every slot.
    """
    class_cost = 0
    batch_cost = 0
    for cl in range(n_classes):
        for day in range(n_days):
            for slot in range(n_slots):
                class_list = []
                batch_list = []
                for sub_slot in timetable[cl, day, slot, :]:
                    if not np.isnan(sub_slot):
                        req = req_all.loc[req_all.index == sub_slot]
                        if req.iloc[0]['category'] == 'T':
                            class_list.append(req.iloc[0]['classId'])
                        elif req.iloc[0]['category'] == 'L':
                            batch_list.append(req.iloc[0]['batchId'])
                # two theory lectures of the same class in one slot clash
                for class_id in class_list:
                    class_cost = class_cost + class_list.count(class_id) - 1
                # a batch clashes with any co-scheduled batch not allowed
                # to overlap with it (per batchCanOverlap)
                for batch_id in batch_list:
                    allowed = set(
                        f_batch_can_overlap.loc[
                            f_batch_can_overlap['batchId'] == batch_id,
                            'batchOverlapId'])
                    for other in batch_list:
                        if other != batch_id and other not in allowed:
                            batch_cost = batch_cost + 1
                    # duplicates of the same batch in one slot always clash
                    batch_cost = batch_cost + batch_list.count(batch_id) - 1
    return class_cost + batch_cost
# Main driver: load requirements from the database, expand them into
# individual lecture requirements, then randomly place each one in an
# empty 4-D timetable (class, day, slot, sub-slot) and score the result.
print("welcome");
# Theory requirements: one row per (subject, class, teacher)
f_subject_subjectClassTeacher = da.execquery('select s.subjectId, subjectShortName, totalHrs, eachSlot, c.classId, teacherId from subject s, subjectClassTeacher c where s.subjectId = c.subjectId;')
f_subject_subjectClassTeacher.insert(5,'batchId','-')
f_subject_subjectClassTeacher.insert(6,'category','T') #T for theory
# Lab requirements: one row per (subject, batch, teacher)
f_subject_subjectBatchTeacher = da.execquery('select s.subjectId, subjectShortName, totalHrs, eachSlot, sbt.batchId, bc.classId, teacherId from subject s, subjectBatchTeacher sbt, batchClass bc where s.subjectId = sbt.subjectId AND sbt.batchId = bc.batchId;')
f_subject_subjectBatchTeacher.insert(6,'category','L') #L for Lab
f_subjectBatchClassTeacher = pd.concat([f_subject_subjectClassTeacher, f_subject_subjectBatchTeacher])
f_batch_can_overlap = da.execquery('select batchId, batchOverlapId from batchCanOverlap;')
print(f_batch_can_overlap)
x = f_subjectBatchClassTeacher
x = x.reset_index()
# number of separate lectures each requirement expands into
totallectures_list = (x['totalHrs'] / x['eachSlot'])
# Create empty dataframe to save all the requirements
req_all = pd.DataFrame(index=range(int(totallectures_list.sum())), columns=list(x))
# Expand: repeat each source row totalHrs/eachSlot times into req_all,
# decrementing totalHrs until the row is exhausted, then move to the next.
j = 0
for i in range(len(req_all)):
    if((x.iloc[j]['totalHrs']/x.iloc[j]['eachSlot'])>0):
        req_all.loc[[i]] = x.iloc[[j]].values
        # NOTE(review): DataFrame.set_value is deprecated/removed in modern
        # pandas — should become x.at[j, 'totalHrs'] = ...; confirm pandas version.
        x.set_value(j,'totalHrs', x.loc[j]['totalHrs'] - x.loc[j]['eachSlot'])
        if (x.iloc[j]['totalHrs'] == 0):
            j = j + 1
#print(req_all)
# Create new panel
#timetable1 = pd.panel4D(items=10, major_axis=5, minor_axis=10, dtype=int) # Check if we can do something by dtype = some class here
#print(timetable1)
#timetable1[3][2][4] = 7
#print(timetable1[3])
#These values need to be calculated from the database
n_classes=14
n_days=5
n_slots=10
n_maxlecsperslot=4
# NaN marks an empty sub-slot in the 4-D timetable
timetable_np = np.empty((n_classes, n_days, n_slots, n_maxlecsperslot))*np.nan
#print(timetable_np)
# NOTE(review): c is used directly as a numpy index below, so classId values
# must be integers in range(n_classes) — verify against the database.
for c in (set(req_all.classId)): #First take one class
    #print(c)
    #http://stackoverflow.com/questions/17071871/select-rows-from-a-dataframe-based-on-values-in-a-column-in-pandas
    req_forgivenclass=req_all.loc[req_all['classId'] == c] #List all the requirements for that class in req_forgivenclass
    #print(req_forgivenclass)
    #print(set(req_forgivenclass.index)) #These are the indices of the requirements for this class
    for req in set(req_forgivenclass.index): #Schedule each of these requirements
        notassigned = 1
        # NOTE(review): isSlotAvailable as written returns None (no return
        # statement), so this loop can never terminate — confirm the fix.
        while(notassigned==1): #Keep on scheduling till not found
            r_day=random.randint(0,n_days-1)
            r_slot = random.randint(0, n_slots-1)
            r_lecnumber=random.randint(0,n_maxlecsperslot-1)
            if(isSlotAvailable(req_all, timetable_np, c,r_day,r_slot,r_lecnumber, req)):
                timetable_np[c,r_day,r_slot,r_lecnumber]=req
                notassigned=0
    #print(timetable_np[c,:,:,:])
#print(timetable_np.shape);
# Score the generated timetable
teacher_cost = teacher_overlap(timetable_np)
print("Teacher cost:")
print(teacher_cost);
cb_cost = class_batch_overlap(timetable_np)
print("Class cost:")
print(cb_cost);
| lgpl-3.0 |
hainm/statsmodels | statsmodels/sandbox/examples/example_gam.py | 33 | 2343 | '''original example for checking how far GAM works
Note: uncomment plt.show() to display graphs
'''
# Select which demo to run: 1 = additive model (Gaussian), 2 = binomial GAM,
# 3 = Poisson GAM.
example = 2 # 1,2 or 3
import numpy as np
import numpy.random as R
import matplotlib.pyplot as plt
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
# Build a synthetic two-regressor data set: sorted normal draws plus noise,
# with true signal z = standardized f1(x1) + f2(x2).
nobs = 150
x1 = R.standard_normal(nobs)
x1.sort()
x2 = R.standard_normal(nobs)
x2.sort()
y = R.standard_normal((nobs,))
f1 = lambda x1: (x1 + x1**2 - 3 - 1 * x1**3 + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 + x2**2 - 0.1 * np.exp(x2/4.))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) * 2 # 0.1
y += z
d = np.array([x1,x2]).T
if example == 1:
    print("normal")
    m = AdditiveModel(d)
    m.fit(y)
    x = np.linspace(-2,2,50)
    print(m)
    y_pred = m.results.predict(d)
    plt.figure()
    plt.plot(y, '.')
    plt.plot(z, 'b-', label='true')
    plt.plot(y_pred, 'r-', label='AdditiveModel')
    plt.legend()
    plt.title('gam.AdditiveModel')
import scipy.stats, time
if example == 2:
    print("binomial")
    f = family.Binomial()
    # Bernoulli draws with success probability given by the inverse link
    b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
    b.shape = y.shape
    m = GAM(b, d, family=f)
    toc = time.time()
    m.fit(b)
    tic = time.time()
    print(tic-toc)
if example == 3:
    print("Poisson")
    f = family.Poisson()
    y = y/y.max() * 3
    yp = f.link.inverse(y)
    p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
    p.shape = y.shape
    m = GAM(p, d, family=f)
    toc = time.time()
    m.fit(p)
    tic = time.time()
    print(tic-toc)
# Compare each fitted smoother to the true component functions.
# NOTE(review): for example == 1 this relies on AdditiveModel exposing
# .smoothers like GAM does — verify.
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)
plt.show()
## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
| bsd-3-clause |
APMonitor/arduino | 0_Test_Device/Python/test_Second_Order.py | 1 | 5010 | import tclab
import numpy as np
import time
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import random
# Second order model of TCLab
# initial parameter guesses
Kp = 0.2      # process gain
taus = 50.0   # second-order time constant (sec)
zeta = 1.2    # damping factor (>1: overdamped)
# magnitude of step
M = 80        # heater step size (% output)
# overdamped 2nd order step response
def model(y0,t,M,Kp,taus,zeta):
    """Overdamped second-order step response.

    Parameters
    ----------
    y0 : initial output value
    t : time since the step (scalar or ndarray)
    M : magnitude of the input step
    Kp : process gain
    taus : second-order time constant
    zeta : damping factor (requires zeta > 1, i.e. overdamped)

    Returns
    -------
    y(t), the response to a step of size M applied at t = 0.
    """
    decay = np.exp(-zeta * t / taus)
    root = np.sqrt(zeta**2 - 1.0)
    theta = root * (t / taus)
    transient = decay * (np.cosh(theta) + (zeta / root) * np.sinh(theta))
    return y0 + Kp * M * (1.0 - transient)
# define objective for optimizer
def objective(p,tm,ymeas):
    """Sum-of-squared-errors objective for fitting the 2nd-order model.

    p      : [Kp, taus, zeta] parameter vector being optimized
    tm     : time points of the measurements
    ymeas  : measured temperatures (ymeas[0] is the initial condition)

    Returns SSE between measurements and model predictions, plus soft
    quadratic penalties that keep taus in [10, 200] and zeta in (1.1, 5).
    Uses the module-level step magnitude ``M`` and the ``model`` function.
    """
    # p = optimization parameters
    Kp = p[0]
    taus = p[1]
    zeta = p[2]
    # tm = time points
    # ymeas = measurements
    # ypred = predicted values
    n = np.size(tm)
    ypred = np.ones(n)*ymeas[0]
    for i in range(1,n):
        ypred[i] = model(ymeas[0],tm[i],M,Kp,taus,zeta)
    sse = sum((ymeas-ypred)**2)
    # penalize bound violation
    if taus<10.0:
        sse = sse + 100.0 * (10.0-taus)**2
    if taus>200.0:
        sse = sse + 100.0 * (200.0-taus)**2
    if zeta<=1.1:
        # NOTE(review): penalty is centered at 1.0 while the trigger is 1.1,
        # so it *grows* toward the zeta=1.1 boundary from below and vanishes
        # at zeta=1.0 where the model divides by zero — presumably this was
        # meant to be (1.1-zeta)**2; confirm intent.
        sse = sse + 1e6 * (1.0-zeta)**2
    if zeta>=5.0:
        sse = sse + 1e6 * (5.0-zeta)**2
    return sse
# Connect to Arduino
a = tclab.TCLab()
# Get Version
print(a.version)
# Turn LED on
print('LED On')
a.LED(100)
# Run time in minutes
run_time = 5.0
# Number of cycles (one sample per second)
loops = int(60.0*run_time)
tm = np.zeros(loops)
z = np.zeros(loops)   # zero-filled placeholder columns for save_txt
# Temperature (K)
T1 = np.ones(loops) * a.T1 # measured T (degC)
T1p = np.ones(loops) * a.T1 # predicted T (degC)
# step test (0 - 100%)
Q1 = np.ones(loops) * 0.0
Q1[1:] = M # magnitude of the step
print('Running Main Loop. Ctrl-C to end.')
print(' Time Kp taus zeta')
print('{:6.1f} {:6.2f} {:6.2f} {:6.2f}'.format(tm[0],Kp,taus,zeta))
# Create plot (interactive mode so it refreshes inside the loop)
plt.figure(figsize=(10,7))
plt.ion()
plt.show()
# Main Loop: 1 Hz sampling; periodically re-fit (Kp, taus, zeta) to the
# measured step response and redraw the live plot.
start_time = time.time()
prev_time = start_time
try:
    for i in range(1,loops):
        # Sleep time (pace the loop to ~1 second per cycle)
        sleep_max = 1.0
        sleep = sleep_max - (time.time() - prev_time)
        if sleep>=0.01:
            time.sleep(sleep)
        else:
            time.sleep(0.01)
        # Record time and change in time
        t = time.time()
        dt = t - prev_time
        prev_time = t
        tm[i] = t - start_time
        # Read temperature (degC)
        T1[i] = a.T1
        ###############################
        ### CONTROLLER or ESTIMATOR ###
        ###############################
        # Estimate parameters after 15 cycles and every 3 steps
        if i>=15 and (np.mod(i,3)==0):
            # randomize guess values to help the optimizer escape plateaus
            r = random.random()-0.5 # random number -0.5 to 0.5
            Kp = Kp + r*0.05
            taus = taus + r*1.0
            zeta = zeta + r*0.01
            p0=[Kp,taus,zeta] # initial parameters
            solution = minimize(objective,p0,args=(tm[0:i+1],T1[0:i+1]))
            p = solution.x
            Kp = p[0]
            taus = max(10.0,min(200.0,p[1])) # clip to >10, <=200
            zeta = max(1.1,min(5.0,p[2])) # clip to >=1.1, <=5
            # Update 2nd order prediction with the refreshed parameters
            for j in range(1,i+1):
                T1p[j] = model(T1p[0],tm[j],M,Kp,taus,zeta)
        # Write output (0-100)
        a.Q1(Q1[i])
        # Print line of data
        print('{:6.1f} {:6.2f} {:6.2f} {:6.2f}'.format(tm[i],Kp,taus,zeta))
        # Plot measured vs predicted temperature and the heater signal
        plt.clf()
        ax=plt.subplot(2,1,1)
        ax.grid()
        plt.plot(tm[0:i],T1p[0:i],'k-',label=r'$T_1 \, Pred$')
        plt.plot(tm[0:i],T1[0:i],'ro',label=r'$T_1 \, Meas$')
        plt.ylabel('Temperature (degC)')
        plt.legend(loc=2)
        ax=plt.subplot(2,1,2)
        ax.grid()
        plt.plot(tm[0:i],Q1[0:i],'b-',label=r'$Q_1$')
        plt.ylabel('Heaters')
        plt.xlabel('Time (sec)')
        plt.legend(loc='best')
        plt.draw()
        plt.pause(0.05)
    # Turn off heaters
    a.Q1(0)
    a.Q2(0)
    # Save text file
    # BUGFIX: was T1e[0:i] — T1e is never defined, so a completed run
    # raised NameError here; the predicted series is T1p.
    a.save_txt(tm[0:i],Q1[0:i],z[0:i],T1[0:i],T1p[0:i],z[0:i],z[0:i])
    # Save figure
    plt.savefig('test_Second_Order.png')
# Allow user to end loop with Ctrl-C
except KeyboardInterrupt:
    # Disconnect from Arduino
    a.Q1(0)
    a.Q2(0)
    print('Shutting down')
    a.close()
    a.save_txt(tm[0:i],Q1[0:i],z[0:i],T1[0:i],z[0:i],z[0:i],z[0:i])
    plt.savefig('test_Heaters.png')
# Make sure serial connection still closes when there's an error
except:
    # Disconnect from Arduino
    a.Q1(0)
    a.Q2(0)
    print('Error: Shutting down')
    a.close()
    a.save_txt(tm[0:i],Q1[0:i],z[0:i],T1[0:i],z[0:i],z[0:i],z[0:i])
    plt.savefig('test_Second_Order.png')
    raise
| apache-2.0 |
Achuth17/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
    """Minimal estimator with a scalar and an arbitrary-object parameter."""
    def __init__(self, l1=0, empty=None):
        self.l1 = l1
        self.empty = empty
class K(BaseEstimator):
    """Leaf estimator used to exercise nested get_params/set_params."""
    def __init__(self, c=None, d=None):
        self.c = c
        self.d = d
class T(BaseEstimator):
    """Composite estimator whose parameters are themselves estimators."""
    def __init__(self, a=None, b=None):
        self.a = a
        self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
    """Estimator with a deprecated constructor arg / attribute.

    ``b`` is the deprecated alias of ``a``: passing it stores the value in
    ``a`` and emits a DeprecationWarning.  get_params() must hide ``b``.
    """
    def __init__(self, a=None, b=None):
        self.a = a
        if b is not None:
            # BUGFIX: the original merely *instantiated* DeprecationWarning,
            # which is a silent no-op — the warning must be emitted.
            import warnings  # local import; module header is outside this block
            warnings.warn("b is deprecated and renamed 'a'",
                          DeprecationWarning)
            self.a = b
    @property
    @deprecated("Parameter 'b' is deprecated and renamed to 'a'")
    def b(self):
        return self._b
class Buggy(BaseEstimator):
    " A buggy estimator that does not set its parameters right. "
    # Intentionally stores 1 instead of ``a`` — clone() must detect the
    # mismatch and raise (see test_clone_buggy). Do not "fix" this.
    def __init__(self, a=None):
        self.a = 1
class NoEstimator(object):
    """Object with fit/predict but no BaseEstimator API (no get_params).

    clone() must reject it with TypeError (see test_clone_buggy).
    """
    def __init__(self):
        pass
    def fit(self, X=None, y=None):
        # no-op fit that follows the return-self convention
        return self
    def predict(self, X=None):
        return None
class VargEstimator(BaseEstimator):
    """Sklearn estimators shouldn't have vargs."""
    # clone() must raise RuntimeError for *vargs signatures
    # (see test_clone_buggy).
    def __init__(self, *vargs):
        pass
#############################################################################
# The tests
def test_clone():
    """clone() yields a distinct estimator with equal parameters."""
    # Tests that clone creates a correct deep copy.
    # We create an estimator, make a copy of its original state
    # (which, in this case, is the current state of the estimator),
    # and check that the obtained copy is a correct deep copy.
    from sklearn.feature_selection import SelectFpr, f_classif
    selector = SelectFpr(f_classif, alpha=0.1)
    new_selector = clone(selector)
    assert_true(selector is not new_selector)
    assert_equal(selector.get_params(), new_selector.get_params())
    # array-valued parameters must also clone (no shared identity)
    selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    new_selector = clone(selector)
    assert_true(selector is not new_selector)
def test_clone_2():
    """clone() copies only constructor params, not ad-hoc attributes."""
    # Tests that clone doesn't copy everything.
    # We first create an estimator, give it an own attribute, and
    # make a copy of its original state. Then we check that the copy doesn't
    # have the specific attribute we manually added to the initial estimator.
    from sklearn.feature_selection import SelectFpr, f_classif
    selector = SelectFpr(f_classif, alpha=0.1)
    selector.own_attribute = "test"
    new_selector = clone(selector)
    assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
    """clone() must reject estimators that violate the params contract."""
    # Check that clone raises an error on buggy estimators.
    buggy = Buggy()
    buggy.a = 2
    assert_raises(RuntimeError, clone, buggy)
    # object without get_params at all
    no_estimator = NoEstimator()
    assert_raises(TypeError, clone, no_estimator)
    # estimator whose __init__ takes *vargs
    varg_est = VargEstimator()
    assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
    """clone() must handle empty dense and sparse array parameters."""
    # Regression test for cloning estimators with empty arrays
    clf = MyEstimator(empty=np.array([]))
    clf2 = clone(clf)
    assert_array_equal(clf.empty, clf2.empty)
    # sparse matrix parameter
    clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
    clf2 = clone(clf)
    assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
    """clone() keeps the identical np.nan object for NaN parameters."""
    # Regression test for cloning estimators with default parameter as np.nan
    clf = MyEstimator(empty=np.nan)
    clf2 = clone(clf)
    assert_true(clf.empty is clf2.empty)
def test_repr():
    """repr() of estimators shows nested params and truncates long ones."""
    # Smoke test the repr of the base estimator.
    my_estimator = MyEstimator()
    repr(my_estimator)
    test = T(K(), K())
    assert_equal(
        repr(test),
        "T(a=K(c=None, d=None), b=K(c=None, d=None))"
    )
    # very long parameter lists are abbreviated to a fixed-length repr
    some_est = T(a=["long_params"] * 1000)
    assert_equal(len(repr(some_est)), 415)
def test_str():
    """str() of a bare estimator must not raise."""
    # Smoke test the str of the base estimator
    my_estimator = MyEstimator()
    str(my_estimator)
def test_get_params():
    """Nested params appear with deep=True and can be set via `a__d`."""
    test = T(K(), K())
    assert_true('a__d' in test.get_params(deep=True))
    assert_true('a__d' not in test.get_params(deep=False))
    test.set_params(a__d=2)
    assert_true(test.a.d == 2)
    # unknown nested parameter names must be rejected
    assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
    """Deprecated attributes (here ``b``) must be hidden from get_params."""
    # deprecated attribute should not show up as params
    est = DeprecatedAttributeEstimator(a=1)
    assert_true('a' in est.get_params())
    assert_true('a' in est.get_params(deep=True))
    assert_true('a' in est.get_params(deep=False))
    assert_true('b' not in est.get_params())
    assert_true('b' not in est.get_params(deep=True))
    assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
    """is_classifier() must see through GridSearchCV and Pipeline wrappers."""
    svc = SVC()
    assert_true(is_classifier(svc))
    assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
    assert_true(is_classifier(Pipeline([('svc', svc)])))
    assert_true(is_classifier(Pipeline([('svc_cv',
                              GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
    """set_params() must reject unknown names at every nesting level."""
    # test nested estimator parameter setting
    clf = Pipeline([("svc", SVC())])
    # non-existing parameter in svc
    assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
    # non-existing parameter of pipeline
    assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
    # we don't currently catch if the things in pipeline are estimators
    # bad_pipeline = Pipeline([("bad", NoEstimator())])
    # assert_raises(AttributeError, bad_pipeline.set_params,
    #               bad__stupid_param=True)
def test_score_sample_weight():
    """score() must honor sample_weight for classifiers and regressors."""
    from sklearn.tree import DecisionTreeClassifier
    from sklearn.tree import DecisionTreeRegressor
    from sklearn import datasets
    rng = np.random.RandomState(0)
    # test both ClassifierMixin and RegressorMixin
    estimators = [DecisionTreeClassifier(max_depth=2),
                  DecisionTreeRegressor(max_depth=2)]
    sets = [datasets.load_iris(),
            datasets.load_boston()]
    for est, ds in zip(estimators, sets):
        est.fit(ds.data, ds.target)
        # generate random sample weights
        sample_weight = rng.randint(1, 10, size=len(ds.target))
        # check that the score with and without sample weights are different
        assert_not_equal(est.score(ds.data, ds.target),
                         est.score(ds.data, ds.target,
                                   sample_weight=sample_weight),
                         msg="Unweighted and weighted scores "
                             "are unexpectedly equal")
| bsd-3-clause |
MechCoder/scikit-learn | sklearn/ensemble/__init__.py | 153 | 1382 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .iforest import IsolationForest
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
# Explicit public API of sklearn.ensemble: estimator classes plus the
# implementation submodules re-exported above.
__all__ = ["BaseEnsemble",
           "RandomForestClassifier", "RandomForestRegressor",
           "RandomTreesEmbedding", "ExtraTreesClassifier",
           "ExtraTreesRegressor", "BaggingClassifier",
           "BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
           "GradientBoostingRegressor", "AdaBoostClassifier",
           "AdaBoostRegressor", "VotingClassifier",
           "bagging", "forest", "gradient_boosting",
           "partial_dependence", "weight_boosting"]
| bsd-3-clause |
henrykironde/scikit-learn | sklearn/mixture/gmm.py | 128 | 31069 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
    """Compute per-sample, per-component Gaussian log-densities.

    Parameters
    ----------
    X : array_like, shape (n_samples, n_features)
        Data points, one n_features-dimensional point per row.
    means : array_like, shape (n_components, n_features)
        Mean vector of each mixture component, one per row.
    covars : array_like
        Covariance parameters for the n_components Gaussians; shape depends
        on `covariance_type`:
        (n_components, n_features)             if 'spherical',
        (n_features, n_features)               if 'tied',
        (n_components, n_features)             if 'diag',
        (n_components, n_features, n_features) if 'full'.
    covariance_type : string
        One of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.

    Returns
    -------
    lpr : array_like, shape (n_samples, n_components)
        Log probability of each point in X under each of the n_components
        multivariate Gaussian distributions.
    """
    if covariance_type == 'spherical':
        density = _log_multivariate_normal_density_spherical
    elif covariance_type == 'tied':
        density = _log_multivariate_normal_density_tied
    elif covariance_type == 'diag':
        density = _log_multivariate_normal_density_diag
    elif covariance_type == 'full':
        density = _log_multivariate_normal_density_full
    else:
        # preserve the original dict-lookup failure mode for unknown types
        raise KeyError(covariance_type)
    return density(X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
                    random_state=None):
    """Draw random samples from a single multivariate Gaussian.

    Parameters
    ----------
    mean : array_like, shape (n_features,)
        Mean of the distribution.
    covar : array_like, optional
        Covariance of the distribution; shape depends on `covariance_type`:
        scalar if 'spherical', (n_features,) if 'diag',
        (n_features, n_features) if 'tied' or 'full'.
    covariance_type : string, optional
        One of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
    n_samples : int, optional
        Number of samples to generate. Defaults to 1.

    Returns
    -------
    X : array, shape (n_features, n_samples)
        Randomly generated samples (features along the first axis).
    """
    rng = check_random_state(random_state)
    n_dim = len(mean)
    # start from standard-normal draws, then color them with the covariance
    samples = rng.randn(n_dim, n_samples)
    if n_samples == 1:
        samples.shape = (n_dim,)
    if covariance_type == 'spherical':
        samples *= np.sqrt(covar)
    elif covariance_type == 'diag':
        samples = np.dot(np.diag(np.sqrt(covar)), samples)
    else:
        # 'tied' / 'full': factor the covariance via its eigendecomposition
        eigvals, eigvecs = linalg.eigh(covar)
        eigvals.clip(0, out=eigvals)  # get rid of tiny negatives
        np.sqrt(eigvals, out=eigvals)
        eigvecs *= eigvals
        samples = np.dot(eigvecs, samples)
    return (samples.T + mean).T
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. the best results is kept
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
A initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
    """Estimate model parameters with the EM algorithm.

    An initialization step is performed before entering the
    expectation-maximization (EM) algorithm. To skip it, set the
    keyword argument init_params to the empty string '' when creating
    the GMM object; to do only the initialization, set n_iter=0.

    Parameters
    ----------
    X : array_like, shape (n, n_features)
        List of n_features-dimensional data points. Each row
        corresponds to a single data point.

    Returns
    -------
    self
    """
    self._fit(X, y)
    return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
    """Perform the M step of the EM algorithm and return the class weights.

    Updates the attributes selected by `params` ('w' weights, 'm' means,
    'c' covariances) in place.
    """
    resp_sum = responsibilities.sum(axis=0)
    resp_weighted_X = np.dot(responsibilities.T, X)
    # 10 * EPS guards against division by zero for (nearly) empty components.
    inv_resp_sum = 1.0 / (resp_sum[:, np.newaxis] + 10 * EPS)

    if 'w' in params:
        self.weights_ = (resp_sum / (resp_sum.sum() + 10 * EPS) + EPS)
    if 'm' in params:
        self.means_ = resp_weighted_X * inv_resp_sum
    if 'c' in params:
        # dispatch to the covariance update matching covariance_type
        mstep_func = _covar_mstep_funcs[self.covariance_type]
        self.covars_ = mstep_func(self, X, responsibilities,
                                  resp_weighted_X, inv_resp_sum, min_covar)
    return resp_sum
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
    """Bayesian information criterion for the current model fit
    and the proposed data.

    Parameters
    ----------
    X : array of shape(n_samples, n_dimensions)

    Returns
    -------
    bic : float (the lower the better)
    """
    total_log_likelihood = self.score(X).sum()
    penalty = self._n_parameters() * np.log(X.shape[0])
    return -2 * total_log_likelihood + penalty
def aic(self, X):
    """Akaike information criterion for the current model fit
    and the proposed data.

    Parameters
    ----------
    X : array of shape(n_samples, n_dimensions)

    Returns
    -------
    aic : float (the lower the better)
    """
    total_log_likelihood = self.score(X).sum()
    return 2 * self._n_parameters() - 2 * total_log_likelihood
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
    """Compute Gaussian log-density at X for a spherical model."""
    # Broadcast the per-component scalar variance across all dimensions,
    # then delegate to the diagonal implementation.
    cv = covars.copy()
    if cv.ndim == 1:
        cv = cv[:, np.newaxis]
    if cv.shape[1] == 1:
        cv = np.tile(cv, (1, X.shape[-1]))
    return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
    """Compute Gaussian log-density at X for a tied model."""
    # Replicate the single shared covariance once per component and reuse
    # the full-covariance implementation.
    n_components = means.shape[0]
    cv = np.tile(covars, (n_components, 1, 1))
    return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
        tied_cv, covariance_type, n_components):
    """Create all the covariance matrices from a given template."""
    if covariance_type == 'tied':
        # one matrix shared by every component
        return tied_cv
    if covariance_type == 'full':
        return np.tile(tied_cv, (n_components, 1, 1))
    if covariance_type == 'diag':
        return np.tile(np.diag(tied_cv), (n_components, 1))
    if covariance_type == 'spherical':
        # one scalar variance per component, replicated over dimensions
        mean_variance = tied_cv.mean() * np.ones(tied_cv.shape[1])
        return np.tile(mean_variance, (n_components, 1))
    raise ValueError("covariance_type must be one of " +
                     "'spherical', 'tied', 'diag', 'full'")
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
    """Perform the covariance M step for spherical cases."""
    # One shared variance per component: the mean of its diagonal entries,
    # replicated across all dimensions.
    diag_cv = _covar_mstep_diag(*args)
    n_dim = diag_cv.shape[1]
    return np.tile(diag_cv.mean(axis=1)[:, np.newaxis], (1, n_dim))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Perform the covariance M step for full covariance matrices."""
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    regularizer = min_covar * np.eye(n_features)
    covariances = np.empty((gmm.n_components, n_features, n_features))
    for c in range(gmm.n_components):
        resp_c = responsibilities[:, c]
        centered = X - gmm.means_[c]
        with np.errstate(under='ignore'):
            # Underflow errors in doing resp_c * centered.T are not important
            weighted_outer = np.dot(resp_c * centered.T, centered)
            avg_cv = weighted_outer / (resp_c.sum() + 10 * EPS)
        covariances[c] = avg_cv + regularizer
    return covariances
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
# Dispatch table mapping covariance_type to its M-step update function;
# consumed by GMM._do_mstep.
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
                      'diag': _covar_mstep_diag,
                      'tied': _covar_mstep_tied,
                      'full': _covar_mstep_full,
                      }
| bsd-3-clause |
brentp/goleft | indexcov/paper/plot-eiee-15.py | 1 | 1375 | from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style('white')
df = pd.read_table('eiee.15.bed.gz', compression='gzip')
print(df.head())
cols = list(df.columns)
cols[0] = "chrom"
cols = [c for c in cols[3:] if c != '15-0022964' and c != '15-0022989']
fig, ax = plt.subplots(1)
for c in cols:
ax.plot(df['start'], df[c], color='#cdcdcd', lw=0.3)
ax.plot(df['start'], df['15-0022989'], color='#a01b1b', lw=0.2, alpha=0.4)
ax.plot(df['start'], df['15-0022964'], color='#487535', lw=0.2, alpha=0.8)
ax.set_ylabel("Scaled Coverage")
ax.set_xlabel("Position On Chromosome 15")
ax.set_xlim(xmin=0, xmax=df.start.max())
print(df.start.max())
ax.axhline(y=1, color='#111111', ls="-", lw=0.6)
ax.set_ylim(0, 3)
plt.draw()
ticks = ax.get_xticks()
labels = ["%dM" % (t / 1000000) for t in ticks if t < df.start.max()]
ax.set_xticks(ticks, labels)
ax.set_xticklabels(labels)
ax.set_xlim(xmin=0, xmax=df.start.max())
sns.despine()
plt.show()
plt.close()
fig, ax = plt.subplots(1)
df = pd.read_table('eiee.15.roc')
for c in cols:
ax.plot(df['cov'], df[c], color='#dddddd', lw=2)
ax.plot(df['cov'], df['15-0022989'], color='#a01b1b', lw=1.6, alpha=0.4)
ax.plot(df['cov'], df['15-0022964'], color='#487535', lw=1.6, alpha=0.8)
ax.set_xlabel("Scaled Coverage")
ax.set_ylabel("Proportion of Regions Covered")
sns.despine()
plt.show()
| mit |
MohammedWasim/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures, such as ARI, display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                             fixed_n_classes=None, n_runs=5, seed=42):
    """Compute score for 2 random uniform cluster labelings.

    Both random labelings have the same number of clusters for each
    possible value in ``n_clusters_range``.

    When ``fixed_n_classes`` is not None the first labeling is considered a
    ground truth class assignment with a fixed number of classes.

    Parameters
    ----------
    score_func : callable
        Metric taking two label arrays and returning a float.
    n_samples : int
        Number of samples per labeling.
    n_clusters_range : iterable of int
        Cluster counts to evaluate.
    fixed_n_classes : int or None
        If given, the "ground truth" labeling uses this many classes
        for every evaluation instead of varying with ``n_clusters_range``.
    n_runs : int
        Number of random repetitions per cluster count.
    seed : int
        Seed for the random number generator.

    Returns
    -------
    scores : ndarray of shape (len(n_clusters_range), n_runs)
    """
    rng = np.random.RandomState(seed)
    # BUG FIX: RandomState.random_integers is deprecated (and removed in
    # recent NumPy).  randint's upper bound is exclusive, so
    # randint(0, k) draws labels in [0, k - 1] — the exact same values
    # (and random stream) as the old random_integers(low=0, high=k - 1).
    scores = np.zeros((len(n_clusters_range), n_runs))

    if fixed_n_classes is not None:
        labels_a = rng.randint(low=0, high=fixed_n_classes, size=n_samples)

    for i, k in enumerate(n_clusters_range):
        for j in range(n_runs):
            if fixed_n_classes is None:
                labels_a = rng.randint(low=0, high=k, size=n_samples)
            labels_b = rng.randint(low=0, high=k, size=n_samples)
            scores[i, j] = score_func(labels_a, labels_b)
    return scores
score_funcs = [
    metrics.adjusted_rand_score,
    metrics.v_measure_score,
    metrics.adjusted_mutual_info_score,
    metrics.mutual_info_score,
]

# 2 independent random clusterings with equal cluster number
n_samples = 100
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int is the documented replacement.
n_clusters_range = np.linspace(2, n_samples, 10).astype(int)

plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
    print("done in %0.3fs" % (time() - t0))
    plots.append(plt.errorbar(
        n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for 2 random uniform labelings\n"
          "with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)

# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(int)
n_classes = 10

plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                                      fixed_n_classes=n_classes)
    print("done in %0.3fs" % (time() - t0))
    plots.append(plt.errorbar(
        n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for random uniform labeling\n"
          "against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
mmessick/Tax-Calculator | taxcalc/tests/test_decorators.py | 2 | 8784 | import os
import sys
# Make the taxcalc package importable when tests are run from the source
# tree, without requiring an installed copy.
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_path, "../../"))
sys.path.append(os.path.join(cur_path, "../"))
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
from numba import jit, vectorize, guvectorize
from taxcalc import *
# Element-wise step function: -42 for inc_in < 5, 42 for 5 <= inc_in < 8,
# and 99 for inc_in >= 8.  extract_array lets it consume pandas Series.
@extract_array
@vectorize(['int32(int32)'])
def fnvec_ifelse_df(inc_in):
    ans = -42
    if inc_in < 5:
        ans = -42
    if inc_in >= 5 and inc_in < 8:
        ans = 42
    if inc_in >= 8:
        ans = 99
    return ans


# Same step function via dataframe_vectorize.  NOTE: __doc__ must remain
# exactly 'Docstring' — test_with_dataframe_vec asserts on it.
@dataframe_vectorize(['int32(int32)'])
def fnvec_ifelse_df2(inc_in):
    """Docstring"""
    ans = -42
    if inc_in < 5:
        ans = -42
    if inc_in >= 5 and inc_in < 8:
        ans = 42
    if inc_in >= 8:
        ans = 99
    return ans


# Element-wise copy of inc_in into inc_out (guvectorize signature (x)->(x)).
@extract_array
@guvectorize(["void(int32[:],int32[:])"], "(x) -> (x)")
def fnvec_copy_df(inc_in, inc_out):
    for i in range(inc_in.shape[0]):
        inc_out[i] = inc_in[i]


# Same copy kernel via dataframe_guvectorize.  NOTE: __doc__ must remain
# exactly 'Docstring' — test_with_dataframe_guvec asserts on it.
@dataframe_guvectorize(["void(int32[:],int32[:])"], "(x) -> (x)")
def fnvec_copy_df2(inc_in, inc_out):
    """Docstring"""
    for i in range(inc_in.shape[0]):
        inc_out[i] = inc_in[i]
def test_with_df_wrapper():
    # extract_array-wrapped kernels accept pandas Series directly.
    x = np.array([4, 5, 9], dtype='i4')
    y = np.array([0, 0, 0], dtype='i4')
    df = pd.DataFrame(data=np.column_stack((x, y)), columns=['x', 'y'])

    fnvec_copy_df(df.x, df.y)
    assert np.all(df.x.values == df.y.values)
    z = fnvec_ifelse_df(df.x)
    assert np.all(np.array([-42, 42, 99], dtype='i4') == z)


def test_with_dataframe_guvec():
    # dataframe_guvectorize must preserve the wrapped function's metadata.
    x = np.array([4, 5, 9], dtype='i4')
    y = np.array([0, 0, 0], dtype='i4')
    df = pd.DataFrame(data=np.column_stack((x, y)), columns=['x', 'y'])

    fnvec_copy_df2(df.x, df.y)
    assert fnvec_copy_df2.__name__ == 'fnvec_copy_df2'
    assert fnvec_copy_df2.__doc__ == 'Docstring'
    assert np.all(df.x.values == df.y.values)


def test_with_dataframe_vec():
    # dataframe_vectorize must preserve metadata and element-wise results.
    x = np.array([4, 5, 9], dtype='i4')
    y = np.array([0, 0, 0], dtype='i4')
    df = pd.DataFrame(data=np.column_stack((x, y)), columns=['x', 'y'])

    z = fnvec_ifelse_df2(df.x)
    assert fnvec_ifelse_df2.__name__ == 'fnvec_ifelse_df2'
    assert fnvec_ifelse_df2.__doc__ == 'Docstring'
    assert np.all(np.array([-42, 42, 99], dtype='i4') == z)


# Copies column x of a whole DataFrame into column y.
@dataframe_wrap_guvectorize(["void(int32[:],int32[:])"], "(x) -> (x)")
def fnvec_copy_dfw(x, y):
    for i in range(x.shape[0]):
        y[i] = x[i]


def test_with_dataframe_wrap_guvectorize():
    # dataframe_wrap_guvectorize takes the DataFrame itself as the argument.
    x = np.array([4, 5, 9], dtype='i4')
    y = np.array([0, 0, 0], dtype='i4')
    df = pd.DataFrame(data=np.column_stack((x, y)), columns=['x', 'y'])
    fnvec_copy_dfw(df)
    assert(np.all(df.x == df.y))
def test_create_apply_function_string():
    # Generated source loops over rows, unpacking per-row calls to the
    # jitted function into the output columns.
    ans = create_apply_function_string(['a', 'b', 'c'], ['d', 'e'], [])
    exp = ("def ap_func(x_0,x_1,x_2,x_3,x_4):\n"
           " for i in range(len(x_0)):\n"
           " x_0[i],x_1[i],x_2[i] = jitted_f(x_3[i],x_4[i])\n"
           " return x_0,x_1,x_2\n")
    assert ans == exp


def test_create_apply_function_string_with_params():
    # Names listed as parameters ('d') are passed whole, not indexed per row.
    ans = create_apply_function_string(['a', 'b', 'c'], ['d', 'e'], ['d'])
    exp = ("def ap_func(x_0,x_1,x_2,x_3,x_4):\n"
           " for i in range(len(x_0)):\n"
           " x_0[i],x_1[i],x_2[i] = jitted_f(x_3,x_4[i])\n"
           " return x_0,x_1,x_2\n")
    assert ans == exp


def test_create_toplevel_function_string_mult_outputs():
    # Multiple outputs are stacked column-wise into the result DataFrame.
    ans = create_toplevel_function_string(['a', 'b'], ['d', 'e'],
                                          ['pm', 'pm', 'pf', 'pm'])
    exp = ''
    exp = ("def hl_func(pm, pf):\n"
           " from pandas import DataFrame\n"
           " import numpy as np\n"
           " outputs = \\\n"
           " (pm.a, pm.b) = \\\n"
           " applied_f(pm.a, pm.b, pf.d, pm.e, )\n"
           " header = ['a', 'b']\n"
           " return DataFrame(data=np.column_stack(outputs),"
           "columns=header)")
    assert ans == exp


def test_create_toplevel_function_string():
    # A single output skips the column_stack call.
    ans = create_toplevel_function_string(['a'], ['d', 'e'],
                                          ['pm', 'pf', 'pm'])
    exp = ''
    exp = ("def hl_func(pm, pf):\n"
           " from pandas import DataFrame\n"
           " import numpy as np\n"
           " outputs = \\\n"
           " (pm.a) = \\\n"
           " applied_f(pm.a, pf.d, pm.e, )\n"
           " header = ['a']\n"
           " return DataFrame(data=outputs,"
           "columns=header)")
    assert ans == exp
def some_calc(x, y, z):
    """Return the pair (x + y, x + y + z)."""
    partial_sum = x + y
    return (partial_sum, partial_sum + z)
def test_make_apply_function():
    # Smoke test: building a jitted apply function in nopython mode
    # succeeds and returns a truthy callable.
    ans = make_apply_function(some_calc, ['a', 'b'], ['x', 'y', 'z'], [],
                              do_jit=True, no_python=True)
    assert ans
# Row-wise calculator: a = x + y, b = x + y + z, applied over pm/pf columns.
@apply_jit(["a", "b"], ["x", "y", "z"], nopython=True)
def Magic_calc(x, y, z):
    a = x + y
    b = x + y + z
    return (a, b)


def Magic(pm, pf):
    # Adjustments
    outputs = \
        pf.a, pf.b = Magic_calc(pm, pf)
    header = ['a', 'b']

    return DataFrame(data=np.column_stack(outputs),
                     columns=header)


# Same calculation driven by iterate_jit instead of apply_jit.
@iterate_jit(nopython=True)
def Magic_calc2(x, y, z):
    a = x + y
    b = x + y + z
    return (a, b)


class Foo(object):
    # Minimal attribute container standing in for parameter/record objects.
    pass


# MARS of 1 or 6 maps to a separate-return count of 2, otherwise 1.
@iterate_jit(nopython=True)
def bar(MARS):
    if MARS == 1 or MARS == 6:
        _sep = 2
    else:
        _sep = 1
    return _sep


# Produces four outputs, exercising multi-value returns and a line
# continuation inside the return tuple.
@iterate_jit(nopython=True)
def ret_everything(a, b, c, d, e, f):
    c = a + b
    d = a + b
    e = a + b
    f = a + b
    return (c, d, e,
            f)
def test_magic_apply_jit():
    # apply_jit path: ones everywhere -> a = 2, b = 3 for every row.
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    xx = Magic(pm, pf)
    exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
    assert_frame_equal(xx, exp)


def test_magic_iterate_jit():
    # iterate_jit path produces the same DataFrame as apply_jit.
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    xx = Magic_calc2(pm, pf)
    exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
    assert_frame_equal(xx, exp)


def test_bar_iterate_jit():
    # MARS == 1 for every row -> _sep == 2 everywhere.
    pm = Foo()
    pf = Foo()
    pf.MARS = np.ones((5,))
    pf._sep = np.ones((5,))
    ans = bar(pm, pf)
    exp = DataFrame(data=[2.0] * 5, columns=["_sep"])
    assert_frame_equal(ans, exp)


def test_ret_everything_iterate_jit():
    # All four outputs equal a + b == 2.0 for every row.
    pm = Foo()
    pf = Foo()
    pf.a = np.ones((5,))
    pf.b = np.ones((5,))
    pf.c = np.ones((5,))
    pf.d = np.ones((5,))
    pf.e = np.ones((5,))
    pf.f = np.ones((5,))
    ans = ret_everything(pm, pf)
    exp = DataFrame(data=[[2.0, 2.0, 2.0, 2.0]] * 5,
                    columns=["c", "d", "e", "f"])
    assert_frame_equal(ans, exp)
# `puf` is declared in `parameters` and given a default of True; it toggles
# which formula computes b.
@iterate_jit(parameters=['puf'], nopython=True, puf=True)
def Magic_calc3(x, y, z, puf):
    a = x + y
    if (puf):
        b = x + y + z
    else:
        b = 42
    return (a, b)


def test_function_takes_kwarg():
    # Default puf=True -> b = x + y + z = 3.
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    ans = Magic_calc3(pm, pf)
    exp = DataFrame(data=[[2.0, 3.0]] * 5,
                    columns=["a", "b"])
    assert_frame_equal(ans, exp)


def test_function_takes_kwarg_nondefault_value():
    # Overriding puf=False at call time -> b = 42.
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    ans = Magic_calc3(pm, pf, puf=False)
    exp = DataFrame(data=[[2.0, 42.0]] * 5,
                    columns=["a", "b"])
    assert_frame_equal(ans, exp)
# Same as Magic_calc3 but without listing `puf` in `parameters`; the
# decorator must still route the keyword default through.
@iterate_jit(nopython=True, puf=True)
def Magic_calc4(x, y, z, puf):
    a = x + y
    if (puf):
        b = x + y + z
    else:
        b = 42
    return (a, b)


def test_function_no_parameters_listed():
    # puf defaults to True even when not declared in `parameters`.
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    ans = Magic_calc4(pm, pf)
    exp = DataFrame(data=[[2.0, 3.0]] * 5,
                    columns=["a", "b"])
    assert_frame_equal(ans, exp)
# `w` is an array-valued parameter (indexed inside the function); only `w`
# is declared in `parameters`, while `puf` arrives as a keyword default.
@iterate_jit(parameters=['w'], nopython=True, puf=True)
def Magic_calc5(w, x, y, z, puf):
    a = x + y
    if (puf):
        b = w[0] + x + y + z
    else:
        b = 42
    return (a, b)


def test_function_parameters_optional():
    # w[0] == 1 contributes to b -> b = 1 + 1 + 1 + 1 = 4.
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pm.w = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    ans = Magic_calc5(pm, pf)
    exp = DataFrame(data=[[2.0, 4.0]] * 5,
                    columns=["a", "b"])
    assert_frame_equal(ans, exp)
| mit |
DonBeo/statsmodels | statsmodels/nonparametric/_kernel_base.py | 29 | 18238 | """
Module containing the base object for multivariate kernel density and
regression, plus some utilities.
"""
from statsmodels.compat.python import range, string_types
import copy

import numpy as np
from scipy import optimize
from scipy.stats.mstats import mquantiles

try:
    import joblib
    has_joblib = True
except ImportError:
    # joblib is optional: parallel bandwidth estimation over data subsets
    # silently falls back to a serial loop when it is missing.
    has_joblib = False

from . import kernels


# Lookup table mapping kernel-name strings to the implementations in
# `kernels`: plain pdf kernels, regression variants (_reg), convolution
# kernels, cdf kernels, and the Gaussian derivative (d_gaussian).
kernel_func = dict(wangryzin=kernels.wang_ryzin,
                   aitchisonaitken=kernels.aitchison_aitken,
                   gaussian=kernels.gaussian,
                   aitchison_aitken_reg = kernels.aitchison_aitken_reg,
                   wangryzin_reg = kernels.wang_ryzin_reg,
                   gauss_convolution=kernels.gaussian_convolution,
                   wangryzin_convolution=kernels.wang_ryzin_convolution,
                   aitchisonaitken_convolution=kernels.aitchison_aitken_convolution,
                   gaussian_cdf=kernels.gaussian_cdf,
                   aitchisonaitken_cdf=kernels.aitchison_aitken_cdf,
                   wangryzin_cdf=kernels.wang_ryzin_cdf,
                   d_gaussian=kernels.d_gaussian)
def _compute_min_std_IQR(data):
"""Compute minimum of std and IQR for each variable."""
s1 = np.std(data, axis=0)
q75 = mquantiles(data, 0.75, axis=0).data[0]
q25 = mquantiles(data, 0.25, axis=0).data[0]
s2 = (q75 - q25) / 1.349 # IQR
dispersion = np.minimum(s1, s2)
return dispersion
def _compute_subset(class_type, data, bw, co, do, n_cvars, ix_ord,
                    ix_unord, n_sub, class_vars, randomize, bound):
    """Compute bw on subset of data.

    Called from ``GenericKDE._compute_efficient_*``.

    Parameters mirror the state of the calling estimator: `class_type`
    selects which estimator to rebuild on the subset, `co`/`do` are twice
    the continuous/discrete kernel orders, and `bound` is the (start, stop)
    slice used when `randomize` is False.

    Returns
    -------
    (sample_scale_sub, bw_sub) : tuple of ndarrays
        Scaling factors and raw bandwidths estimated on the subset.

    Notes
    -----
    Needs to be outside the class in order for joblib to be able to pickle it.
    """
    if randomize:
        # NOTE: shuffles the caller's array in place (it is a deepcopy made
        # by _compute_efficient).
        np.random.shuffle(data)
        sub_data = data[:n_sub, :]
    else:
        sub_data = data[bound[0]:bound[1], :]

    # Rebuild the matching estimator on the subset, with efficient=False
    # to avoid infinite recursion back into this function.
    if class_type == 'KDEMultivariate':
        from .kernel_density import KDEMultivariate
        var_type = class_vars[0]
        sub_model = KDEMultivariate(sub_data, var_type, bw=bw,
                                    defaults=EstimatorSettings(efficient=False))
    elif class_type == 'KDEMultivariateConditional':
        from .kernel_density import KDEMultivariateConditional
        k_dep, dep_type, indep_type = class_vars
        endog = sub_data[:, :k_dep]
        exog = sub_data[:, k_dep:]
        sub_model = KDEMultivariateConditional(endog, exog, dep_type,
            indep_type, bw=bw, defaults=EstimatorSettings(efficient=False))
    elif class_type == 'KernelReg':
        from .kernel_regression import KernelReg
        var_type, k_vars, reg_type = class_vars
        endog = _adjust_shape(sub_data[:, 0], 1)
        exog = _adjust_shape(sub_data[:, 1:], k_vars)
        sub_model = KernelReg(endog=endog, exog=exog, reg_type=reg_type,
                              var_type=var_type, bw=bw,
                              defaults=EstimatorSettings(efficient=False))
    else:
        raise ValueError("class_type not recognized, should be one of " \
                 "{KDEMultivariate, KDEMultivariateConditional, KernelReg}")

    # Compute dispersion in next 4 lines
    if class_type == 'KernelReg':
        # drop the dependent-variable column for the dispersion measure
        sub_data = sub_data[:, 1:]

    dispersion = _compute_min_std_IQR(sub_data)
    fct = dispersion * n_sub**(-1. / (n_cvars + co))
    fct[ix_unord] = n_sub**(-2. / (n_cvars + do))
    fct[ix_ord] = n_sub**(-2. / (n_cvars + do))
    sample_scale_sub = sub_model.bw / fct  #TODO: check if correct
    bw_sub = sub_model.bw
    return sample_scale_sub, bw_sub
class GenericKDE (object):
"""
Base class for density estimation and regression KDE classes.
"""
def _compute_bw(self, bw):
    """
    Computes the bandwidth of the data.

    Parameters
    ----------
    bw : array_like or str
        If array_like: user-specified bandwidth, used as-is.
        If a string, the name of a selection method, one of:
            - cv_ml: cross validation maximum likelihood
            - normal_reference: normal reference rule of thumb
            - cv_ls: cross validation least squares
        If None, 'normal_reference' is used.

    Notes
    -----
    Also records the chosen method on ``self._bw_method``.
    """
    self.bw_func = dict(normal_reference=self._normal_reference,
                        cv_ml=self._cv_ml, cv_ls=self._cv_ls)
    if bw is None:
        return self.bw_func['normal_reference']()
    if isinstance(bw, string_types):
        # a bandwidth-selection method was requested by name
        self._bw_method = bw
        return self.bw_func[bw]()
    # otherwise treat bw as an explicit, user-supplied bandwidth array
    self._bw_method = "user-specified"
    return np.asarray(bw)
def _compute_dispersion(self, data):
    """
    Computes the measure of dispersion: the minimum of the standard
    deviation and (interquartile range / 1.349) per variable.

    Notes
    -----
    Reimplemented in `KernelReg`, because the first column of `data` has
    to be removed there.

    References
    ----------
    See the notes on the bwscaling option of npreg, npudens and npcdens
    in the R `np` package for a discussion of this dispersion measure.
    """
    return _compute_min_std_IQR(data)
def _get_class_vars_type(self):
    """Helper method to be able to pass needed vars to _compute_subset.

    Subclasses must return a ``(class_type, class_vars)`` pair identifying
    the concrete estimator; this base implementation is a no-op stub.
    """
    pass
def _compute_efficient(self, bw):
    """
    Computes the bandwidth by estimating the scaling factor (c)
    in n_res resamples of size ``n_sub`` (in `randomize` case), or by
    dividing ``nobs`` into as many ``n_sub`` blocks as needed (if
    `randomize` is False).

    Parameters
    ----------
    bw : array_like, str or None
        Bandwidth or selection-method name forwarded to each subset fit.
        A user-specified array is returned unchanged.

    References
    ----------
    See p.9 in socserv.mcmaster.ca/racine/np_faq.pdf
    """
    if bw is None:
        self._bw_method = 'normal_reference'
    if isinstance(bw, string_types):
        self._bw_method = bw
    else:
        # explicit bandwidths need no subset estimation
        self._bw_method = "user-specified"
        return bw

    nobs = self.nobs
    n_sub = self.n_sub
    data = copy.deepcopy(self.data)
    n_cvars = self.data_type.count('c')
    co = 4  # 2*order of continuous kernel
    do = 4  # 2*order of discrete kernel
    _, ix_ord, ix_unord = _get_type_pos(self.data_type)

    # Define bounds for slicing the data
    if self.randomize:
        # randomize chooses blocks of size n_sub, independent of nobs
        bounds = [None] * self.n_res
    else:
        bounds = [(i * n_sub, (i+1) * n_sub) for i in range(nobs // n_sub)]
        if nobs % n_sub > 0:
            # last, smaller block holds the remainder observations
            bounds.append((nobs - nobs % n_sub, nobs))

    n_blocks = self.n_res if self.randomize else len(bounds)
    sample_scale = np.empty((n_blocks, self.k_vars))
    only_bw = np.empty((n_blocks, self.k_vars))

    class_type, class_vars = self._get_class_vars_type()
    if has_joblib:
        # `res` is a list of tuples (sample_scale_sub, bw_sub)
        res = joblib.Parallel(n_jobs=self.n_jobs) \
            (joblib.delayed(_compute_subset) \
            (class_type, data, bw, co, do, n_cvars, ix_ord, ix_unord, \
            n_sub, class_vars, self.randomize, bounds[i]) \
            for i in range(n_blocks))
    else:
        # serial fallback when joblib is unavailable
        res = []
        for i in range(n_blocks):
            res.append(_compute_subset(class_type, data, bw, co, do,
                                       n_cvars, ix_ord, ix_unord, n_sub,
                                       class_vars, self.randomize,
                                       bounds[i]))

    for i in range(n_blocks):
        sample_scale[i, :] = res[i][0]
        only_bw[i, :] = res[i][1]

    s = self._compute_dispersion(data)
    order_func = np.median if self.return_median else np.mean
    m_scale = order_func(sample_scale, axis=0)
    # TODO: Check if 1/5 is correct in line below!
    # Rescale the averaged scaling factor back to the full sample size;
    # discrete variables use the discrete kernel-order exponent.
    bw = m_scale * s * nobs**(-1. / (n_cvars + co))
    bw[ix_ord] = m_scale[ix_ord] * nobs**(-2./ (n_cvars + do))
    bw[ix_unord] = m_scale[ix_unord] * nobs**(-2./ (n_cvars + do))

    if self.return_only_bw:
        bw = np.median(only_bw, axis=0)

    return bw
def _set_defaults(self, defaults):
"""Sets the default values for the efficient estimation"""
self.n_res = defaults.n_res
self.n_sub = defaults.n_sub
self.randomize = defaults.randomize
self.return_median = defaults.return_median
self.efficient = defaults.efficient
self.return_only_bw = defaults.return_only_bw
self.n_jobs = defaults.n_jobs
def _normal_reference(self):
"""
Returns Scott's normal reference rule of thumb bandwidth parameter.
Notes
-----
See p.13 in [2] for an example and discussion. The formula for the
bandwidth is
.. math:: h = 1.06n^{-1/(4+q)}
where ``n`` is the number of observations and ``q`` is the number of
variables.
"""
X = np.std(self.data, axis=0)
return 1.06 * X * self.nobs ** (- 1. / (4 + self.data.shape[1]))
    def _set_bw_bounds(self, bw):
        """
        Sets bandwidth lower bound to effectively zero (1e-10), and for
        discrete values upper bound to 1.
        """
        # Negative bandwidths are invalid; clamp to a tiny positive value.
        bw[bw < 0] = 1e-10
        _, ix_ord, ix_unord = _get_type_pos(self.data_type)
        # Discrete (ordered/unordered) kernel bandwidths behave like
        # probabilities, so cap them at 1.
        bw[ix_ord] = np.minimum(bw[ix_ord], 1.)
        bw[ix_unord] = np.minimum(bw[ix_unord], 1.)

        return bw
    def _cv_ml(self):
        r"""
        Returns the cross validation maximum likelihood bandwidth parameter.

        Notes
        -----
        For more details see p.16, 18, 27 in Ref. [1] (see module docstring).

        Returns the bandwidth estimate that maximizes the leave-one-out
        likelihood. The leave-one-out log likelihood function is:

        .. math:: \ln L=\sum_{i=1}^{n}\ln f_{-i}(X_{i})

        The leave-one-out kernel estimator of :math:`f_{-i}` is:

        .. math:: f_{-i}(X_{i})=\frac{1}{(n-1)h}
                        \sum_{j=1,j\neq i}K_{h}(X_{i},X_{j})

        where :math:`K_{h}` represents the Generalized product kernel
        estimator:

        .. math:: K_{h}(X_{i},X_{j})=\prod_{s=1}^
                        {q}h_{s}^{-1}k\left(\frac{X_{is}-X_{js}}{h_{s}}\right)
        """
        # The initial value for the optimization is the normal_reference
        # rule-of-thumb bandwidth; fmin then minimizes the leave-one-out
        # negative log-likelihood around it.
        h0 = self._normal_reference()
        bw = optimize.fmin(self.loo_likelihood, x0=h0, args=(np.log, ),
                           maxiter=1e3, maxfun=1e3, disp=0, xtol=1e-3)
        bw = self._set_bw_bounds(bw)  # bound bw if necessary
        return bw
    def _cv_ls(self):
        r"""
        Returns the cross-validation least squares bandwidth parameter(s).

        Notes
        -----
        For more details see pp. 16, 27 in Ref. [1] (see module docstring).

        Returns the value of the bandwidth that maximizes the integrated mean
        square error between the estimated and actual distribution.  The
        integrated mean square error (IMSE) is given by:

        .. math:: \int\left[\hat{f}(x)-f(x)\right]^{2}dx

        This is the general formula for the IMSE.  The IMSE differs for
        conditional (``KDEMultivariateConditional``) and unconditional
        (``KDEMultivariate``) kernel density estimation.
        """
        # Start from the rule-of-thumb bandwidth and minimize the IMSE.
        h0 = self._normal_reference()
        bw = optimize.fmin(self.imse, x0=h0, maxiter=1e3, maxfun=1e3, disp=0,
                           xtol=1e-3)
        bw = self._set_bw_bounds(bw)  # bound bw if necessary
        return bw
    def loo_likelihood(self):
        """Leave-one-out likelihood objective; must be provided by subclasses."""
        raise NotImplementedError
class EstimatorSettings(object):
    """
    Object to specify settings for density estimation or regression.

    `EstimatorSettings` has several properties related to how bandwidth
    estimation for the `KDEMultivariate`, `KDEMultivariateConditional`,
    `KernelReg` and `CensoredKernelReg` classes behaves.

    Parameters
    ----------
    efficient: bool, optional
        If True, the bandwidth estimation is to be performed
        efficiently -- by taking smaller sub-samples and estimating
        the scaling factor of each subsample.  This is useful for large
        samples (nobs >> 300) and/or multiple variables (k_vars > 3).
        If False (default), all data is used at the same time.
    randomize: bool, optional
        If True, the bandwidth estimation is to be performed by
        taking `n_res` random resamples (with replacement) of size `n_sub` from
        the full sample.  If set to False (default), the estimation is
        performed by slicing the full sample in sub-samples of size `n_sub` so
        that all samples are used once.
    n_sub: int, optional
        Size of the sub-samples.  Default is 50.
    n_res: int, optional
        The number of random re-samples used to estimate the bandwidth.
        Only has an effect if ``randomize == True``.  Default value is 25.
    return_median: bool, optional
        If True (default), the estimator uses the median of all scaling factors
        for each sub-sample to estimate the bandwidth of the full sample.
        If False, the estimator uses the mean.
    return_only_bw: bool, optional
        If True, the estimator is to use the bandwidth and not the
        scaling factor.  This is *not* theoretically justified.
        Should be used only for experimenting.
    n_jobs : int, optional
        The number of jobs to use for parallel estimation with
        ``joblib.Parallel``.  Default is -1, meaning ``n_cores - 1``, with
        ``n_cores`` the number of available CPU cores.
        See the `joblib documentation
        <https://pythonhosted.org/joblib/parallel.html>`_ for more details.

    Examples
    --------
    >>> settings = EstimatorSettings(randomize=True, n_jobs=3)
    >>> k_dens = KDEMultivariate(data, var_type, defaults=settings)
    """
    def __init__(self, efficient=False, randomize=False, n_res=25, n_sub=50,
                 return_median=True, return_only_bw=False, n_jobs=-1):
        # Plain value object: attributes are read directly by the
        # estimators' _set_defaults machinery.
        self.efficient = efficient
        self.randomize = randomize
        self.n_res = n_res
        self.n_sub = n_sub
        self.return_median = return_median
        self.return_only_bw = return_only_bw  # TODO: remove this?
        self.n_jobs = n_jobs
class LeaveOneOut(object):
    """
    Generator to give leave-one-out views on X.

    Parameters
    ----------
    X : array-like
        2-D array.

    Examples
    --------
    >>> X = np.random.normal(0, 1, [10,2])
    >>> loo = LeaveOneOut(X)
    >>> for x in loo:
    ...    print(x)

    Notes
    -----
    A little lighter weight than sklearn LOO. We don't need test index.
    Also passes views on X, not the index.
    """
    def __init__(self, X):
        self.X = np.asarray(X)

    def __iter__(self):
        X = self.X
        nobs, k_vars = np.shape(X)

        for i in range(nobs):
            # Boolean mask selecting every row except the i-th.
            # Use the builtin ``bool``: the ``np.bool`` alias was deprecated
            # in NumPy 1.20 and removed in NumPy 1.24.
            index = np.ones(nobs, dtype=bool)
            index[i] = False
            yield X[index, :]
def _get_type_pos(var_type):
ix_cont = np.array([c == 'c' for c in var_type])
ix_ord = np.array([c == 'o' for c in var_type])
ix_unord = np.array([c == 'u' for c in var_type])
return ix_cont, ix_ord, ix_unord
def _adjust_shape(dat, k_vars):
    """ Returns an array of shape (nobs, k_vars) for use with `gpke`."""
    dat = np.asarray(dat)
    # Drop singleton dimensions from >2-D input before interpreting shape.
    if dat.ndim > 2:
        dat = np.squeeze(dat)
    if dat.ndim == 1 and k_vars > 1:  # one obs many vars
        nobs = 1
    elif dat.ndim == 1 and k_vars == 1:  # one obs one var
        nobs = len(dat)
    else:
        # 2-D input: if rows look like variables (k_vars rows, not k_vars
        # columns), transpose so observations run down axis 0.
        if np.shape(dat)[0] == k_vars and np.shape(dat)[1] != k_vars:
            dat = dat.T

        nobs = np.shape(dat)[0]  # ndim >1 so many obs many vars

    dat = np.reshape(dat, (nobs, k_vars))
    return dat
def gpke(bw, data, data_predict, var_type, ckertype='gaussian',
         okertype='wangryzin', ukertype='aitchisonaitken', tosum=True):
    r"""
    Returns the non-normalized Generalized Product Kernel Estimator

    Parameters
    ----------
    bw: 1-D ndarray
        The user-specified bandwidth parameters.
    data: 1D or 2-D ndarray
        The training data.
    data_predict: 1-D ndarray
        The evaluation points at which the kernel estimation is performed.
    var_type: str, optional
        The variable type (continuous, ordered, unordered).
    ckertype: str, optional
        The kernel used for the continuous variables.
    okertype: str, optional
        The kernel used for the ordered discrete variables.
    ukertype: str, optional
        The kernel used for the unordered discrete variables.
    tosum : bool, optional
        Whether or not to sum the calculated array of densities.  Default is
        True.

    Returns
    -------
    dens: array-like
        The generalized product kernel density estimator.

    Notes
    -----
    The formula for the multivariate kernel estimator for the pdf is:

    .. math:: f(x)=\frac{1}{nh_{1}...h_{q}}\sum_{i=1}^
                    {n}K\left(\frac{X_{i}-x}{h}\right)

    where

    .. math:: K\left(\frac{X_{i}-x}{h}\right) =
                k\left( \frac{X_{i1}-x_{1}}{h_{1}}\right)\times
                k\left( \frac{X_{i2}-x_{2}}{h_{2}}\right)\times...\times
                k\left(\frac{X_{iq}-x_{q}}{h_{q}}\right)
    """
    kertypes = dict(c=ckertype, o=okertype, u=ukertype)
    # Earlier list-based implementation kept for reference:
    #Kval = []
    #for ii, vtype in enumerate(var_type):
    #    func = kernel_func[kertypes[vtype]]
    #    Kval.append(func(bw[ii], data[:, ii], data_predict[ii]))
    #Kval = np.column_stack(Kval)

    # One column of kernel values per variable, using the kernel matching
    # that variable's type.
    Kval = np.empty(data.shape)
    for ii, vtype in enumerate(var_type):
        func = kernel_func[kertypes[vtype]]
        Kval[:, ii] = func(bw[ii], data[:, ii], data_predict[ii])

    iscontinuous = np.array([c == 'c' for c in var_type])
    # Only continuous kernels carry a 1/h normalization factor.
    dens = Kval.prod(axis=1) / np.prod(bw[iscontinuous])

    if tosum:
        return dens.sum(axis=0)
    else:
        return dens
| bsd-3-clause |
Myasuka/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants bicluster with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)

# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause

import numpy as np
from matplotlib import pyplot as plt

from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score

# Build a 300x300 matrix with 5 implanted (unshuffled) biclusters.
data, rows, columns = make_biclusters(
    shape=(300, 300), n_clusters=5, noise=5,
    shuffle=False, random_state=0)

plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")

# Shuffle rows and columns to hide the bicluster structure;
# keep the permutation indices to score against ground truth later.
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")

model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
# Consensus score of 1.0 means the biclusters were recovered perfectly.
score = consensus_score(model.biclusters_,
                        (rows[:, row_idx], columns[:, col_idx]))

print("consensus score: {:.3f}".format(score))

# Reorder rows and columns by cluster label so biclusters are contiguous.
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]

plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")

plt.show()
| bsd-3-clause |
jimboatarm/workload-automation | wlauto/instrumentation/energy_model/__init__.py | 2 | 42149 | # Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#pylint: disable=attribute-defined-outside-init,access-member-before-definition,redefined-outer-name
from __future__ import division
import os
import math
import time
from tempfile import mktemp
from base64 import b64encode
from collections import Counter, namedtuple
try:
import jinja2
import pandas as pd
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
import numpy as np
low_filter = np.vectorize(lambda x: x > 0 and x or 0) # pylint: disable=no-member
import_error = None
except ImportError as e:
import_error = e
jinja2 = None
pd = None
plt = None
np = None
low_filter = None
from wlauto import Instrument, Parameter, File
from wlauto.exceptions import ConfigError, InstrumentError, DeviceError
from wlauto.instrumentation import instrument_is_installed
from wlauto.utils.types import caseless_string, list_or_caseless_string, list_of_ints
from wlauto.utils.misc import list_to_mask
# Output artifact file names and template resource names.
FREQ_TABLE_FILE = 'frequency_power_perf_data.csv'
CPUS_TABLE_FILE = 'projected_cap_power.csv'
MEASURED_CPUS_TABLE_FILE = 'measured_cap_power.csv'
IDLE_TABLE_FILE = 'idle_power_perf_data.csv'
REPORT_TEMPLATE_FILE = 'report.template'
EM_TEMPLATE_FILE = 'em.template'

# Row types used to build the energy model tables.
IdlePowerState = namedtuple('IdlePowerState', ['power'])
CapPowerState = namedtuple('CapPowerState', ['cap', 'power'])
class EnergyModel(object):
    """Accumulator for per-cluster and per-core idle- and capacity-state
    entries of a big.LITTLE energy model."""

    def __init__(self):
        self.big_cluster_idle_states = []
        self.little_cluster_idle_states = []
        self.big_cluster_cap_states = []
        self.little_cluster_cap_states = []
        self.big_core_idle_states = []
        self.little_core_idle_states = []
        self.big_core_cap_states = []
        self.little_core_cap_states = []

    def _target_lists(self, cluster, suffix):
        # Resolve the (cluster-level, core-level) list pair for a cluster;
        # reject anything other than 'big'/'little'.
        if cluster not in ('big', 'little'):
            raise ValueError('Unexpected cluster: {}'.format(cluster))
        return (getattr(self, '{}_cluster_{}'.format(cluster, suffix)),
                getattr(self, '{}_core_{}'.format(cluster, suffix)))

    def add_cap_entry(self, cluster, perf, clust_pow, core_pow):
        cluster_list, core_list = self._target_lists(cluster, 'cap_states')
        cluster_list.append(CapPowerState(perf, clust_pow))
        core_list.append(CapPowerState(perf, core_pow))

    def add_cluster_idle(self, cluster, values):
        cluster_list, _ = self._target_lists(cluster, 'idle_states')
        cluster_list.extend(IdlePowerState(value) for value in values)

    def add_core_idle(self, cluster, values):
        _, core_list = self._target_lists(cluster, 'idle_states')
        core_list.extend(IdlePowerState(value) for value in values)
class PowerPerformanceAnalysis(object):
    """Summarize single-core power/performance at the highest frequency
    common to both clusters.

    Populates ``self.summary`` with 'frequency', 'performance_ratio',
    'power_ratio', 'max_performance' and 'max_power'.
    """

    def __init__(self, data):
        self.summary = {}
        shared_freqs = (set(data[data.cluster == 'big'].frequency.unique()) &
                        set(data[data.cluster == 'little'].frequency.unique()))
        freq = max(shared_freqs)
        self.summary['frequency'] = freq

        def single_core_rows(cluster):
            # Single-CPU measurements for one cluster at the shared frequency.
            return data[(data.cluster == cluster) &
                        (data.frequency == freq) &
                        (data.cpus == 1)]

        big_sc = single_core_rows('big')
        little_sc = single_core_rows('little')
        self.summary['performance_ratio'] = (big_sc.performance.item() /
                                             little_sc.performance.item())
        self.summary['power_ratio'] = big_sc.power.item() / little_sc.power.item()

        one_cpu = data[data.cpus == 1]
        self.summary['max_performance'] = one_cpu.performance.max()
        self.summary['max_power'] = one_cpu.power.max()
def build_energy_model(freq_power_table, cpus_power, idle_power, first_cluster_idle_state):
    """Assemble an ``EnergyModel`` from measured power/performance tables.

    Parameters
    ----------
    freq_power_table : pandas.DataFrame
        Per-cluster/cpus/frequency power and performance measurements.
    cpus_power : pandas.DataFrame
        Power per active-CPU count, with 'little'/'big' at the top column
        level and a 'cluster' column at the second level.
    idle_power : pandas.DataFrame
        Idle-state power measurements (must have ``cpus`` and ``cluster``).
    first_cluster_idle_state : int
        Index of the first idle state affecting the whole cluster; shallower
        states are treated as per-core idle states.
    """
    # pylint: disable=too-many-locals
    em = EnergyModel()
    idle_power_sc = idle_power[idle_power.cpus == 1]
    perf_data = get_normalized_single_core_data(freq_power_table)

    for cluster in ['little', 'big']:
        cluster_cpus_power = cpus_power[cluster].dropna()
        cluster_power = cluster_cpus_power['cluster'].apply(int)
        # Core power = single-CPU power with the cluster contribution removed.
        core_power = (cluster_cpus_power['1'] - cluster_power).apply(int)
        # Scale normalized performance (0-100) to the scheduler's 0-1024 range.
        performance = (perf_data[perf_data.cluster == cluster].performance_norm * 1024 / 100).apply(int)
        for perf, clust_pow, core_pow in zip(performance, cluster_power, core_power):
            em.add_cap_entry(cluster, perf, clust_pow, core_pow)

        all_idle_power = idle_power_sc[idle_power_sc.cluster == cluster].power.values

        # CORE idle states
        # We want the delta of each state w.r.t. the power
        # consumption of the shallowest one at this level (core_ref);
        # low_filter clamps negative deltas to zero.
        idle_core_power = low_filter(all_idle_power[:first_cluster_idle_state] -
                                     all_idle_power[first_cluster_idle_state - 1])

        # CLUSTER idle states
        # We want the absolute value of each idle state
        idle_cluster_power = low_filter(all_idle_power[first_cluster_idle_state - 1:])

        em.add_cluster_idle(cluster, idle_cluster_power)
        em.add_core_idle(cluster, idle_core_power)

    return em
def generate_em_c_file(em, big_core, little_core, em_template_file, outfile):
    """Render the energy-model C source from a jinja2 template file.

    Writes the rendered text to ``outfile`` and returns it.
    """
    with open(em_template_file) as fh:
        template = jinja2.Template(fh.read())
    rendered = template.render(big_core=big_core,
                               little_core=little_core,
                               em=em)
    with open(outfile, 'w') as wfh:
        wfh.write(rendered)
    return rendered
def generate_report(freq_power_table, measured_cpus_table, cpus_table, idle_power_table,  # pylint: disable=unused-argument
                    report_template_file, device_name, em_text, outfile):
    """Render the HTML report summarizing the power/performance analysis.

    Embeds the data tables, base64-encoded plots and the generated energy
    model C text into a jinja2 template, writes the result to ``outfile``
    and returns the HTML string.
    """
    # pylint: disable=too-many-locals
    cap_power_analysis = PowerPerformanceAnalysis(freq_power_table)
    single_core_norm = get_normalized_single_core_data(freq_power_table)
    cap_power_plot = get_cap_power_plot(single_core_norm)
    idle_power_plot = get_idle_power_plot(idle_power_table)

    # One projected-power subplot per cluster ('big' first due to reversed()).
    fig, axes = plt.subplots(1, 2)
    fig.set_size_inches(16, 8)
    for i, cluster in enumerate(reversed(cpus_table.columns.levels[0])):
        projected = cpus_table[cluster].dropna(subset=['1'])
        plot_cpus_table(projected, axes[i], cluster)
    cpus_plot_data = get_figure_data(fig)

    with open(report_template_file) as fh:
        report_template = jinja2.Template(fh.read())
    html = report_template.render(
        device_name=device_name,
        freq_power_table=freq_power_table.set_index(['cluster', 'cpus', 'frequency']).to_html(),
        cap_power_analysis=cap_power_analysis,
        cap_power_plot=get_figure_data(cap_power_plot),
        idle_power_table=idle_power_table.set_index(['cluster', 'cpus', 'state']).to_html(),
        idle_power_plot=get_figure_data(idle_power_plot),
        cpus_table=cpus_table.to_html(),
        cpus_plot=cpus_plot_data,
        em_text=em_text,
    )
    with open(outfile, 'w') as wfh:
        wfh.write(html)
    return html
def wa_result_to_power_perf_table(df, performance_metric, index):
    """Aggregate long-format WA results into per-group summary statistics.

    Parameters
    ----------
    df : pandas.DataFrame
        Long-format results with 'metric', 'value', 'iteration' and the
        columns named in ``index``.
    performance_metric : str
        Metric to expose as the 'performance' column (with '_std' and
        '_error' companions).
    index : list of str
        Columns to group by (e.g. ['cluster', 'cpus', 'frequency']).

    Returns
    -------
    pandas.DataFrame
        One row per group with mean, std, count and 95% confidence error
        columns for every metric.
    """
    table = df.pivot_table(index=index + ['iteration'],
                           columns='metric', values='value').reset_index()
    result_mean = table.groupby(index).mean()
    result_std = table.groupby(index).std()
    result_std.columns = [c + ' std' for c in result_std.columns]
    result_count = table.groupby(index).count()
    result_count.columns = [c + ' count' for c in result_count.columns]
    count_sqrt = result_count.apply(lambda x: x.apply(math.sqrt))
    count_sqrt.columns = result_std.columns  # match column names for division
    result_error = 1.96 * result_std / count_sqrt  # 1.96 == 95% confidence interval
    result_error.columns = [c + ' error' for c in result_mean.columns]

    result = pd.concat([result_mean, result_std, result_count, result_error], axis=1)
    # 'iteration' was only needed for grouping; drop its aggregate columns.
    del result['iteration']
    del result['iteration std']
    del result['iteration count']
    del result['iteration error']

    # Rename the performance metric's columns and snake_case the rest.
    updated_columns = []
    for column in result.columns:
        if column == performance_metric:
            updated_columns.append('performance')
        elif column == performance_metric + ' std':
            updated_columns.append('performance_std')
        elif column == performance_metric + ' error':
            updated_columns.append('performance_error')
        else:
            updated_columns.append(column.replace(' ', '_'))
    result.columns = updated_columns
    result = result[sorted(result.columns)]
    result.reset_index(inplace=True)

    return result
def get_figure_data(fig, fmt='png'):
    """Serialize a matplotlib figure to base64-encoded image bytes.

    Parameters
    ----------
    fig : matplotlib figure
        Anything exposing a ``savefig(path, format=..., bbox_inches=...)``
        method.
    fmt : str
        Image format passed to ``savefig``.  Defaults to ``'png'``.

    Returns
    -------
    bytes
        Base64-encoded image data.
    """
    import tempfile
    # Use mkstemp instead of the deprecated, race-prone mktemp: the file is
    # created atomically so no other process can claim the name first.
    fd, tmp = tempfile.mkstemp(suffix='.' + fmt)
    try:
        os.close(fd)  # savefig reopens the path itself
        fig.savefig(tmp, format=fmt, bbox_inches='tight')
        with open(tmp, 'rb') as fh:
            image_data = b64encode(fh.read())
    finally:
        # Remove the temp file even if savefig raises (the original leaked it).
        os.remove(tmp)
    return image_data
def get_normalized_single_core_data(data):
    """Return single-CPU rows with performance/power normalized to 0-100.

    Rows with non-finite power or performance are dropped; the added
    ``performance_norm`` and ``power_norm`` columns are integer percentages
    of the respective column maxima.
    """
    # pylint: disable=no-member
    usable = ((data.cpus == 1) &
              np.isfinite(data.performance) &
              np.isfinite(data.power))
    single_core = data[usable].copy()
    for column in ('performance', 'power'):
        scaled = single_core[column] / single_core[column].max() * 100
        single_core[column + '_norm'] = scaled.apply(int)
    return single_core
def get_cap_power_plot(data_single_core):
    """Plot normalized power vs normalized performance for both clusters'
    single-core data and return the matplotlib figure."""
    big_single_core = data_single_core[(data_single_core.cluster == 'big') &
                                       (data_single_core.cpus == 1)]
    little_single_core = data_single_core[(data_single_core.cluster == 'little') &
                                          (data_single_core.cpus == 1)]

    fig, axes = plt.subplots(1, 1, figsize=(12, 8))
    axes.plot(big_single_core.performance_norm,
              big_single_core.power_norm,
              marker='o')
    axes.plot(little_single_core.performance_norm,
              little_single_core.power_norm,
              marker='o')
    # Values are percentages; leave a small margin above 100.
    axes.set_xlim(0, 105)
    axes.set_ylim(0, 105)
    axes.set_xlabel('Performance (Normalized)')
    axes.set_ylabel('Power (Normalized)')
    axes.grid()
    axes.legend(['big cluster', 'little cluster'], loc=0)
    return fig
def get_idle_power_plot(df):
    """Plot idle-state power (with error bars) per cluster as bar charts and
    return the matplotlib figure."""
    fig, axes = plt.subplots(1, 2, figsize=(15, 7))
    for cluster, ax in zip(['little', 'big'], axes):
        # One bar group per idle state, one bar per active-CPU count.
        data = df[df.cluster == cluster].pivot_table(index=['state'], columns='cpus', values='power')
        err = df[df.cluster == cluster].pivot_table(index=['state'], columns='cpus', values='power_error')
        data.plot(kind='bar', ax=ax, rot=30, yerr=err)
        ax.set_title('{} cluster'.format(cluster))
        ax.set_xlim(-1, len(data.columns) - 0.5)
        ax.set_ylabel('Power (mW)')
    return fig
def fit_polynomial(s, n):
    """Least-squares fit an order-``n`` polynomial to Series ``s`` (index as
    x, values as y) and return the fitted values evaluated at the index."""
    # pylint: disable=no-member
    poly = np.poly1d(np.polyfit(s.index, s.values, n))
    return poly(s.index)
def get_cpus_power_table(data, index, opps, leak_factors):  # pylint: disable=too-many-locals
    """Build measured and smoothed ("bs") power tables per active-CPU count.

    NOTE(review): this function uses the Python-2-only ``cmp`` builtin,
    ``sorted(..., cmp=...)`` and subscripting of ``map(...)`` -- it will not
    run unmodified on Python 3.
    """
    # pylint: disable=no-member
    power_table = data[[index, 'cluster', 'cpus', 'power']].pivot_table(index=index,
                                                                        columns=['cluster', 'cpus'],
                                                                        values='power')
    bs_power_table = pd.DataFrame(index=power_table.index, columns=power_table.columns)
    for cluster in power_table.columns.levels[0]:
        # Extrapolate 0-CPU ("cluster only") power by extending the 1->2 CPU
        # delta backwards from the 1-CPU measurement.
        power_table[cluster, 0] = (power_table[cluster, 1] -
                                   (power_table[cluster, 2] -
                                    power_table[cluster, 1]))
        # Smooth the measured 1- and 2-CPU curves with a quadratic fit.
        bs_power_table.loc[power_table[cluster, 1].notnull(), (cluster, 1)] = fit_polynomial(power_table[cluster, 1].dropna(), 2)
        bs_power_table.loc[power_table[cluster, 2].notnull(), (cluster, 2)] = fit_polynomial(power_table[cluster, 2].dropna(), 2)

        if opps[cluster] is None:
            # No OPP table: simple linear extrapolation for cluster power.
            bs_power_table.loc[bs_power_table[cluster, 1].notnull(), (cluster, 0)] = \
                (2 * power_table[cluster, 1] - power_table[cluster, 2]).values
        else:
            # With an OPP table, correct the extrapolation for leakage,
            # which scales with voltage cubed (relative to a 0.9 V baseline).
            voltages = opps[cluster].set_index('frequency').sort_index()
            leakage = leak_factors[cluster] * 2 * voltages['voltage']**3 / 0.9**3
            leakage_delta = leakage - leakage[leakage.index[0]]
            bs_power_table.loc[:, (cluster, 0)] = \
                (2 * bs_power_table[cluster, 1] + leakage_delta - bs_power_table[cluster, 2])

    # re-order columns and rename colum '0' to 'cluster'
    power_table = power_table[sorted(power_table.columns,
                                     cmp=lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]))]
    bs_power_table = bs_power_table[sorted(bs_power_table.columns,
                                           cmp=lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]))]
    old_levels = power_table.columns.levels
    power_table.columns.set_levels([old_levels[0], list(map(str, old_levels[1])[:-1]) + ['cluster']],
                                   inplace=True)
    bs_power_table.columns.set_levels([old_levels[0], list(map(str, old_levels[1])[:-1]) + ['cluster']],
                                      inplace=True)
    return power_table, bs_power_table
def plot_cpus_table(projected, ax, cluster):
    """Plot a projected power table (one line per row) onto ``ax``, labelled
    with the cluster name."""
    projected.T.plot(ax=ax, marker='o')
    ax.set_title('{} cluster'.format(cluster))
    ax.set_xticklabels(projected.columns)
    ax.set_xticks(range(0, 5))
    ax.set_xlim(-0.5, len(projected.columns) - 0.5)
    ax.set_ylabel('Power (mW)')
    ax.grid(True)
def opp_table(d):
    """Convert a {frequency: voltage} mapping into a two-column DataFrame,
    or pass through None."""
    if d is None:
        return None
    rows = list(d.items())
    return pd.DataFrame(rows, columns=['frequency', 'voltage'])
class EnergyModelInstrument(Instrument):
name = 'energy_model'
desicription = """
Generates a power mode for the device based on specified workload.
This instrument will execute the workload specified by the agenda (currently, only ``sysbench`` is
supported) and will use the resulting performance and power measurments to generate a power mode for
the device.
This instrument requires certain features to be present in the kernel:
1. cgroups and cpusets must be enabled.
2. cpufreq and userspace governor must be enabled.
3. cpuidle must be enabled.
"""
parameters = [
Parameter('device_name', kind=caseless_string,
description="""The name of the device to be used in generating the model. If not specified,
``device.name`` will be used. """),
Parameter('big_core', kind=caseless_string,
description="""The name of the "big" core in the big.LITTLE system; must match
one of the values in ``device.core_names``. """),
Parameter('performance_metric', kind=caseless_string, mandatory=True,
description="""Metric to be used as the performance indicator."""),
Parameter('power_metric', kind=list_or_caseless_string,
description="""Metric to be used as the power indicator. The value may contain a
``{core}`` format specifier that will be replaced with names of big
and little cores to drive the name of the metric for that cluster.
Ether this or ``energy_metric`` must be specified but not both."""),
Parameter('energy_metric', kind=list_or_caseless_string,
description="""Metric to be used as the energy indicator. The value may contain a
``{core}`` format specifier that will be replaced with names of big
and little cores to drive the name of the metric for that cluster.
this metric will be used to derive power by deviding through by
execution time. Either this or ``power_metric`` must be specified, but
not both."""),
Parameter('power_scaling_factor', kind=float, default=1.0,
description="""Power model specfies power in milliWatts. This is a scaling factor that
power_metric values will be multiplied by to get milliWatts."""),
Parameter('big_frequencies', kind=list_of_ints,
description="""List of frequencies to be used for big cores. These frequencies must
be supported by the cores. If this is not specified, all available
frequencies for the core (as read from cpufreq) will be used."""),
Parameter('little_frequencies', kind=list_of_ints,
description="""List of frequencies to be used for little cores. These frequencies must
be supported by the cores. If this is not specified, all available
frequencies for the core (as read from cpufreq) will be used."""),
Parameter('idle_workload', kind=str, default='idle',
description="Workload to be used while measuring idle power."),
Parameter('idle_workload_params', kind=dict, default={},
description="Parameter to pass to the idle workload."),
Parameter('first_cluster_idle_state', kind=int, default=-1,
description='''The index of the first cluster idle state on the device. Previous states
are assumed to be core idles. The default is ``-1``, i.e. only the last
idle state is assumed to affect the entire cluster.'''),
Parameter('no_hotplug', kind=bool, default=False,
description='''This options allows running the instrument without hotpluging cores on and off.
Disabling hotplugging will most likely produce a less accurate power model.'''),
Parameter('num_of_freqs_to_thermal_adjust', kind=int, default=0,
description="""The number of frequencies begining from the highest, to be adjusted for
the thermal effect."""),
Parameter('big_opps', kind=opp_table,
description="""OPP table mapping frequency to voltage (kHz --> mV) for the big cluster."""),
Parameter('little_opps', kind=opp_table,
description="""OPP table mapping frequency to voltage (kHz --> mV) for the little cluster."""),
Parameter('big_leakage', kind=int, default=120,
description="""
Leakage factor for the big cluster (this is specific to a particular core implementation).
"""),
Parameter('little_leakage', kind=int, default=60,
description="""
Leakage factor for the little cluster (this is specific to a particular core implementation).
"""),
]
def validate(self):
if import_error:
message = 'energy_model instrument requires pandas, jinja2 and matplotlib Python packages to be installed; got: "{}"'
raise InstrumentError(message.format(import_error.message))
for capability in ['cgroups', 'cpuidle']:
if not self.device.has(capability):
message = 'The Device does not appear to support {}; does it have the right module installed?'
raise ConfigError(message.format(capability))
device_cores = set(self.device.core_names)
if (self.power_metric and self.energy_metric) or not (self.power_metric or self.energy_metric):
raise ConfigError('Either power_metric or energy_metric must be specified (but not both).')
if not device_cores:
raise ConfigError('The Device does not appear to have core_names configured.')
elif len(device_cores) != 2:
raise ConfigError('The Device does not appear to be a big.LITTLE device.')
if self.big_core and self.big_core not in self.device.core_names:
raise ConfigError('Specified big_core "{}" is in divice {}'.format(self.big_core, self.device.name))
if not self.big_core:
self.big_core = self.device.core_names[-1] # the last core is usually "big" in existing big.LITTLE devices
if not self.device_name:
self.device_name = self.device.name
if self.num_of_freqs_to_thermal_adjust and not instrument_is_installed('daq'):
self.logger.warn('Adjustment for thermal effect requires daq instrument. Disabling adjustment')
self.num_of_freqs_to_thermal_adjust = 0
    def initialize(self, context):
        """One-time setup: resolve template resources, derive the little-core
        name, and prepare the device and the iteration job queue."""
        self.number_of_cpus = {}
        self.report_template_file = context.resolver.get(File(self, REPORT_TEMPLATE_FILE))
        self.em_template_file = context.resolver.get(File(self, EM_TEMPLATE_FILE))
        # With exactly two core names, the little core is whichever one is
        # not the big core.
        self.little_core = (set(self.device.core_names) - set([self.big_core])).pop()
        self.perform_runtime_validation()
        self.enable_all_cores()
        self.configure_clusters()
        self.discover_idle_states()
        self.disable_thermal_management()
        self.initialize_job_queue(context)
        self.initialize_result_tracking()
    def setup(self, context):
        """For idle_* iterations, enable idle states up to the spec's index
        and disable all deeper ones; no-op for other iterations."""
        if not context.spec.label.startswith('idle_'):
            return
        for idle_state in self.get_device_idle_states(self.measured_cluster):
            # disable=1 for states deeper than the one being measured.
            if idle_state.index > context.spec.idle_state_index:
                idle_state.disable = 1
            else:
                idle_state.disable = 0
    def fast_start(self, context):  # pylint: disable=unused-argument
        # Record wall-clock start so fast_stop can compute the run time,
        # used later to convert energy metrics into power.
        self.start_time = time.time()

    def fast_stop(self, context):  # pylint: disable=unused-argument
        # Elapsed workload execution time, in seconds.
        self.run_time = time.time() - self.start_time
    def on_iteration_start(self, context):
        # Point measurement at the cluster targeted by this iteration's spec.
        self.setup_measurement(context.spec.cluster)
    def thermal_correction(self, context):
        """Return a thermally-adjusted power value for this iteration, or 0
        when no adjustment is configured or the frequency is not affected."""
        if not self.num_of_freqs_to_thermal_adjust or self.num_of_freqs_to_thermal_adjust > len(self.big_frequencies):
            return 0
        # Only the N highest big-cluster frequencies get adjusted.
        freqs = self.big_frequencies[-self.num_of_freqs_to_thermal_adjust:]
        spec = context.result.spec
        if spec.frequency not in freqs:
            return 0
        # Raw power trace collected by the daq instrument for the big core.
        data_path = os.path.join(context.output_directory, 'daq', '{}.csv'.format(self.big_core))
        data = pd.read_csv(data_path)['power']
        # NOTE(review): pd.rolling_median was removed in modern pandas (use
        # Series.rolling(...).median()); this code targets an older pandas.
        return _adjust_for_thermal(data, filt_method=lambda x: pd.rolling_median(x, 1000), thresh=0.9, window=5000)
# slow to make sure power results have been generated
    def slow_update_result(self, context):  # pylint: disable=too-many-branches
        """Collect this iteration's performance and power numbers into the
        frequency or idle data tables.

        Runs at 'slow' priority so that power results from other instruments
        have already been generated.
        """
        spec = context.result.spec
        cluster = spec.cluster
        is_freq_iteration = spec.label.startswith('freq_')
        perf_metric = 0
        power_metric = 0
        thermal_adjusted_power = 0
        if is_freq_iteration and cluster == 'big':
            thermal_adjusted_power = self.thermal_correction(context)
        # Sum power across matching metrics; energy metrics are converted to
        # power by dividing by the measured run time.  A non-zero thermal
        # correction replaces the big-cluster measurement.
        for metric in context.result.metrics:
            if metric.name == self.performance_metric:
                perf_metric = metric.value
            elif thermal_adjusted_power and metric.name in self.big_power_metrics:
                power_metric += thermal_adjusted_power * self.power_scaling_factor
            elif (cluster == 'big') and metric.name in self.big_power_metrics:
                power_metric += metric.value * self.power_scaling_factor
            elif (cluster == 'little') and metric.name in self.little_power_metrics:
                power_metric += metric.value * self.power_scaling_factor
            elif thermal_adjusted_power and metric.name in self.big_energy_metrics:
                power_metric += thermal_adjusted_power / self.run_time * self.power_scaling_factor
            elif (cluster == 'big') and metric.name in self.big_energy_metrics:
                power_metric += metric.value / self.run_time * self.power_scaling_factor
            elif (cluster == 'little') and metric.name in self.little_energy_metrics:
                power_metric += metric.value / self.run_time * self.power_scaling_factor

        # Power is always required; performance only for frequency iterations.
        if not (power_metric and (perf_metric or not is_freq_iteration)):
            message = 'Incomplete results for {} iteration{}'
            raise InstrumentError(message.format(context.result.spec.id, context.current_iteration))

        if is_freq_iteration:
            index_matter = [cluster, spec.num_cpus,
                            spec.frequency, context.result.iteration]
            data = self.freq_data
        else:
            index_matter = [cluster, spec.num_cpus,
                            spec.idle_state_id, spec.idle_state_desc, context.result.iteration]
            data = self.idle_data
        if self.no_hotplug:
            # due to that fact that hotpluging was disabled, power has to be artificially scaled
            # to the number of cores that should have been active if hotplugging had occurred.
            power_metric = spec.num_cpus * (power_metric / self.number_of_cpus[cluster])

        data.append(index_matter + ['performance', perf_metric])
        data.append(index_matter + ['power', power_metric])
def before_overall_results_processing(self, context):
    # pylint: disable=too-many-locals
    """Turn the idle/frequency measurements collected during the run into
    the energy-model artifacts: CSV power tables, the generated C energy
    model, and an HTML report, all registered as run artifacts."""
    if not self.idle_data or not self.freq_data:
        # One of the sweeps never ran (aborted run) -- nothing to build.
        self.logger.warning('Run aborted early; not generating energy_model.')
        return
    output_directory = os.path.join(context.output_directory, 'energy_model')
    os.makedirs(output_directory)
    # Idle power table from the accumulated idle-sweep rows.
    df = pd.DataFrame(self.idle_data, columns=['cluster', 'cpus', 'state_id',
                                               'state', 'iteration', 'metric', 'value'])
    idle_power_table = wa_result_to_power_perf_table(df, '', index=['cluster', 'cpus', 'state'])
    idle_output = os.path.join(output_directory, IDLE_TABLE_FILE)
    with open(idle_output, 'w') as wfh:
        idle_power_table.to_csv(wfh, index=False)
    context.add_artifact('idle_power_table', idle_output, 'export')
    # Frequency/performance power table from the busy-sweep rows.
    df = pd.DataFrame(self.freq_data,
                      columns=['cluster', 'cpus', 'frequency', 'iteration', 'metric', 'value'])
    freq_power_table = wa_result_to_power_perf_table(df, self.performance_metric,
                                                     index=['cluster', 'cpus', 'frequency'])
    freq_output = os.path.join(output_directory, FREQ_TABLE_FILE)
    with open(freq_output, 'w') as wfh:
        freq_power_table.to_csv(wfh, index=False)
    context.add_artifact('freq_power_table', freq_output, 'export')
    if self.big_opps is None or self.little_opps is None:
        # Deliberately best-effort: we still build the tables, just without
        # leakage adjustment.
        message = 'OPPs not specified for one or both clusters; cluster power will not be adjusted for leakage.'
        self.logger.warning(message)
    opps = {'big': self.big_opps, 'little': self.little_opps}
    leakages = {'big': self.big_leakage, 'little': self.little_leakage}
    try:
        measured_cpus_table, cpus_table = get_cpus_power_table(freq_power_table, 'frequency', opps, leakages)
    except (ValueError, KeyError, IndexError) as e:
        self.logger.error('Could not create cpu power tables: {}'.format(e))
        return
    measured_cpus_output = os.path.join(output_directory, MEASURED_CPUS_TABLE_FILE)
    with open(measured_cpus_output, 'w') as wfh:
        measured_cpus_table.to_csv(wfh)
    context.add_artifact('measured_cpus_table', measured_cpus_output, 'export')
    cpus_output = os.path.join(output_directory, CPUS_TABLE_FILE)
    with open(cpus_output, 'w') as wfh:
        cpus_table.to_csv(wfh)
    context.add_artifact('cpus_table', cpus_output, 'export')
    em = build_energy_model(freq_power_table, cpus_table, idle_power_table, self.first_cluster_idle_state)
    em_file = os.path.join(output_directory, '{}_em.c'.format(self.device_name))
    em_text = generate_em_c_file(em, self.big_core, self.little_core,
                                 self.em_template_file, em_file)
    context.add_artifact('em', em_file, 'data')
    report_file = os.path.join(output_directory, 'report.html')
    generate_report(freq_power_table, measured_cpus_table, cpus_table,
                    idle_power_table, self.report_template_file,
                    self.device_name, em_text, report_file)
    context.add_artifact('pm_report', report_file, 'export')
def initialize_result_tracking(self):
    """Reset the per-run data buffers and expand the power/energy metric
    name templates for the big and little cores."""
    self.freq_data, self.idle_data = [], []
    self.big_power_metrics, self.little_power_metrics = [], []
    self.big_energy_metrics, self.little_energy_metrics = [], []
    if self.power_metric:
        templates = self.power_metric
        self.big_power_metrics = [t.format(core=self.big_core) for t in templates]
        self.little_power_metrics = [t.format(core=self.little_core) for t in templates]
    else:  # must be energy_metric
        templates = self.energy_metric
        self.big_energy_metrics = [t.format(core=self.big_core) for t in templates]
        self.little_energy_metrics = [t.format(core=self.little_core) for t in templates]
def configure_clusters(self):
    """Create the 'big'/'little' cpuset cgroups and put every cluster's
    cpufreq governor into userspace mode."""
    self.measured_cores = self.measuring_cores = None
    controller = self.device.get_cgroup_controller('cpuset')
    self.cpuset = controller
    controller.create_group('big', self.big_cpus, [0])
    controller.create_group('little', self.little_cpus, [0])
    for cluster_id in set(self.device.core_clusters):
        self.device.set_cluster_governor(cluster_id, 'userspace')
def discover_idle_states(self):
    """Read the cpuidle states of one online CPU per cluster into
    self.big_idle_states / self.little_idle_states.

    Raises DeviceError if either cluster exposes fewer than two idle
    states (the sweep needs at least two to be meaningful).
    """
    online_cpu = self.device.get_online_cpus(self.big_core)[0]
    self.big_idle_states = self.device.get_cpuidle_states(online_cpu)
    online_cpu = self.device.get_online_cpus(self.little_core)[0]
    self.little_idle_states = self.device.get_cpuidle_states(online_cpu)
    if not (len(self.big_idle_states) >= 2 and len(self.little_idle_states) >= 2):
        # Fixed typo in the original message ("appeart" -> "appear").
        raise DeviceError('There do not appear to be at least two idle states '
                          'on at least one of the clusters.')
def setup_measurement(self, measured):
    """Select *measured* ('big' or 'little') as the cluster under
    measurement; the opposite cluster hosts everything else. Finishes
    with a full device reset."""
    measuring = 'big' if measured == 'little' else 'little'
    self.measured_cluster, self.measuring_cluster = measured, measuring
    if measured == 'big':
        self.measured_cpus, self.measuring_cpus = self.big_cpus, self.little_cpus
    else:
        self.measured_cpus, self.measuring_cpus = self.little_cpus, self.big_cpus
    self.reset()
def reset(self):
    """Return the device to a known state: all cores online, all idle
    states enabled, cgroups repopulated, and every non-workload task
    (including the adb/ssh server and its children) confined to the
    measuring cluster."""
    self.enable_all_cores()
    self.enable_all_idle_states()
    self.reset_cgroups()
    self.cpuset.move_all_tasks_to(self.measuring_cluster)
    # The connection server itself cannot be left on the measured cluster,
    # so it and its children are pinned to the measuring cpus via taskset.
    server_process = 'adbd' if self.device.platform == 'android' else 'sshd'
    server_pids = self.device.get_pids_of(server_process)
    children_ps = [e for e in self.device.ps()
                   if e.ppid in server_pids and e.name != 'sshd']
    # NOTE(review): the filter above always excludes children named 'sshd',
    # even when the server is adbd -- presumably it should compare against
    # *server_process*; confirm before changing.
    children_pids = [e.pid for e in children_ps]
    pids_to_move = server_pids + children_pids
    self.cpuset.root.add_tasks(pids_to_move)
    for pid in pids_to_move:
        try:
            self.device.execute('busybox taskset -p 0x{:x} {}'.format(list_to_mask(self.measuring_cpus), pid))
        except DeviceError:
            # Best effort: a pid may have exited between listing and pinning.
            pass
def enable_all_cores(self):
    """Bring every core of every type online, then refresh the cached
    big/little online-cpu lists."""
    # Counter maps core name -> how many of that core exist; bringing
    # that many online enables them all.  (.iteritems(): Python 2 file.)
    counter = Counter(self.device.core_names)
    for core, number in counter.iteritems():
        self.device.set_number_of_online_cores(core, number)
    self.big_cpus = self.device.get_online_cpus(self.big_core)
    self.little_cpus = self.device.get_online_cpus(self.little_core)
def enable_all_idle_states(self):
    """Clear the 'disable' flag on every idle state of every online CPU."""
    for cpu_id in self.device.online_cpus:
        for idle_state in self.device.get_cpuidle_states(cpu_id):
            idle_state.disable = 0
def reset_cgroups(self):
    """Refresh the cached online-cpu lists and re-assign them to the
    'big' and 'little' cpuset groups (memory node 0)."""
    self.big_cpus = self.device.get_online_cpus(self.big_core)
    self.little_cpus = self.device.get_online_cpus(self.little_core)
    for group, cpus in ((self.cpuset.big, self.big_cpus),
                        (self.cpuset.little, self.little_cpus)):
        group.set(cpus, 0)
def perform_runtime_validation(self):
    """Validate that the device can run the sweeps.

    Checks root access and userspace-governor availability, then
    validates any user-requested frequencies against what each core
    type actually supports, defaulting to all available frequencies
    when none were requested.

    Raises InstrumentError for device problems and ConfigError for
    unsupported frequencies.
    """
    if not self.device.is_rooted:
        raise InstrumentError('the device must be rooted to generate energy models')
    if 'userspace' not in self.device.list_available_cluster_governors(0):
        raise InstrumentError('userspace cpufreq governor must be enabled')

    def validated(core, requested):
        # Shared big/little logic (previously duplicated inline):
        # empty request means "sweep everything the core supports".
        available = self.device.list_available_core_frequencies(core)
        if not requested:
            return available
        for freq in requested:
            if freq not in available:
                raise ConfigError('Frequency {} is not supported by {} cores'.format(freq, core))
        return requested

    self.big_frequencies = validated(self.big_core, self.big_frequencies)
    self.little_frequencies = validated(self.little_core, self.little_frequencies)
def initialize_job_queue(self, context):
    """Replace the user's job queue with the generated per-cluster
    idle/frequency sweep specs and persist the effective config."""
    # Collect the distinct user-provided specs (preserving order).
    old_specs = []
    for job in context.runner.job_queue:
        if job.spec not in old_specs:
            old_specs.append(job.spec)
    new_specs = self.get_cluster_specs(old_specs, 'big', context)
    new_specs.extend(self.get_cluster_specs(old_specs, 'little', context))
    # Update config to reflect jobs that will actually run.
    context.config.workload_specs = new_specs
    config_file = os.path.join(context.host_working_directory, 'run_config.json')
    with open(config_file, 'wb') as wfh:
        context.config.serialize(wfh)
    context.runner.init_queue(new_specs)
def get_cluster_specs(self, old_specs, cluster, context):
    """Generate the measurement specs for *cluster*.

    Produces one idle-workload spec per (idle state, active cpu count),
    then one clone of each user spec per (frequency, cpu count).  Every
    spec is loaded and validated before being returned.
    """
    core = self.get_core_name(cluster)
    self.number_of_cpus[cluster] = sum([1 for c in self.device.core_names if c == core])
    cluster_frequencies = self.get_frequencies_param(cluster)
    if not cluster_frequencies:
        raise InstrumentError('Could not read available frequencies for {}'.format(core))
    min_frequency = min(cluster_frequencies)
    idle_states = self.get_device_idle_states(cluster)
    new_specs = []
    # Idle sweep: measure each idle state at each online-cpu count,
    # always at the cluster's minimum frequency.
    for state in idle_states:
        for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1):
            spec = old_specs[0].copy()
            spec.workload_name = self.idle_workload
            spec.workload_parameters = self.idle_workload_params
            spec.idle_state_id = state.id
            spec.idle_state_desc = state.desc
            spec.idle_state_index = state.index
            if not self.no_hotplug:
                spec.runtime_parameters['{}_cores'.format(core)] = num_cpus
            spec.runtime_parameters['{}_frequency'.format(core)] = min_frequency
            if self.device.platform == 'chromeos':
                spec.runtime_parameters['ui'] = 'off'
            spec.cluster = cluster
            spec.num_cpus = num_cpus
            spec.id = '{}_idle_{}_{}'.format(cluster, state.id, num_cpus)
            spec.label = 'idle_{}'.format(cluster)
            spec.number_of_iterations = old_specs[0].number_of_iterations
            spec.load(self.device, context.config.ext_loader)
            spec.workload.init_resources(context)
            spec.workload.validate()
            new_specs.append(spec)
    # Busy sweep: clone each user spec for every (frequency, cpu count).
    for old_spec in old_specs:
        if old_spec.workload_name not in ['sysbench', 'dhrystone']:
            raise ConfigError('Only sysbench and dhrystone workloads currently supported for energy_model generation.')
        for freq in cluster_frequencies:
            for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1):
                spec = old_spec.copy()
                spec.runtime_parameters['{}_frequency'.format(core)] = freq
                if not self.no_hotplug:
                    spec.runtime_parameters['{}_cores'.format(core)] = num_cpus
                if self.device.platform == 'chromeos':
                    spec.runtime_parameters['ui'] = 'off'
                spec.id = '{}_{}_{}'.format(cluster, num_cpus, freq)
                spec.label = 'freq_{}_{}'.format(cluster, spec.label)
                spec.workload_parameters['taskset_mask'] = list_to_mask(self.get_cpus(cluster))
                spec.workload_parameters['threads'] = num_cpus
                if old_spec.workload_name == 'sysbench':
                    # max_requests set to an arbitrarily high value to make
                    # sure sysbench runs for the full duration even on highly
                    # performant cores.
                    spec.workload_parameters['max_requests'] = 10000000
                spec.cluster = cluster
                spec.num_cpus = num_cpus
                spec.frequency = freq
                spec.load(self.device, context.config.ext_loader)
                spec.workload.init_resources(context)
                spec.workload.validate()
                new_specs.append(spec)
    return new_specs
def disable_thermal_management(self):
    """Write 'disabled' into the mode file of every thermal zone exposed
    under /sys/class/thermal (no-op when the device has none)."""
    if not self.device.file_exists('/sys/class/thermal/thermal_zone0'):
        return
    listing = self.device.execute('ls /sys/class/thermal/thermal_zone*')
    for zone_path in listing.strip().split():
        mode_file = '{}/mode'.format(zone_path)
        if self.device.file_exists(mode_file):
            self.device.set_sysfile_value(mode_file, 'disabled')
def get_device_idle_states(self, cluster):
    """Collect the cpuidle states of every online CPU in *cluster*."""
    core = self.big_core if cluster == 'big' else self.little_core
    return [state
            for cpu in self.device.get_online_cpus(core)
            for state in self.device.get_cpuidle_states(cpu)]
def get_core_name(self, cluster):
    """Map a cluster label ('big'/other) to its core name."""
    return self.big_core if cluster == 'big' else self.little_core
def get_cpus(self, cluster):
    """Map a cluster label ('big'/other) to its cached cpu list."""
    return self.big_cpus if cluster == 'big' else self.little_cpus
def get_frequencies_param(self, cluster):
    """Map a cluster label ('big'/other) to its configured frequency list."""
    return self.big_frequencies if cluster == 'big' else self.little_frequencies
def _adjust_for_thermal(data, filt_method=lambda x: x, thresh=0.9, window=5000, tdiff_threshold=10000):
n = filt_method(data)
n = n[~np.isnan(n)] # pylint: disable=no-member
d = np.diff(n) # pylint: disable=no-member
d = d[~np.isnan(d)] # pylint: disable=no-member
dmin = min(d)
dmax = max(d)
index_up = np.max((d > dmax * thresh).nonzero()) # pylint: disable=no-member
index_down = np.min((d < dmin * thresh).nonzero()) # pylint: disable=no-member
low_average = np.average(n[index_up:index_up + window]) # pylint: disable=no-member
high_average = np.average(n[index_down - window:index_down]) # pylint: disable=no-member
if low_average > high_average or index_down - index_up < tdiff_threshold:
return 0
else:
return low_average
if __name__ == '__main__':
    # Standalone entry point: rebuild the energy-model artifacts from
    # previously exported CSV tables.  Usage: <script> <indir> <outdir>
    import sys  # pylint: disable=wrong-import-position,wrong-import-order
    indir, outdir = sys.argv[1], sys.argv[2]
    device_name = 'odroidxu3'
    big_core = 'a15'
    little_core = 'a7'
    first_cluster_idle_state = -1

    this_dir = os.path.dirname(__file__)
    report_template_file = os.path.join(this_dir, REPORT_TEMPLATE_FILE)
    em_template_file = os.path.join(this_dir, EM_TEMPLATE_FILE)

    freq_power_table = pd.read_csv(os.path.join(indir, FREQ_TABLE_FILE))
    # FIXME(review): pd.read_csv returns a single DataFrame; unpacking it
    # into two names iterates its columns, leaving both variables bound to
    # column labels rather than tables.  measured_cpus_table presumably
    # needs its own read from MEASURED_CPUS_TABLE_FILE -- confirm.
    measured_cpus_table, cpus_table = pd.read_csv(os.path.join(indir, CPUS_TABLE_FILE),  # pylint: disable=unbalanced-tuple-unpacking
                                                  header=range(2), index_col=0)
    idle_power_table = pd.read_csv(os.path.join(indir, IDLE_TABLE_FILE))

    if not os.path.exists(outdir):
        os.makedirs(outdir)
    report_file = os.path.join(outdir, 'report.html')
    em_file = os.path.join(outdir, '{}_em.c'.format(device_name))

    em = build_energy_model(freq_power_table, cpus_table,
                            idle_power_table, first_cluster_idle_state)
    em_text = generate_em_c_file(em, big_core, little_core,
                                 em_template_file, em_file)
    generate_report(freq_power_table, measured_cpus_table, cpus_table,
                    idle_power_table, report_template_file, device_name,
                    em_text, report_file)
| apache-2.0 |
exepulveda/swfc | python/plot_cross_stats_bm.py | 1 | 7207 | import sys
import random
import logging
import collections
import math
import sys
from scipy.stats import gaussian_kde
import matplotlib as mpl
#mpl.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import pandas as pd
from case_study_bm import setup_case_study_ore, attributes
def correlation_matrix(corr, labels):
    """Render *corr* as a colour-mapped matrix with *labels* on both axes.

    Returns the (figure, axes) pair so callers can tweak or save it.
    """
    fig, ax = plt.subplots()
    image = ax.imshow(corr, interpolation="nearest", cmap='jet')
    positions = np.arange(len(labels))
    ax.set_xticks(positions)
    ax.set_xticklabels(labels, rotation=90, fontsize=8, ha='left')
    ax.set_yticks(positions)
    ax.set_yticklabels(labels, fontsize=8)
    fig.colorbar(image, ax=ax)
    return fig, ax
if __name__ == "__main__":
    # Build an NV x NV grid of cross-statistics plots for the selected
    # block-model variables: histograms/pies on the diagonal, scatter or
    # box plots in the upper triangle, summary tables in the lower one.
    locations,data,min_values,max_values,scale,var_types,categories = setup_case_study_ore()

    #'RockType','Mgt','Hem','Ab','Act','Ap','Bt','O','F','Na','Mg','Al','Si','P','Cl','K','Ca','Ti','V','Mn','Fe','SG','Fe_Rec']
    #     0       1     2    3    4    5   6   7   8   9   10   11   12  13  14   15  16  17  18  19   20   21   22
    variables = (0,1,2,3,5,6,20,22)
    NV = len(variables)
    N,ND = data.shape

    fig,axs = plt.subplots(NV,NV,figsize=(30,30))
    fig.subplots_adjust(wspace=0.1)
    # NOTE(review): this first pass only fetches each axis; the
    # set_axis_off call is commented out, so it has no visible effect.
    for i in range(NV):
        for j in range(NV):
            ax = axs[i,j]
            #ax.set_axis_off()

    #plot histograms on diagonal (pie charts when var_types[v] == 3,
    #i.e. the variable is categorical)
    for i,v in enumerate(variables):
        ax = axs[i,i]
        x = data[:,v]
        ax.set_title(attributes[v])
        if var_types[v] != 3:
            # NOTE(review): hist(normed=True) is deprecated in newer
            # matplotlib (use density=True) -- confirm pinned version.
            n, bins, patches = ax.hist(x,color='blue', alpha=0.5,normed=True)
            #print(i,v,bins)
            min_x = np.min(x)
            max_x = np.max(x)
            x_grid = np.linspace(bins[0], bins[-1], 1000)
            #KDE overlay on top of the histogram
            bandwidth=(max_x - min_x) * 0.05
            kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1))
            pdf = kde.evaluate(x_grid)
            ax.plot(x_grid, pdf, color='red', alpha=0.5, lw=1)
        else:
            #pie chart
            c = collections.Counter(np.int32(x))
            codes,count = zip(*c.most_common())
            ax.pie(count,autopct='%1.1f%%')

    #ploting scatter plot in the right upper triangle; the mirrored
    #lower-triangle axis (ax2) carries a summary table instead
    sample_size = 5000
    for i in range(NV):
        v = variables[i]
        x = data[:,variables[i]]
        if var_types[v] == 3:
            x = np.int32(x)
            c = collections.Counter(x)
            mc = c.most_common()
            mc.sort()
            codes,count = zip(*mc)
        for j in range(i+1,NV):
            w = variables[j]
            y = data[:,variables[j]]
            ax = axs[i,j]
            ax2 = axs[j,i]
            ax2.set_axis_off()
            if var_types[v] != 3 and var_types[w] != 3:
                # continuous vs continuous: sampled scatter + stats table
                indices = np.random.choice(N,size=sample_size,replace=False)
                ax.scatter(x[indices],y[indices],s=1)
                # add stats table
                #clust_data = []
                #label = ['Min','Mean','Median','Max','Std']
                #clust_data = [[np.min(x)],[np.mean(x)],[np.median(x)],np.max(x),[np.std(x)]]
                #the_table = ax.table(cellText=clust_data,rowLabels=label,loc='center')
                #ax.text(2, 10, r'$\cos(2 \pi t) \exp(-t)$', fontdict=font)
                #ax2.text(2, 10, r'$min$={min}'.format(min=np.min(x)))
                #ax2.text(0.1,0.6,'$min={:0.3f}$'.format(np.min(x)),fontsize=12)
                #ax2.text(0.1,0.5,'$mean={:0.3f}$'.format(np.mean(x)),fontsize=12)
                #ax2.text(0.1,0.4,'r$median={:0.3f}$'.format(np.median(x)),fontsize=12)
                #ax2.text(0.1,0.3,'$max={:0.3f}$'.format(np.max(x)),fontsize=12)
                #ax2.text(0.1,0.2,'$\sigma={:0.3f}$'.format(np.std(x)),fontsize=12)
                #table version
                row_labels= ['min','mean','median','max','std']
                celldata= [
                    ['{:0.3f}'.format(np.min(x))],
                    ['{:0.3f}'.format(np.mean(x))],
                    ['{:0.3f}'.format(np.median(x))],
                    ['{:0.3f}'.format(np.max(x))],
                    ['{:0.3f}'.format(np.std(x))]
                ]
                ax2.table(cellText=celldata,rowLabels=row_labels,loc='center left',fontsize=24,colWidths = [0.4])
                #row_labels=['min','mean','median','max','$\sigma$']
                #table_vals=['${:0.3f}$'.format(np.min(x)),'${:0.3f}$'.format(np.min(x)),'${:0.3f}$'.format(np.min(x)),'${:0.3f}$'.format(np.min(x))]
                #table = r'''\begin{tabular}{ c | c | c | c } & col1 & col2 & col3 \\\hline row1 & 11 & 12 & 13 \\\hline row2 & 21 & 22 & 23 \\\hline row3 & 31 & 32 & 33 \end{tabular}'''
                #plt.text(0.1,0.8,table,size=12)
            elif var_types[v] == 3 and var_types[w] != 3:
                #categorical vs continuous: per-category boxplot + stats table
                d = []
                row_labels= ['min','mean','median','max','std']
                col_labels= [str(x) for x in codes]
                celldata = [ [ None for x in range(len(codes))] for y in range(5)]
                for k,c in enumerate(codes):
                    indices = np.where(x == c)[0]
                    yindices = y[indices]
                    d += [yindices]
                    celldata[0][k] = '{:0.3f}'.format(np.min(yindices))
                    celldata[1][k] = '{:0.3f}'.format(np.mean(yindices))
                    celldata[2][k] = '{:0.3f}'.format(np.median(yindices))
                    celldata[3][k] = '{:0.3f}'.format(np.max(yindices))
                    celldata[4][k] = '{:0.3f}'.format(np.std(yindices))
                ax.boxplot(d) #,showmeans=True)
                table = ax2.table(cellText=celldata,loc='center left',rowLabels=row_labels,colLabels=col_labels,fontsize=24,colWidths = [0.2]*len(codes))
                #cell = table._cells[(1, 0)]
                #cell.set_text_props(ha='left')
                #print(i,j,celldata)

    #ploting main stats as text lower
    # NOTE(review): this second pass re-draws the same scatter plots as the
    # loop above and its categorical branch only builds an empty list --
    # it looks like leftover/dead code; confirm before removing.
    sample_size = 5000
    for i in range(NV):
        v = variables[i]
        x = data[:,variables[i]]
        if var_types[v] == 3:
            x = np.int32(x)
            c = collections.Counter(x)
            codes,count = zip(*c.most_common())
        for j in range(i+1,NV):
            w = variables[j]
            y = data[:,variables[j]]
            ax = axs[i,j]
            if var_types[v] != 3 and var_types[w] != 3:
                indices = np.random.choice(N,size=sample_size,replace=False)
                ax.scatter(x[indices],y[indices],s=1)
            elif var_types[v] == 3 and var_types[w] != 3:
                #boxplot
                d = []
    #
    #plt.savefig("../figures/bm_cross_stats.svg", format="svg")
    plt.savefig("../figures/bm_cross_stats.jpg", format="jpg")
| gpl-3.0 |
hsuantien/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). LSHForest index
have sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5

# Initialize the range of `n_samples`: log-spaced index sizes from min to max
n_samples_values = np.logspace(np.log10(n_samples_min),
                               np.log10(n_samples_max),
                               n_steps).astype(np.int)

# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
                         n_features=n_features, centers=n_centers, shuffle=True,
                         random_state=0)
# The first n_queries rows are held out as query points; the rest is indexed.
queries = all_data[:n_queries]
index_data = all_data[n_queries:]

# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time for each index size, comparing the
# approximate LSHForest queries against exact brute-force queries.
for n_samples in n_samples_values:
    X = index_data[:n_samples]
    # Initialize LSHForest for queries of a single neighbor
    lshf = LSHForest(n_estimators=20, n_candidates=200,
                     n_neighbors=10).fit(X)
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
                            n_neighbors=10).fit(X)
    time_approx = []
    time_exact = []
    accuracy = []

    for i in range(n_iter):
        # pick one query at random to study query time variability in LSHForest
        query = queries[rng.randint(0, n_queries)]

        t0 = time.time()
        exact_neighbors = nbrs.kneighbors(query, return_distance=False)
        time_exact.append(time.time() - t0)

        t0 = time.time()
        approx_neighbors = lshf.kneighbors(query, return_distance=False)
        time_approx.append(time.time() - t0)

        # fraction of the approximate neighbors that are truly exact ones
        accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())

    average_time_exact = np.mean(time_exact)
    average_time_approx = np.mean(time_approx)
    speedup = np.array(time_exact) / np.array(time_approx)
    average_speedup = np.mean(speedup)
    mean_accuracy = np.mean(accuracy)
    std_accuracy = np.std(accuracy)
    print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
          "accuracy: %0.2f +/-%0.2f" %
          (n_samples, average_time_exact, average_time_approx, average_speedup,
           mean_accuracy, std_accuracy))

    accuracies.append(mean_accuracy)
    std_accuracies.append(std_accuracy)
    average_times_exact.append(average_time_exact)
    average_times_approx.append(average_time_approx)
    std_times_approx.append(np.std(time_approx))
    average_speedups.append(average_speedup)
    std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
             fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
         label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
          "nearest neighbors queries")

# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
             fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")

# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)  # precision is in [0, 1]
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")

plt.show()
| bsd-3-clause |
themrmax/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 78 | 2702 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
def true_fun(X):
    """Ground-truth target: a stretch of the cosine curve, cos(1.5*pi*x)."""
    return np.cos(X * (1.5 * np.pi))
np.random.seed(0)

n_samples = 30
degrees = [1, 4, 15]  # under-fit, good fit, over-fit

# Noisy samples of the true function on [0, 1].
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1

plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
    ax = plt.subplot(1, len(degrees), i + 1)
    plt.setp(ax, xticks=(), yticks=())

    # Polynomial feature expansion followed by ordinary least squares.
    polynomial_features = PolynomialFeatures(degree=degrees[i],
                                             include_bias=False)
    linear_regression = LinearRegression()
    pipeline = Pipeline([("polynomial_features", polynomial_features),
                         ("linear_regression", linear_regression)])
    pipeline.fit(X[:, np.newaxis], y)

    # Evaluate the models using crossvalidation
    scores = cross_val_score(pipeline, X[:, np.newaxis], y,
                             scoring="neg_mean_squared_error", cv=10)

    X_test = np.linspace(0, 1, 100)
    plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
    plt.plot(X_test, true_fun(X_test), label="True function")
    plt.scatter(X, y, edgecolor='b', s=20, label="Samples")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.xlim((0, 1))
    plt.ylim((-2, 2))
    plt.legend(loc="best")
    # scores are negated MSE, hence the minus sign in the title
    plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
        degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
Mohitsharma44/citibike-challenge | citibike_1.py | 2 | 3200 | import numpy as np
from datetime import datetime, timedelta
from sys import argv
import matplotlib.pyplot as plt
class CitiBikeChallenge():
    """Computes simple usage statistics over a Citi Bike trips CSV.

    Column layout assumed from the indexing below: start time in column 1,
    stop time in column 2, user type in column 12, gender in column 14,
    all values quoted.  NOTE: written for Python 2 (print statements,
    xrange).
    """

    def __init__(self):
        pass

    def load_file(self, path):
        """Load the whole CSV into self.data as raw, untyped records."""
        print 'Loading data ... '
        self.data = np.genfromtxt(path, dtype=None,
                                  delimiter=',', skip_header=True)
        return self.data

    def gender(self, data):
        """Return per-row 0/1 flag arrays (males, females, unknown).

        Gender codes compared against column 14: 1 = male, 2 = female,
        0 = unknown.  These are row masks, not counts -- callers must
        sum them to get totals.
        """
        print 'Detecting gender Distribution...'
        # The zero pre-allocations below are immediately overwritten by
        # the boolean comparisons and could be removed.
        self.males = np.zeros(data.shape[0])
        self.females = np.zeros(data.shape[0])
        self.unknown = np.zeros(data.shape[0])
        self.males = np.char.strip(data[0:, 14],
                                   '"').astype(np.float) == 1
        self.females = np.char.strip(data[0:, 14],
                                     '"').astype(np.float) == 2
        self.unknown = np.char.strip(data[:,14],
                                     '"').astype(np.float) == 0
        return(self.males.astype(int), self.females.astype(int),
               self.unknown.astype(int))

    def avg_ride_time(self, data):
        """Return (mean duration, start times, stop times, durations)."""
        print 'Calculating average ride time ...'

        def _calc_time(data):
            # strip surrounding quotes, then parse 'YYYY-mm-dd HH:MM:SS'
            return datetime.strptime(str(np.char.strip(data,
                                                       '"')),
                                     '%Y-%m-%d %H:%M:%S')
        # Vectorize method
        v_calc_time = np.vectorize(_calc_time)
        self.start = v_calc_time(data[:,1])
        self.stop = v_calc_time(data[:,2])
        # Take Average
        self.diff = np.subtract(self.stop, self.start)
        return (np.sum(self.diff)/self.start.shape[0], self.start,
                self.stop, self.diff)

    def peak_hours(self, start_time):
        """Return a 24 x n_rides 0/1 matrix: row h flags rides starting in hour h."""
        self.idx = np.zeros([24, start_time.shape[0]])
        print 'Calculating usage per hours ...'

        def _check(start_time, _cond):
            # Classify rides into hours
            if start_time.hour == _cond.total_seconds()/3600:
                return 1
            else:
                return 0
        # Vectorize method
        vcheck = np.vectorize(_check)
        for i in xrange(24):
            _cond = timedelta(hours = i)
            self.idx[i] = vcheck(start_time,
                                 _cond)#.nonzero()[0].shape[0]
        return self.idx

    def tourists(self, data):
        """Return boolean masks (subscribers, customers) from column 12.

        NOTE(review): despite its name, self.tourists holds the
        'Subscriber' mask (regular users); the walk-up 'Customer' rides
        are the likelier tourists.  The attribute also shadows this
        method on the instance after the first call -- confirm intent.
        """
        print 'Detecting tourists ...'
        self.tourists = np.char.strip(data[:,12], '"') == 'Subscriber'
        self.customers = np.char.strip(data[:,12], '"') == 'Customer'
        return(self.tourists, self.customers)
if __name__ == '__main__':
print 'Please Wait ..'
path = argv[1]
cbc = CitiBikeChallenge()
# Read file
f = cbc.load_file(argv[1])
print 'Total entries read: ',f.shape[0]
# Gender
g = cbc.gender(f)
print 'Total Male: ',g[0]
print 'Total Female: ',g[1]
# Average Ride Time
art,start,_,_ = cbc.avg_ride_time(f)
print 'Average Ride Time: ',art
# Peak Hours
pk = cbc.peak_hours(start)
# Tourists
utype = cbc.tourists(f)
print 'Total New Yorkers: ',utype[0]
print 'Total Tourists: ',utype[1]
| mit |
vsjha18/finplots | candlestick.py | 1 | 7937 | """
Created on 05-Apr-2015
@author: vivejha
"""
#from . import log
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
from matplotlib.finance import candlestick_ohlc
from finplots.overlays import plot_sma
from finplots.overlays import plot_volume
from finplots.overlays import plot_bollinger_bands
from finplots.macd import plot_macd
from finplots.rsi import plot_rsi
from finplots.stochastics import plot_slow_stochastic
from finplots import style
# global settings
# plt.style.use('dark_background')
# plt.style.use('ggplot')
# changes the fontsize
matplotlib.rcParams.update({'font.size':10})
def candlestick_plot(df,
                     smas=[100, 50, 5 , 10],
                     style=style,
                     figsize=(18, 10),
                     rsi_setup = dict(period=14),
                     macd_setup = dict(slow=26, fast=12, ema=8),
                     bbands_setup = dict(period=20, multiplier=2),
                     sstoch_setup = dict(period=14, smoothing=3)
                     ):
    """ plot candlestick chart

    Draws a candlestick price panel for *df* (columns: date, open, high,
    low, close and optionally volume) with SMA, Bollinger-band and volume
    overlays, plus RSI, MACD and slow-stochastic sub-panels, then shows
    the figure.

    NOTE(review): the defaults (smas list and the *_setup dicts) are
    mutable objects shared across calls -- safe only while nothing
    mutates them; confirm.
    """
    fig = plt.figure(figsize=figsize, facecolor=style.face_color) # 18, 10 for full screen

    # create main axis for charting prices
    ax1 = plt.subplot2grid((10,4), (0,0),
                           rowspan=6,
                           colspan=4,
                           axisbg=style.axis_bg_color)

    # Synthesise a zero volume column when the data has none so the
    # downstream overlays keep working.
    if 'volume' not in df:
        df['volume'] = np.zeros(len(df))

    # times = pd.date_range('2014-01-01', periods=l, freq='1d')
    df.date = pd.to_datetime(df.date)
    df.date = [mdates.date2num(d) for d in df.date]
    df = df[::-1]
    payload = df[['date', 'open', 'high', 'low', 'close', 'volume']].values

    candlestick_ohlc(ax1, payload, width=0.5, colorup=style.cdl_up_color, colordown=style.cdl_down_color)
    annotate_max(ax1, df)

    ax1.grid(True, alpha=style.grid_alpha, color=style.grid_color)
    plt.ylabel('Stock Price', color=style.label_color)

    # determines number of points to be displayed on x axis
    ax1.xaxis.set_major_locator(mticker.MaxNLocator(50))
    ax1.yaxis.set_major_locator(mticker.MaxNLocator(15))

    # determines format of markers on the xaxis
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%y'))

    # label color
    ax1.yaxis.label.set_color(style.label_color)

    # tick params color
    ax1.tick_params(axis='y', colors=style.tick_color)

    # spine colors
    ax1.spines['bottom'].set_color(style.spine_color)
    ax1.spines['top'].set_color(style.spine_color)
    ax1.spines['left'].set_color(style.spine_color)
    ax1.spines['right'].set_color(style.spine_color)

    # make the x tick label invisible
    plt.setp(ax1.get_xticklabels(), visible=False)

    # OVERLAY SIMPLE MOVING AVERAGES
    for idx, period in enumerate(smas):
        ax1 = plot_sma(ax1, df,
                       period=period,
                       color=style.sma_colors[idx])

    # OVERLAY BOLLINGER BAND
    ax1 = plot_bollinger_bands(ax1, df, period=bbands_setup['period'], multiplier=bbands_setup['multiplier'])

    # OVERLAY VOLUME
    # it is important to plot volume after the simple moving
    # average to avoid a warning message 'no labelled objects found'
    if 'volume' in df:
        ax1 = plot_volume(ax1, df)

    # show tick params on right axis as well
    ax1.tick_params(labelright=True)

    # RELATIVE STRENGTH INDEX
    ax_rsi = plt.subplot2grid((10,4), (9,0),
                              rowspan=1,
                              colspan=4,
                              sharex=ax1,
                              axisbg=style.axis_bg_color)
    plot_rsi(ax_rsi, df, period=rsi_setup['period'])

    # MOVING AVERAGE CONVERGENCE DIVERGENCE
    ax_macd = plt.subplot2grid((10,4), (8,0),
                               rowspan=1,
                               colspan=4,
                               sharex=ax1,
                               axisbg=style.axis_bg_color)
    ax_macd = plot_macd(ax_macd, df,
                        slow=macd_setup['slow'],
                        fast=macd_setup['fast'],
                        ema=macd_setup['ema'])

    # SLOW STOCHASTIC
    # create axis for charting prices
    ax_sstoch = plt.subplot2grid((10,4), (6,0),
                                 rowspan=2,
                                 colspan=4,
                                 sharex=ax1,
                                 axisbg=style.axis_bg_color)
    ax_sstoch = plot_slow_stochastic(ax_sstoch, df,
                                     period=sstoch_setup['period'],
                                     smoothing=sstoch_setup['smoothing'])

    # Dead code kept for reference: an older inline MACD implementation
    # superseded by plot_macd above.
    #
    # ema_fast, ema_slow, macd = moving_average_convergence_divergence(df.close)
    # ema9 = exponential_moving_average(macd, nema)
    #
    # # plot_macd(ax_macd, df, style=style, slow=macd_setup['slow'], fast=macd_setup['fast'], ema=macd_setup['nema'] )
    # ax3.plot(df.index, macd, linewidth=2, color='lime')
    # ax3.plot(df.index, ema9, linewidth=2, color='hotpink')
    #
    # # FROM HERE
    # # prune the yaxis
    # ax3.yaxis.set_major_locator(mticker.MaxNLocator(nbins=3, prune='lower'))
    #
    # # print text
    # ax3.text(0.015, 0.95, 'MACD 12,26,9', va='top', color='white', transform=ax3.transAxes)
    # # put markers for signal line
    # # following line needs as many stuff as there are markers
    # # hence we have commented this out.
    # # ax_rsi.axes.yaxis.set_ticklabels([30, 70])
    #
    # #ax3.set_yticks([])
    #
    # # provide the yaxis range
    # #ax3.set_ylim(0, 100)
    #
    # # draw horizontal lines
    # # ax3.axhline(70, color=style.rsi_signal_line_color, alpha=style.rsi_signal_line_alpha)
    # # ax3.axhline(50, color=style.rsi_signal_line_color, alpha=style.rsi_signal_line_alpha)
    # #ax3.axhline(0, color='w')
    # # ax3.axhline(30, color=style.rsi_signal_line_color, alpha=style.rsi_signal_line_alpha)
    #
    # # fill color
    # div = macd - ema9
    # ax3.fill_between(df.index, div, 0, facecolor='deepskyblue', edgecolor='w', alpha=0.3)
    #
    # # ax3.fill_between(df.index, rsi_data, 30, where=(rsi_data<=30), facecolor=style.rsi_oversold_color)
    # # label color
    # ax3.yaxis.label.set_color(style.label_color)
    #
    # # spine colors
    # ax3.spines['bottom'].set_color(style.spine_color)
    # ax3.spines['top'].set_color(style.spine_color)
    # ax3.spines['left'].set_color(style.spine_color)
    # ax3.spines['right'].set_color(style.spine_color)
    #
    # # tick params color
    # ax3.tick_params(axis='y', colors='w')
    # ax3.tick_params(axis='x', colors='w')
    #
    # # plot the grids.
    # ax3.grid(True, alpha=style.grid_alpha, color=style.grid_color)
    # plt.ylabel('MACD', color=style.label_color)
    # plt.setp(ax3.get_xticklabels(), visible=False)
    # # Till here

    # make the labels a bit rotated for better visibility
    for label in ax_rsi.xaxis.get_ticklabels():
        label.set_rotation(45)

    # adjust the size of the plot
    #plt.subplots_adjust(left=0.10, bottom=0.19, right=0.93, top=0.95, wspace=0.20, hspace=0.0)
    plt.subplots_adjust(left=0.07, bottom=0.10, right=0.97, top=0.95, wspace=0.20, hspace=0.0)
    # plt.xlabel('Date', color=style.label_color)
    plt.suptitle('Stock Price Chart', color=style.label_color)
    plt.show()
def annotate_max(ax, df, text='Max'):
    """Annotate the highest ``high`` value of *df* on axes *ax*.

    Parameters
    ----------
    ax : matplotlib axes
        Axes to draw the annotation on (only ``ax.annotate`` is used).
    df : pandas.DataFrame
        Must provide ``high`` and ``date`` columns. NOTE(review): the
        positional index returned by ``list.index`` is used as a label
        with ``df.date[idx]``, so this assumes a default RangeIndex --
        confirm against callers.
    text : str
        Label placed next to the arrow.
    """
    # 'peak' instead of 'max' -- avoid shadowing the builtin
    peak = df.high.max()
    # positional index of the first occurrence of the maximum
    idx = df.high.tolist().index(peak)
    ax.annotate(text,
                xy=(df.date[idx], df['high'][idx]),  # data point to mark
                xytext=(0.5, 1),                     # label in axes fraction
                xycoords='data',
                textcoords='axes fraction',
                arrowprops=dict(facecolor='grey', shrink=0.05),
                horizontalalignment='left',
                verticalalignment='bottom',
                )
def marker(idx, ycord, text, orgin, color):
    """Placeholder for drawing a marker at (idx, ycord) -- not implemented.

    NOTE(review): 'orgin' looks like a typo for 'origin'; kept as-is so
    keyword callers are not broken -- confirm before renaming.
    """
    pass
| gpl-3.0 |
gem/oq-engine | openquake/hazardlib/tests/gsim/sgobba_2020_test.py | 1 | 10324 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2020, GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
import numpy as np
import pandas as pd
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, SA
from openquake.hazardlib.geo import Point
# from openquake.hazardlib.geo.mesh import RectangularMesh
from openquake.hazardlib.tests.gsim.mgmpe.dummy import Dummy
from openquake.hazardlib.gsim.sgobba_2020 import SgobbaEtAl2020
from openquake.hazardlib.contexts import DistancesContext
# Folder with the verification tables used by the tests below
DATA_FOLDER = os.path.join(os.path.dirname(__file__), 'data', 'SEA20')
# Folder with the residuals data (event.csv) shipped with the GMM module
DATA_FOLDER2 = os.path.join(os.path.dirname(__file__), '..', '..', 'gsim', 'sgobba_2020')
class Sgobba2020Test(unittest.TestCase):
    """Verification tests for the Sgobba et al. (2020) GMM.

    Each test reads a verification table, groups its rows by event
    (epicenter), rebuilds the sites / rupture / distance contexts and
    checks that the GMM reproduces the tabulated means or standard
    deviations.
    """

    def _find_event_id(self, df2, lat, lon):
        """Return the event id matching (lat, lon) in *df2*, or None."""
        rows = np.where((df2['Ev_lat'] == lat) & (df2['Ev_lon'] == lon))[0]
        if len(rows) > 0:
            return df2['id'][rows[0]]
        return None

    def test_ERGODIC(self):
        """Means of the ergodic model vs. the ERG verification table."""
        fname = 'ValidationTable_MEAN_ERG_full.csv'
        df = pd.read_csv(os.path.join(DATA_FOLDER, fname))

        # Collect the distinct epicenters (one per event)
        epicenters = []
        for _, row in df.iterrows():
            pnt = Point(row.lon_epi, row.lat_epi)
            if pnt not in epicenters:
                epicenters.append(pnt)

        # Validate the model event by event
        for epi in epicenters:
            lon = epi.x
            lat = epi.y
            sel = np.where((df['lat_epi'] == lat) & (df['lon_epi'] == lon))
            subset_df = df.loc[sel]

            # Site locations and Joyner-Boore distances for this event
            locs = []
            rjb = []
            for _, rec in subset_df.iterrows():
                locs.append(Point(rec.lon_sites, rec.lat_sites))
                rjb.append(rec.dist_jb)

            # Create the sites
            sites = Dummy.get_site_collection(len(rjb), vs30=800.,
                                              location=locs)

            # Create distance and rupture contexts. NOTE: 'rec' is the last
            # row of the subset; magnitude and epicenter are constant within
            # an event, so any row works here.
            rup = Dummy.get_rupture(mag=rec.rup_mag, ev_lat=rec.lat_epi,
                                    ev_lon=rec.lon_epi)
            dists = DistancesContext()
            dists.rjb = np.array(rjb)

            # Instantiate the ergodic version of the GMM
            gmmref = SgobbaEtAl2020(cluster=0)

            periods = [PGA(), SA(period=0.2), SA(period=0.50251256281407),
                       SA(period=1.0), SA(period=2.0)]
            tags = ['gmm_PGA', 'gmm_SA02', 'gmm_SA05', 'gmm_SA10',
                    'gmm_SA20']
            stdt = [const.StdDev.TOTAL]

            # Compute and check results for the ergodic model
            for imt, tag in zip(periods, tags):
                mr, _ = gmmref.get_mean_and_stddevs(sites, rup, dists, imt,
                                                    stdt)
                expected_ref = subset_df[tag].to_numpy()  # table values in g
                computed_ref = np.exp(mr)  # OQ means are in ln(g)
                np.testing.assert_allclose(computed_ref, expected_ref,
                                           rtol=1e-5)

    def test_NON_ERGODIC(self):
        """Means of the non-ergodic model vs. the NERG verification table."""
        fname = 'ValidationTable_MEAN_NERG_full.csv'
        df = pd.read_csv(os.path.join(DATA_FOLDER, fname))

        # Event metadata with the ids needed by the non-ergodic model
        fname2 = 'event.csv'
        df2 = pd.read_csv(os.path.join(DATA_FOLDER2, fname2),
                          dtype={'id': str})

        # Collect the distinct epicenters (one per event)
        epicenters = []
        for _, row in df.iterrows():
            pnt = Point(row.lon_epi, row.lat_epi)
            if pnt not in epicenters:
                epicenters.append(pnt)

        # Validate the model event by event
        for epi in epicenters:
            lon = epi.x
            lat = epi.y
            sel = np.where((df['lat_epi'] == lat) & (df['lon_epi'] == lon))
            subset_df = df.loc[sel]

            # Test both soil (0) and bedrock (1) versions when the event
            # has bedrock rows, otherwise only the soil version
            if sum(subset_df['flag_bedrock']) > 0:
                bedrock_flags = [0, 1]
            else:
                bedrock_flags = [0]

            for flag in bedrock_flags:
                sel = np.where((df['lat_epi'] == lat) &
                               (df['lon_epi'] == lon) &
                               (df['flag_bedrock'] == flag))
                subset_df = df.loc[sel]

                ev_id = self._find_event_id(df2, lat, lon)
                print('event_id: ' + str(ev_id))
                print('flag_bedrock: ' + str(flag))

                # Site locations and Joyner-Boore distances for this subset
                locs = []
                rjb = []
                for _, rec in subset_df.iterrows():
                    locs.append(Point(rec.lon_sites, rec.lat_sites))
                    rjb.append(rec.dist_jb)

                # Create the sites
                sites = Dummy.get_site_collection(len(rjb), vs30=800.,
                                                  location=locs)

                # Create distance and rupture contexts ('rec' is the last
                # row; magnitude/epicenter are constant within an event)
                rup = Dummy.get_rupture(mag=rec.rup_mag, ev_lat=rec.lat_epi,
                                        ev_lon=rec.lon_epi)
                dists = DistancesContext()
                dists.rjb = np.array(rjb)

                # Instantiate the GMM; cluster is not passed because it has
                # to be detected automatically
                gmm = SgobbaEtAl2020(event_id=ev_id, site=sites,
                                     bedrock=(flag == 1))

                periods = [PGA(), SA(period=0.2),
                           SA(period=0.50251256281407), SA(period=1.0),
                           SA(period=2.0)]
                tags = ['gmm_PGA', 'gmm_SA02', 'gmm_SA05', 'gmm_SA10',
                        'gmm_SA20']
                stdt = [const.StdDev.TOTAL]

                # Compute and check results for the non-ergodic model
                for imt, tag in zip(periods, tags):
                    mean, _ = gmm.get_mean_and_stddevs(sites, rup, dists,
                                                       imt, stdt)
                    expected = subset_df[tag].to_numpy()  # table values in g
                    computed = np.exp(mean)  # OQ means are in ln(g)
                    np.testing.assert_allclose(computed, expected, rtol=1e-5)

    def test_SIGMA(self):
        """Standard deviations vs. the STD verification table."""
        fname = 'ValidationTable_STD_full.csv'
        df = pd.read_csv(os.path.join(DATA_FOLDER, fname))

        # Event metadata with the ids needed by the non-ergodic model
        fname2 = 'event.csv'
        df2 = pd.read_csv(os.path.join(DATA_FOLDER2, fname2),
                          dtype={'id': str})

        # Collect the distinct epicenters (one per event)
        epicenters = []
        for _, row in df.iterrows():
            pnt = Point(row.lon_epi, row.lat_epi)
            if pnt not in epicenters:
                epicenters.append(pnt)

        for epi in epicenters:
            lon = epi.x
            lat = epi.y
            sel = np.where((df['lat_epi'] == lat) & (df['lon_epi'] == lon))
            subset_df = df.loc[sel]

            # Test both soil (0) and bedrock (1) versions when the event
            # has bedrock rows, otherwise only the soil version
            if sum(subset_df['flag_bedrock']) > 0:
                bedrock_flags = [0, 1]
            else:
                bedrock_flags = [0]

            for flag in bedrock_flags:
                sel = np.where((df['lat_epi'] == lat) &
                               (df['lon_epi'] == lon) &
                               (df['flag_bedrock'] == flag))
                subset_df = df.loc[sel]

                ev_id = self._find_event_id(df2, lat, lon)
                print('event_id: ' + str(ev_id))
                print('flag_bedrock: ' + str(flag))

                # Site locations and Joyner-Boore distances for this subset
                locs = []
                rjb = []
                for _, rec in subset_df.iterrows():
                    locs.append(Point(rec.lon_sites, rec.lat_sites))
                    rjb.append(rec.dist_jb)

                # Create the sites
                sites = Dummy.get_site_collection(len(rjb), vs30=800.,
                                                  location=locs)

                # Create distance and rupture contexts ('rec' is the last
                # row; magnitude/epicenter are constant within an event)
                rup = Dummy.get_rupture(mag=rec.rup_mag, ev_lat=rec.lat_epi,
                                        ev_lon=rec.lon_epi)
                dists = DistancesContext()
                dists.rjb = np.array(rjb)

                # Instantiate the GMM; cluster is not passed because it has
                # to be detected automatically
                gmm = SgobbaEtAl2020(event_id=ev_id, site=sites,
                                     bedrock=(flag == 1))

                periods = [PGA(), SA(period=0.2),
                           SA(period=0.50251256281407), SA(period=1.0),
                           SA(period=2.0)]
                tags = ['PGA', 'SA02', 'SA05', 'SA10', 'SA20']
                stdt = [const.StdDev.TOTAL]

                # Compute and check the standard deviations
                for imt, tag in zip(periods, tags):
                    _, stdr = gmm.get_mean_and_stddevs(sites, rup, dists,
                                                       imt, stdt)
                    # the verification table stores log10 values; OQ
                    # standard deviations are natural log
                    expected = np.log(10.0**subset_df[tag].to_numpy())
                    np.testing.assert_allclose(stdr, expected, rtol=1e-5)
# THE END!
| agpl-3.0 |
ephes/scikit-learn | sklearn/grid_search.py | 103 | 36232 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
    """Grid of parameters with a discrete number of values for each.

    Can be used to iterate over parameter value combinations with the
    Python built-in function iter.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_grid : dict of string to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.

        An empty dict signifies default parameters.

        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.

    Examples
    --------
    >>> from sklearn.grid_search import ParameterGrid
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True

    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
    True

    See also
    --------
    :class:`GridSearchCV`:
        uses ``ParameterGrid`` to perform a full parallelized parameter search.
    """

    def __init__(self, param_grid):
        if isinstance(param_grid, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_grid = [param_grid]
        self.param_grid = param_grid

    def __iter__(self):
        """Iterate over the points in the grid.

        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                yield {}
            else:
                keys, values = zip(*items)
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params

    def __len__(self):
        """Number of points on the grid."""
        # reduce-based product that can handle plain iterables; named
        # 'grid_product' so it does not shadow itertools.product imported
        # at module level
        grid_product = partial(reduce, operator.mul)
        return sum(grid_product(len(v) for v in p.values()) if p else 1
                   for p in self.param_grid)

    def __getitem__(self, ind):
        """Get the parameters that would be ``ind``th in iteration

        Parameters
        ----------
        ind : int
            The iteration index

        Returns
        -------
        params : dict of string to any
            Equal to list(self)[ind]
        """
        # This is used to make discrete sampling without replacement memory
        # efficient.
        for sub_grid in self.param_grid:
            # XXX: could memoize information used here
            if not sub_grid:
                if ind == 0:
                    return {}
                else:
                    ind -= 1
                    continue

            # Reverse so most frequent cycling parameter comes first
            keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
            sizes = [len(v_list) for v_list in values_lists]
            # np.prod, not np.product: the latter alias was removed in
            # NumPy 2.0
            total = np.prod(sizes)

            if ind >= total:
                # Try the next grid
                ind -= total
            else:
                out = {}
                for key, v_list, n in zip(keys, values_lists, sizes):
                    ind, offset = divmod(ind, n)
                    out[key] = v_list[offset]
                return out

        raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
    """Generator on parameters sampled from given distributions.

    Non-deterministic iterable over random candidate combinations for hyper-
    parameter search. When every parameter is given as a finite list of
    values, sampling is done without replacement; as soon as one parameter
    is given as a distribution, sampling with replacement is used. Using
    continuous distributions for continuous parameters is highly
    recommended.

    Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not
    accept a custom RNG instance and always use the singleton RNG from
    ``numpy.random``. Hence setting ``random_state`` will not guarantee a
    deterministic iteration whenever ``scipy.stats`` distributions are used
    to define the parameter search space.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_distributions : dict
        Keys are parameter names; values are either distributions exposing
        a ``rvs`` sampling method, or lists of candidate values from which
        a value is drawn uniformly.

    n_iter : integer
        Number of parameter settings that are produced.

    random_state : int or RandomState
        Pseudo random number generator state used for random uniform
        sampling from lists of possible values instead of scipy.stats
        distributions.

    Returns
    -------
    params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to
        as sampled value.

    Examples
    --------
    >>> from sklearn.grid_search import ParameterSampler
    >>> from scipy.stats.distributions import expon
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> param_grid = {'a':[1, 2], 'b': expon()}
    >>> param_list = list(ParameterSampler(param_grid, n_iter=4))
    >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
    ...                 for d in param_list]
    >>> rounded_list == [{'b': 0.89856, 'a': 1},
    ...                  {'b': 0.923223, 'a': 1},
    ...                  {'b': 1.878964, 'a': 2},
    ...                  {'b': 1.038159, 'a': 2}]
    True
    """
    def __init__(self, param_distributions, n_iter, random_state=None):
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state

    def __iter__(self):
        # Sampling without replacement is only possible when every value
        # is a finite list; a single ``rvs`` distribution forces sampling
        # with replacement.
        all_lists = np.all([not hasattr(v, "rvs")
                            for v in self.param_distributions.values()])
        rnd = check_random_state(self.random_state)

        if all_lists:
            # Enumerate the finite grid and draw distinct indices from it.
            param_grid = ParameterGrid(self.param_distributions)
            grid_size = len(param_grid)

            if grid_size < self.n_iter:
                raise ValueError(
                    "The total space of parameters %d is smaller "
                    "than n_iter=%d." % (grid_size, self.n_iter)
                    + " For exhaustive searches, use GridSearchCV.")
            for pos in sample_without_replacement(grid_size, self.n_iter,
                                                  random_state=rnd):
                yield param_grid[pos]
            return

        # Sort the keys for reproducibility of the sampling order.
        items = sorted(self.param_distributions.items())
        for _ in six.moves.range(self.n_iter):
            sampled = dict()
            for name, spec in items:
                if hasattr(spec, "rvs"):
                    sampled[name] = spec.rvs()
                else:
                    sampled[name] = spec[rnd.randint(len(spec))]
            yield sampled

    def __len__(self):
        """Number of points that will be sampled."""
        return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
                   verbose, error_score='raise', **fit_params):
    """Run fit on one set of parameters.

    Parameters
    ----------
    X : array-like, sparse matrix or list
        Input data.

    y : array-like or None
        Targets for input data.

    estimator : estimator object
        This estimator will be cloned and then fitted.

    parameters : dict
        Parameters to be set on estimator for this grid point.

    train : ndarray, dtype int or bool
        Boolean mask or indices for training set.

    test : ndarray, dtype int or bool
        Boolean mask or indices for test set.

    scorer : callable or None.
        If provided must be a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    verbose : int
        Verbosity level.

    **fit_params : kwargs
        Additional parameter passed to the fit function of the estimator.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Returns
    -------
    score : float
        Score of this parameter setting on given training / test split.

    parameters : dict
        The parameters that have been evaluated.

    n_samples_test : int
        Number of test samples in this split.
    """
    # Delegate the actual work to the shared cross-validation helper and
    # repackage its result with the evaluated parameters.
    fit_result = _fit_and_score(estimator, X, y, scorer, train, test,
                                verbose, parameters, fit_params,
                                error_score)
    score, n_samples_test, _ = fit_result
    return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
                                      MetaEstimatorMixin)):
    """Base class for hyper parameter search with cross-validation."""

    @abstractmethod
    def __init__(self, estimator, scoring=None,
                 fit_params=None, n_jobs=1, iid=True,
                 refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
                 error_score='raise'):

        self.scoring = scoring
        self.estimator = estimator
        self.n_jobs = n_jobs
        self.fit_params = fit_params if fit_params is not None else {}
        self.iid = iid
        self.refit = refit
        self.cv = cv
        self.verbose = verbose
        self.pre_dispatch = pre_dispatch
        self.error_score = error_score

    @property
    def _estimator_type(self):
        # Mirror the wrapped estimator so meta-tools (e.g. is_classifier)
        # treat the search object as the same kind of estimator.
        return self.estimator._estimator_type

    def score(self, X, y=None):
        """Returns the score on the given data, if the estimator has been refit

        This uses the score defined by ``scoring`` where provided, and the
        ``best_estimator_.score`` method otherwise.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        Returns
        -------
        score : float

        Notes
        -----
         * The long-standing behavior of this method changed in version 0.16.
         * It no longer uses the metric provided by ``estimator.score`` if the
           ``scoring`` parameter was set when fitting.

        """
        if self.scorer_ is None:
            raise ValueError("No score function explicitly defined, "
                             "and the estimator doesn't provide one %s"
                             % self.best_estimator_)
        if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
            warnings.warn("The long-standing behavior to use the estimator's "
                          "score function in {0}.score has changed. The "
                          "scoring parameter is now used."
                          "".format(self.__class__.__name__),
                          ChangedBehaviorWarning)
        return self.scorer_(self.best_estimator_, X, y)

    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Call predict on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``predict``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.predict(X)

    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        """Call predict_proba on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_proba``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.predict_proba(X)

    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        """Call predict_log_proba on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_log_proba``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.predict_log_proba(X)

    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        """Call decision_function on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``decision_function``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.decision_function(X)

    @if_delegate_has_method(delegate='estimator')
    def transform(self, X):
        """Call transform on the estimator with the best found parameters.

        Only available if the underlying estimator supports ``transform`` and
        ``refit=True``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.transform(X)

    @if_delegate_has_method(delegate='estimator')
    def inverse_transform(self, Xt):
        """Call inverse_transform on the estimator with the best found parameters.

        Only available if the underlying estimator implements
        ``inverse_transform`` and ``refit=True``.

        Parameters
        -----------
        Xt : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        # BUG FIX: this method used to delegate to ``transform`` instead of
        # ``inverse_transform`` on the best estimator.
        return self.best_estimator_.inverse_transform(Xt)

    def _fit(self, X, y, parameter_iterable):
        """Actual fitting,  performing the search over parameters."""

        estimator = self.estimator
        cv = self.cv
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)

        n_samples = _num_samples(X)
        X, y = indexable(X, y)

        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)'
                                 % (len(y), n_samples))
        cv = check_cv(cv, X, y, classifier=is_classifier(estimator))

        if self.verbose > 0:
            if isinstance(parameter_iterable, Sized):
                n_candidates = len(parameter_iterable)
                print("Fitting {0} folds for each of {1} candidates, totalling"
                      " {2} fits".format(len(cv), n_candidates,
                                         n_candidates * len(cv)))

        base_estimator = clone(self.estimator)

        pre_dispatch = self.pre_dispatch

        out = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=pre_dispatch
        )(
            delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
                                    train, test, self.verbose, parameters,
                                    self.fit_params, return_parameters=True,
                                    error_score=self.error_score)
                for parameters in parameter_iterable
                for train, test in cv)

        # Out is a list of triplet: score, estimator, n_test_samples
        n_fits = len(out)
        n_folds = len(cv)

        # Aggregate the per-fold scores of each candidate; consecutive
        # groups of n_folds entries in `out` belong to the same candidate.
        scores = list()
        grid_scores = list()
        for grid_start in range(0, n_fits, n_folds):
            n_test_samples = 0
            score = 0
            all_scores = []
            for this_score, this_n_test_samples, _, parameters in \
                    out[grid_start:grid_start + n_folds]:
                all_scores.append(this_score)
                if self.iid:
                    # weight each fold's score by its test-set size
                    this_score *= this_n_test_samples
                    n_test_samples += this_n_test_samples
                score += this_score
            if self.iid:
                score /= float(n_test_samples)
            else:
                score /= float(n_folds)
            scores.append((score, parameters))
            # TODO: shall we also store the test_fold_sizes?
            grid_scores.append(_CVScoreTuple(
                parameters,
                score,
                np.array(all_scores)))
        # Store the computed scores
        self.grid_scores_ = grid_scores

        # Find the best parameters by comparing on the mean validation score:
        # note that `sorted` is deterministic in the way it breaks ties
        best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
                      reverse=True)[0]
        self.best_params_ = best.parameters
        self.best_score_ = best.mean_validation_score

        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best.parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
class GridSearchCV(BaseSearchCV):
    """Exhaustive search over specified parameter values for an estimator.

    Important members are fit, predict.

    GridSearchCV implements a "fit" method and a "predict" method like
    any classifier except that the parameters of the classifier
    used to predict is optimized by cross-validation.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        A object of that type is instantiated for each grid point.

    param_grid : dict or list of dictionaries
        Dictionary with parameters names (string) as keys and lists of
        parameter settings to try as values, or a list of such
        dictionaries, in which case the grids spanned by each dictionary
        in the list are explored. This enables searching over any sequence
        of parameter settings.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    fit_params : dict, optional
        Parameters to pass to the fit method.

    n_jobs : int, default 1
        Number of jobs to run in parallel.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.

    cv : integer or cross-validation generator, default=3
        If an integer is passed, it is the number of folds.
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects

    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this GridSearchCV instance after fitting.

    verbose : integer
        Controls the verbosity: the higher, the more messages.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Examples
    --------
    >>> from sklearn import svm, grid_search, datasets
    >>> iris = datasets.load_iris()
    >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
    >>> svr = svm.SVC()
    >>> clf = grid_search.GridSearchCV(svr, parameters)
    >>> clf.fit(iris.data, iris.target)
    ...                             # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    GridSearchCV(cv=None, error_score=...,
           estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
                         decision_function_shape=None, degree=..., gamma=...,
                         kernel='rbf', max_iter=-1, probability=False,
                         random_state=None, shrinking=True, tol=...,
                         verbose=False),
           fit_params={}, iid=..., n_jobs=1,
           param_grid=..., pre_dispatch=..., refit=...,
           scoring=..., verbose=...)

    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:

            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold

    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.

    best_score_ : float
        Score of best_estimator on the left out data.

    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.

    scorer_ : function
        Scorer function used on the held out data to choose the best
        parameters for the model.

    Notes
    ------
    The parameters selected are those that maximize the score of the left out
    data, unless an explicit score is passed in which case it is used instead.

    If `n_jobs` was set to a value higher than one, the data is copied for each
    point in the grid (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.

    See Also
    ---------
    :class:`ParameterGrid`:
        generates all the combinations of a an hyperparameter grid.

    :func:`sklearn.cross_validation.train_test_split`:
        utility function to split the data into a development set usable
        for fitting a GridSearchCV instance and an evaluation set for
        its final evaluation.

    :func:`sklearn.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.

    """

    def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
                 n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score='raise'):
        super(GridSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score)
        self.param_grid = param_grid
        # reject malformed grids early, before any fitting happens
        _check_param_grid(param_grid)

    def fit(self, X, y=None):
        """Run fit with all sets of parameters.

        Parameters
        ----------

        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        """
        # Expand the grid into concrete candidates and delegate to the
        # shared search loop in BaseSearchCV.
        candidates = ParameterGrid(self.param_grid)
        return self._fit(X, y, candidates)
class RandomizedSearchCV(BaseSearchCV):
    """Randomized search on hyper parameters.

    RandomizedSearchCV implements a "fit" method and a "predict" method like
    any classifier except that the parameters of the classifier
    used to predict is optimized by cross-validation.

    In contrast to GridSearchCV, not all parameter values are tried out, but
    rather a fixed number of parameter settings is sampled from the specified
    distributions. The number of parameter settings that are tried is
    given by n_iter.

    If all parameters are presented as a list, sampling without replacement
    is performed. If at least one parameter is given as a distribution,
    sampling with replacement is used. It is highly recommended to use
    continuous distributions for continuous parameters.

    Read more in the :ref:`User Guide <randomized_parameter_search>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        A object of that type is instantiated for each parameter setting.

    param_distributions : dict
        Dictionary with parameters names (string) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.

    n_iter : int, default=10
        Number of parameter settings that are sampled. n_iter trades
        off runtime vs quality of the solution.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    fit_params : dict, optional
        Parameters to pass to the fit method.

    n_jobs : int, default=1
        Number of jobs to run in parallel.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects

    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this RandomizedSearchCV instance after fitting.

    verbose : integer
        Controls the verbosity: the higher, the more messages.

    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:

            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold

    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.

    best_score_ : float
        Score of best_estimator on the left out data.

    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.

    Notes
    -----
    The parameters selected are those that maximize the score of the held-out
    data, according to the scoring parameter.

    If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting(and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.

    See Also
    --------
    :class:`GridSearchCV`:
        Does exhaustive search over a grid of parameters.

    :class:`ParameterSampler`:
        A generator over parameter settings, constructed from
        param_distributions.

    """
    def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
                 fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs', random_state=None,
                 error_score='raise'):
        # Store the sampling-specific options; every generic search option
        # is delegated to BaseSearchCV.
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
        super(RandomizedSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score)

    def fit(self, X, y=None):
        """Run fit on the estimator with randomly drawn parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        """
        # ParameterSampler draws n_iter settings (lazily); the shared
        # cross-validation loop in BaseSearchCV._fit evaluates each one.
        sampled_params = ParameterSampler(self.param_distributions,
                                          self.n_iter,
                                          random_state=self.random_state)
        return self._fit(X, y, sampled_params)
| bsd-3-clause |
IndraVikas/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
    """Return an (N, D) data matrix for the requested benchmark dataset.

    'dense' draws uniform random values (seeded for reproducibility);
    'digits' takes the first N digit samples restricted to the D columns
    with the largest values in the first sample.
    """
    if dataset == 'dense':
        np.random.seed(0)
        return np.random.random((N, D))
    if dataset == 'digits':
        X = datasets.load_digits().data
        # reorder the columns by decreasing value of the first sample
        order = np.argsort(X[0])[::-1]
        return X[:, order][:N, :D]
    raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
                      Drange=2 ** np.arange(7),
                      krange=2 ** np.arange(10),
                      N=1000,
                      D=64,
                      k=5,
                      leaf_size=30,
                      dataset='digits'):
    """Benchmark build/query time of the neighbors algorithms.

    Varies the number of samples (Nrange), the dimensionality (Drange) and
    the number of neighbors (krange) one at a time, holding the other two
    quantities at their fiducial values (N, D, k), and draws one stacked
    bar subplot (construction time + query time, log scale) per study.
    """
    algorithms = ('kd_tree', 'brute', 'ball_tree')
    fiducial_values = {'N': N,
                       'D': D,
                       'k': k}

    # ------------------------------------------------------------
    # varying N (D and k fixed at their fiducial values)
    N_results_build = dict([(alg, np.zeros(len(Nrange)))
                            for alg in algorithms])
    N_results_query = dict([(alg, np.zeros(len(Nrange)))
                            for alg in algorithms])

    for i, NN in enumerate(Nrange):
        print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
        X = get_data(NN, D, dataset)
        for algorithm in algorithms:
            # n_neighbors cannot exceed the number of samples
            nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            N_results_build[algorithm][i] = (t1 - t0)
            N_results_query[algorithm][i] = (t2 - t1)

    # ------------------------------------------------------------
    # varying D (N and k fixed at their fiducial values)
    D_results_build = dict([(alg, np.zeros(len(Drange)))
                            for alg in algorithms])
    D_results_query = dict([(alg, np.zeros(len(Drange)))
                            for alg in algorithms])

    for i, DD in enumerate(Drange):
        print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
        X = get_data(N, DD, dataset)
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=k,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            D_results_build[algorithm][i] = (t1 - t0)
            D_results_query[algorithm][i] = (t2 - t1)

    # ------------------------------------------------------------
    # varying k (N and D fixed at their fiducial values)
    k_results_build = dict([(alg, np.zeros(len(krange)))
                            for alg in algorithms])
    k_results_query = dict([(alg, np.zeros(len(krange)))
                            for alg in algorithms])

    # BUG FIX: the original used the leftover loop variable DD here, which
    # only matched the fiducial D by coincidence of the default arguments.
    X = get_data(N, D, dataset)

    for i, kk in enumerate(krange):
        print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
                                              algorithm=algorithm,
                                              leaf_size=leaf_size)
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            k_results_build[algorithm][i] = (t1 - t0)
            k_results_query[algorithm][i] = (t2 - t1)

    pl.figure(figsize=(8, 11))

    for (sbplt, vals, quantity,
         build_time, query_time) in [(311, Nrange, 'N',
                                      N_results_build,
                                      N_results_query),
                                     (312, Drange, 'D',
                                      D_results_build,
                                      D_results_query),
                                     (313, krange, 'k',
                                      k_results_build,
                                      k_results_query)]:
        ax = pl.subplot(sbplt, yscale='log')
        pl.grid(True)

        tick_vals = []
        tick_labels = []

        # baseline of the log-scale bars: smallest power of ten below the
        # fastest build time, so every bar has positive height
        bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
                               for alg in algorithms])

        for i, alg in enumerate(algorithms):
            xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
            width = 0.8

            # stacked bars: construction time (red) below query time (blue)
            c_bar = pl.bar(xvals, build_time[alg] - bottom,
                           width, bottom, color='r')
            q_bar = pl.bar(xvals, query_time[alg],
                           width, build_time[alg], color='b')

            tick_vals += list(xvals + 0.5 * width)
            tick_labels += ['%i' % val for val in vals]

            pl.text((i + 0.02) / len(algorithms), 0.98, alg,
                    transform=ax.transAxes,
                    ha='left',
                    va='top',
                    bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))

            pl.ylabel('Time (s)')

        ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
        ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))

        for label in ax.get_xticklabels():
            label.set_rotation(-90)
            label.set_fontsize(10)

        title_string = 'Varying %s' % quantity

        # describe the two quantities held fixed in this subplot
        descr_string = ''
        for s in 'NDk':
            if s == quantity:
                pass
            else:
                descr_string += '%s = %i, ' % (s, fiducial_values[s])
        descr_string = descr_string[:-2]

        pl.text(1.01, 0.5, title_string,
                transform=ax.transAxes, rotation=-90,
                ha='left', va='center', fontsize=20)

        pl.text(0.99, 0.5, descr_string,
                transform=ax.transAxes, rotation=-90,
                ha='right', va='center')

    pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)

    pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
                 'upper right')
# Benchmark both datasets, then display all three figures.
if __name__ == '__main__':
    barplot_neighbors(dataset='digits')
    barplot_neighbors(dataset='dense')
    pl.show()
| bsd-3-clause |
andaag/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). The LSHForest
index has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tend to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5

# Initialize the range of `n_samples`: log-spaced index sizes.
# BUG FIX: `np.int` was removed in NumPy 1.24; use the builtin `int`.
n_samples_values = np.logspace(np.log10(n_samples_min),
                               np.log10(n_samples_max),
                               n_steps).astype(int)

# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
                         n_features=n_features, centers=n_centers, shuffle=True,
                         random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]

# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []

# Calculate the average query time for each index size
for n_samples in n_samples_values:
    X = index_data[:n_samples]
    # Initialize LSHForest for queries of a single neighbor
    lshf = LSHForest(n_estimators=20, n_candidates=200,
                     n_neighbors=10).fit(X)
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
                            n_neighbors=10).fit(X)
    time_approx = []
    time_exact = []
    accuracy = []

    for i in range(n_iter):
        # pick one query at random to study query time variability in LSHForest
        query = queries[rng.randint(0, n_queries)]

        t0 = time.time()
        exact_neighbors = nbrs.kneighbors(query, return_distance=False)
        time_exact.append(time.time() - t0)

        t0 = time.time()
        approx_neighbors = lshf.kneighbors(query, return_distance=False)
        time_approx.append(time.time() - t0)

        # fraction of the approximate neighbors that are exact neighbors
        accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())

    average_time_exact = np.mean(time_exact)
    average_time_approx = np.mean(time_approx)
    speedup = np.array(time_exact) / np.array(time_approx)
    average_speedup = np.mean(speedup)
    mean_accuracy = np.mean(accuracy)
    std_accuracy = np.std(accuracy)
    print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
          "accuracy: %0.2f +/-%0.2f" %
          (n_samples, average_time_exact, average_time_approx, average_speedup,
           mean_accuracy, std_accuracy))

    accuracies.append(mean_accuracy)
    std_accuracies.append(std_accuracy)
    average_times_exact.append(average_time_exact)
    average_times_approx.append(average_time_approx)
    std_times_approx.append(np.std(time_approx))
    average_speedups.append(average_speedup)
    std_speedups.append(np.std(speedup))

# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
             fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
         label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
          "nearest neighbors queries")

# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
             fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")

# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")

plt.show()
| bsd-3-clause |
YudinYury/Python_Netology_homework | less_4_2_hw_1_visualization.py | 1 | 3030 | """lesson_4_2_Homework "Data visualization"
"""
import os
import pandas as pd
# Raw string avoids invalid escape sequences such as "\P" and "\d"
# (a DeprecationWarning in modern Python); the value is unchanged.
source_path = r'D:\Python_my\Python_Netology_homework\data_names'
source_dir_path = os.path.normpath(os.path.abspath(source_path))
def download_year_data(year):
    """Load yob<year>.txt from the data directory and return the total
    counts (summed over gender) for the names Ruth and Robert.

    Returns a DataFrame indexed by Name with a single 'Count' column.
    """
    source_file = os.path.normpath(os.path.join(source_dir_path, 'yob{}.txt'.format(year)))
    # the raw file has no header: name, gender, count per line
    year_data = pd.read_csv(source_file, names=['Name', 'Gender', 'Count'])
    # year_data['Year'] = year_data.apply(lambda x: int(year), axis=1)
    # gender is irrelevant here; drop it so groupby sums over both genders
    year_data = year_data.drop(['Gender'], axis=1)
    return year_data.query('Name == ["Ruth", "Robert"]').groupby('Name').sum()
def ruth_n_robert():
    """Plot yearly totals of the names Ruth and Robert for 1900-1903.

    Loads the per-year totals, concatenates them into a frame indexed by
    (Year, Name), and draws a comparison line plot with one column per
    name.

    Returns 0 (legacy status-code convention kept for callers).
    """
    # Removed from the original: a no-op `global` statement (the module
    # variable is only read), an unused `names` list, a dead dict that was
    # immediately overwritten, and a stray debug `print()`.
    names_dict = {}
    for year in range(1900, 1904):
        names_dict[year] = download_year_data(year)
    # concat with a dict of frames builds a (Year, Name) MultiIndex
    ruth_n_robert_all_time = pd.concat(names_dict, names=['Year'])
    # unstack so each name becomes its own column sharing the year axis
    ruth_n_robert_all_time.unstack('Name').plot(title='Ruth vs Robert', grid=True)
    return 0
def main():
    """Entry point: draw the Ruth-vs-Robert plot, then exit.

    The large commented-out region below is earlier homework
    experimentation (other names, gender dynamics, pie charts) kept for
    reference.
    """
    ruth_n_robert()
    # df = pd.DataFrame(np.random.randn(50, 3), columns=['Z', 'B', 'C'])
    # plot = df.plot()
    # fig = plot.get_figure()
    # fig.savefig('less_4_2_fig_graph.png')
    # names_by_year = {}
    # for year in range(1900, 2001, 10):
    #     names_by_year[year] = pd.read_csv('D:\Python_my\Python_Netology_homework\data_names\yob{}.txt'.format(year),
    #                                       names=['Name', 'Gender', 'Count'])
    # names_all = pd.concat(names_by_year, names=['Year', 'Pos'])
    # # print(names_all.head(10))
    # name_dynamics = names_all.groupby([names_all.index.get_level_values(0), 'Name']).sum()
    # # print(name_dynamics.head(10))
    # print(name_dynamics.query('Name == ["John", "Mary", "William"]'))
    # print(name_dynamics.query('Name == ["John", "Mary", "William"]').unstack('Name').plot)
    # gender_dynamics = names_all.groupby([names_all.index.get_level_values(0), 'Name']).sum()
    # gender_dynamics_cols = gender_dynamics.unstack('Gender')
    # gender_dynamics_cols.plot(title='Dynamics', grid=True)
    # name_dynamics.query('Name == ["John", "Mary", "William"]').unstack('Name').plot.bar()
    # names_for_pie = names_all.groupby('Name').sum().sort_values(by='Count', ascending=False).head(5)
    # names_for_pie.plot.pie(y='Count')
    # names = pd.read_csv('D:\Python_my\Python_Netology_homework\data_names\yob2000.txt', names=['Name', 'Gender', 'Count'])
    # print(names.head(10))
    # explicit success exit kept from the original script
    exit(0)
# Run the visualization only when executed as a script.
if __name__ == '__main__':
    main()
| gpl-3.0 |
shusenl/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
    """Render a parsed numpydoc-style docstring as Sphinx-flavoured reST."""

    def __init__(self, docstring, config=None):
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # `symbol` is accepted for API compatibility but unused: headers
        # are rendered as rubrics, not underlined sections.
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        # indent every line of `doc` by `indent` spaces
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out

    def _str_signature(self):
        # Signature rendering is intentionally disabled (Sphinx renders it
        # itself).  The original kept the old rendering code unreachable
        # after this return; that dead code has been removed.
        return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        """Render a Parameters/Returns/... section as a reST field list."""
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc, 8)
                out += ['']
        return out

    @property
    def _obj(self):
        # the documented object, whichever subclass attribute holds it
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None

    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')

            if prefix:
                prefix = '~%s.' % prefix

            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # members that exist on the object go in the autosummary;
                # everything else falls back to a hand-built table
                if not self._obj or hasattr(self._obj, param):
                    autosum += ["   %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))

            if autosum:
                # GAEL: Toctree commented out below because it creates
                # hundreds of sphinx warnings
                # out += ['.. autosummary::', '   :toctree:', '']
                out += ['.. autosummary::', '']
                out += autosum

            if others:
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "=" * maxlen_0 + "  " + "=" * maxlen_1 + "  " + "=" * 10
                fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            out += self._str_indent(see_also[2:])
        return out

    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out

        out += ['.. index:: %s' % idx.get('default', '')]
        # BUG FIX: dict.iteritems() is Python-2-only; items() works on both.
        for section, references in idx.items():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += ['   single: %s' % (', '.join(references))]
            else:
                out += ['   %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            import sphinx  # local import to avoid test dependency
            # NOTE(review): string comparison of versions is fragile, but
            # kept for compatibility with very old sphinx releases.
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex', '']
            else:
                out += ['.. latexonly::', '']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out

    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])

        # auto-wrap matplotlib-based examples in a plot:: directive
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')

    def __str__(self, indent=0, func_role="obj"):
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Methods',):
            out += self._str_member_list(param_list)
        out = self._str_indent(out, indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx renderer for function docstrings."""

    def __init__(self, obj, doc=None, config=None):
        # BUG FIX: avoid the shared mutable default argument `config={}`.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx renderer for class docstrings."""

    def __init__(self, obj, doc=None, func_doc=None, config=None):
        # BUG FIX: avoid the shared mutable default argument `config={}`.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): func_doc is accepted but deliberately not forwarded
        # (ClassDoc is called with func_doc=None), matching the original.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Sphinx renderer for an arbitrary object's (pre-fetched) docstring."""

    def __init__(self, obj, doc=None, config=None):
        # keep a reference to the object so the _obj property works
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config=None):
    """Return the appropriate Sphinx doc wrapper for *obj*.

    Parameters
    ----------
    obj : object
        The object to document.
    what : {'class', 'module', 'function', 'method', 'object'}, optional
        Kind of object; inferred from `obj` when None.
    doc : str, optional
        Docstring to use instead of the object's own.
    config : dict, optional
        Renderer configuration (e.g. ``use_plots``).
    """
    # BUG FIX: avoid the shared mutable default argument `config={}`.
    config = {} if config is None else config
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            # fall back to the object's own docstring
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
alubbock/pysb | pysb/examples/cupsoda/run_michment_cupsoda.py | 5 | 1878 | from pysb.examples.michment import model
from pysb.simulator.cupsoda import run_cupsoda
import numpy as np
import matplotlib.pyplot as plt
import itertools
def run():
    """Scan the Michaelis-Menten model over perturbed initial conditions
    with cupSODA and plot the envelope of 'Product' trajectories.

    Builds the Cartesian product of +/-20% perturbations of every initial
    condition, simulates all combinations, and plots the mean and min/max
    band of the Product observable over time.
    """
    # factors to multiply the values of the initial conditions
    multipliers = np.linspace(0.8, 1.2, 11)
    # 2D array of initial concentrations
    initial_concentrations = [
        multipliers * ic.value.value for ic in model.initials
    ]
    # Cartesian product of initial concentrations
    cartesian_product = itertools.product(*initial_concentrations)
    # the Cartesian product object must be cast to a list, then to a numpy array
    # and transposed to give a (n_species x n_vals) matrix of initial concentrations
    initials_matrix = np.array(list(cartesian_product)).T
    # we can now construct the initials dictionary
    # (row order matches model.initials, so enumerate pairs them correctly)
    initials = {
        ic.pattern: initials_matrix[i] for i, ic in enumerate(model.initials)
    }
    # simulation time span and output points
    tspan = np.linspace(0, 50, 501)
    # run_cupsoda returns a 3D array of species and observables trajectories
    trajectories = run_cupsoda(model, tspan, initials=initials,
                               integrator_options={'atol': 1e-10, 'rtol': 1e-4},
                               verbose=True)
    # extract the trajectories for the 'Product' into a numpy array and
    # transpose to aid in plotting
    x = np.array([tr['Product'] for tr in trajectories]).T
    # plot the mean, minimum, and maximum concentrations at each time point
    plt.plot(tspan, x.mean(axis=1), 'b', lw=3, label="Product")
    plt.plot(tspan, x.max(axis=1), 'b--', lw=2, label="min/max")
    plt.plot(tspan, x.min(axis=1), 'b--', lw=2)
    # define the axis labels and legend
    plt.xlabel('time')
    plt.ylabel('concentration')
    plt.legend(loc='upper left')
    # show the plot
    plt.show()
# Run the scan only when executed as a script.
if __name__ == '__main__':
    run()
| bsd-2-clause |
erdc/proteus | proteus/mprans/beamFEM.py | 1 | 16450 | from __future__ import division
from builtins import range
from builtins import object
from past.utils import old_div
import numpy as np
#import scipy as sp
#import matplotlib.pyplot as plt
import math
import numpy.linalg as linalg
class FEMTools(object):
def __init__(self,
L=1.0,
nElements=10,
quadOrder=3,
EI=1.0e3,
GJ=1.0e3,
nlTol=1.0e-6,
useSparse=False,
beamLocation=(0.5, 0.5)):
self.L = L
self.nElements = nElements
self.quadOrder = quadOrder
self.EI = EI
self.GJ = GJ
self.nlTol = nlTol
self.useSparse = useSparse
self.beamLocation = beamLocation
def structuredMesh(self):
self.nNodes = 2 * self.nElements + 1
self.nDOF = 3 * self.nNodes
self.mesh = np.linspace(0.0, self.L, self.nElements + 1)
self.h = self.L / float(self.nElements) * np.ones(self.nElements)
return self.mesh, self.h
def initializePhi(self):
self.Phi = np.zeros(self.nDOF)
# self.Phi[3::3]=-math.pi/3.0
# for i in range(self.nNodes):
# self.Phi[3*i+2]=math.pi/4.0*float(i)/float(self.nNodes)
# self.Phi[4::3]=0.0
# self.Phi[5::3]=-math.pi/3.0
self.g = np.zeros(self.nDOF)
def GaussQuad(self):
if self.quadOrder == 2:
self.w = (1.0, 1.0)
self.zeta = (old_div(-1.0, 3.0**.5), old_div(1.0, 3.0**0.5))
#self.quadSpacing = (self.zeta[0]+1.0, self.zeta[1]-self.zeta[0], 1.0-self.zeta[1])
elif self.quadOrder == 3:
self.w = (old_div(5.0, 9.0), old_div(8.0, 9.0), old_div(5.0, 9.0))
self.zeta = (-(old_div(3.0, 5.0))**.5, 0.0, (old_div(3.0, 5.0))**0.5)
#self.quadSpacing = (self.zeta[0]+1.0, self.zeta[1]-self.zeta[0], self.zeta[2]-self.zeta[1],1.0-self.zeta[2])
def initializeCoords(self):
self.x = np.zeros(self.nElements + 1)
self.y = np.zeros(self.nElements + 1)
self.z = np.linspace(0.0, self.L, self.nElements + 1)
# return self.x, self.y, self.z
def basisFunctions(self):
if self.quadOrder == 2:
v1 = 1.0 / 6.0 * np.array([1.0 + 3.0**.5, 4.0, 1.0 - 3.0**.5])
v2 = 1.0 / 6.0 * np.array([1.0 - 3.0**.5, 4.0, 1.0 + 3.0**.5])
dv1 = 1.0 / 6.0 * np.array([-2.0 * 3.0**.5 - 3.0, 4.0 * 3.0**.5, -2.0 * 3.0**0.5 + 3.0])
dv2 = 1.0 / 6.0 * np.array([2.0 * 3.0**.5 - 3.0, -4.0 * 3.0**.5, 2.0 * 3.0**0.5 + 3.0])
self.v = [v1, v2]
self.dv = [dv1, dv2]
self.vv = [np.outer(self.v[0], self.v[0]), np.outer(self.v[1], self.v[1])]
self.dvdv = [np.outer(self.dv[0], self.dv[0]), np.outer(self.dv[1], self.dv[1])]
self.vdv = [np.outer(self.v[0], self.dv[0]), np.outer(self.v[1], self.dv[1])]
elif self.quadOrder == 3:
self.v = [np.array([0.6872983345e0, 0.4000000001e0, -0.8729833465e-1]), np.array([0.0, 0.10e1, 0.0]),
np.array([-0.8729833465e-1, 0.4000000001e0, 0.6872983345e0])]
self.dv = [np.array([-0.1274596669e1, 0.1549193338e1, -0.2745966692e0]), np.array([-0.50e0, 0.0, 0.50e0]),
np.array([0.2745966692e0, -0.1549193338e1, 0.1274596669e1])]
self.vv = [np.outer(self.v[0], self.v[0]), np.outer(self.v[1], self.v[1]), np.outer(self.v[2], self.v[2])]
self.dvdv = [np.outer(self.dv[0], self.dv[0]), np.outer(self.dv[1], self.dv[1]), np.outer(self.dv[2], self.dv[2])]
self.vdv = [np.outer(self.v[0], self.dv[0]), np.outer(self.v[1], self.dv[1]), np.outer(self.v[2], self.dv[2])]
def updateCoords(self):
#import pdb
# pdb.set_trace()
arcLength = 0.0
self.x[0] = 0.0
self.y[0] = 0.0
for i in range(self.nElements):
theta_el = np.array([self.Phi[6 * i], self.Phi[6 * i + 3], self.Phi[6 * (i + 1)]])
psi_el = np.array([self.Phi[6 * i + 1], self.Phi[6 * i + 4], self.Phi[6 * i + 7]])
phi_el = np.array([self.Phi[6 * i + 2], self.Phi[6 * i + 5], self.Phi[6 * i + 8]])
# self.x[i+1] = self.x[i]
# self.y[i+1] = self.y[i]
# self.z[i+1] = self.z[i]
xval = 0.0
yval = 0.0
zval = 0.0
for j in range(self.quadOrder):
theta = np.dot(self.v[j], theta_el)
phi = np.dot(self.v[j], phi_el)
xval += self.w[j] * math.cos(theta) * math.sin(phi)
yval += -self.w[j] * math.sin(theta) # math.sin(theta)*math.sin(psi)
zval += self.w[j] * math.cos(theta) * math.cos(phi)
self.x[i + 1] = self.x[i] + 0.5 * self.h[i] * xval
self.y[i + 1] = self.y[i] + 0.5 * self.h[i] * yval
self.z[i + 1] = self.z[i] + 0.5 * self.h[i] * zval
arcLength += ((self.x[i + 1] - self.x[i])**2 + (self.y[i + 1] - self.y[i])**2 + (self.z[i + 1] - self.z[i])**2)**0.5
# print arcLength, self.x[-1]
for i in range(self.nElements + 1):
self.x[i] += self.beamLocation[0]
self.y[i] += self.beamLocation[1]
return self.x[:], self.y[:], self.z[:]
def updateLoads(self, q1, q2, q3):
self.q1 = q1
self.q2 = q2
self.q3 = q3
def updateQs(self, endLoad, scale):
    """Accumulate nodal force resultants Q1..Q3 from the loads.

    Starting from the tip point load ``endLoad`` (a 3-sequence), walks
    the elements from the free end toward the base, integrating the
    distributed loads q1/q2/q3 (indexed [element, quad point]) by Gauss
    quadrature.  Element end nodes get the running resultant; mid-side
    nodes get the average of the element's two end values.  The final
    arrays are negated and scaled by ``scale``.
    """
    self.Q1 = np.zeros(self.nNodes)
    self.Q2 = np.zeros(self.nNodes)
    self.Q3 = np.zeros(self.nNodes)
    # running resultants, seeded with the point load applied at the tip
    count1 = endLoad[0]
    count2 = endLoad[1]
    count3 = endLoad[2]
    # walk elements from the last (tip) down to the first (base)
    for i in range(self.nElements, 0, -1):
        self.Q1[i * 2] = count1
        self.Q2[i * 2] = count2
        self.Q3[i * 2] = count3
        # add this element's distributed-load contribution by quadrature
        for j in range(self.quadOrder):
            count1 += 0.5 * self.h[i - 1] * self.w[j] * self.q1[i - 1, j]
            count2 += 0.5 * self.h[i - 1] * self.w[j] * self.q2[i - 1, j]
            count3 += 0.5 * self.h[i - 1] * self.w[j] * self.q3[i - 1, j]
        # mid-side node takes the average of the element's end values
        self.Q1[i * 2 - 1] = 0.5 * (self.Q1[i * 2] + count1)
        self.Q2[i * 2 - 1] = 0.5 * (self.Q2[i * 2] + count2)
        self.Q3[i * 2 - 1] = 0.5 * (self.Q3[i * 2] + count3)
    self.Q1[0] = count1
    self.Q2[0] = count2
    self.Q3[0] = count3
    # sign convention: loads enter the residual with a negative sign
    self.Q1 *= -1.0 * scale
    self.Q2 *= -1.0 * scale
    self.Q3 *= -1.0 * scale
def calculateGradient_Hessian(self):
    """Assemble the global gradient vector ``g`` and tangent/Hessian
    matrix ``K`` by Gaussian quadrature over each element.

    Per element, the ``gs*``/``Ks*`` accumulators collect the stiffness
    (strain) contributions and ``gl*``/``Kl*`` the load contributions for
    the three rotation fields (theta, psi, phi).  Element results are
    scattered into the global arrays with stride-3 slices because each
    node carries the interleaved DOF triple (theta, psi, phi).

    NOTE(review): EI and GJ are presumably bending and torsional
    stiffnesses -- confirm against where they are set.
    """
    self.g = np.zeros(self.nDOF)
    self.K = np.zeros((self.nDOF, self.nDOF))
    for i in range(self.nElements):
        # element DOFs: 3 nodes per element, overlapping neighbours by one node
        theta_el = np.array([self.Phi[6 * i], self.Phi[6 * i + 3], self.Phi[6 * (i + 1)]])
        psi_el = np.array([self.Phi[6 * i + 1], self.Phi[6 * i + 4], self.Phi[6 * i + 7]])
        phi_el = np.array([self.Phi[6 * i + 2], self.Phi[6 * i + 5], self.Phi[6 * i + 8]])
        Q1_el = np.array([self.Q1[2 * i], self.Q1[2 * i + 1], self.Q1[2 * i + 2]])
        Q2_el = np.array([self.Q2[2 * i], self.Q2[2 * i + 1], self.Q2[2 * i + 2]])
        Q3_el = np.array([self.Q3[2 * i], self.Q3[2 * i + 1], self.Q3[2 * i + 2]])
        # gradient accumulators: gs* = stiffness part, gl* = load part
        gstheta = np.zeros(3)
        gspsi = np.zeros(3)
        gsphi = np.zeros(3)
        gltheta = np.zeros(3)
        glpsi = np.zeros(3)
        glphi = np.zeros(3)
        # Hessian accumulators, one 3x3 block per field pairing
        Ksthetatheta = np.zeros((3, 3))
        Ksthetapsi = np.zeros((3, 3))
        Ksthetaphi = np.zeros((3, 3))
        Kspsipsi = np.zeros((3, 3))
        Kspsiphi = np.zeros((3, 3))
        Ksphiphi = np.zeros((3, 3))
        Klthetatheta = np.zeros((3, 3))
        Klthetapsi = np.zeros((3, 3))
        Klthetaphi = np.zeros((3, 3))
        Klpsipsi = np.zeros((3, 3))
        Klpsiphi = np.zeros((3, 3))
        Klphiphi = np.zeros((3, 3))
        for j in range(self.quadOrder):
            # interpolate fields and their derivatives at quadrature point j
            theta = np.dot(self.v[j], theta_el)
            psi = np.dot(self.v[j], psi_el)
            phi = np.dot(self.v[j], phi_el)
            thetad = np.dot(self.dv[j], theta_el)
            psid = np.dot(self.dv[j], psi_el)
            phid = np.dot(self.dv[j], phi_el)
            Q1 = np.dot(self.v[j], Q1_el)
            Q2 = np.dot(self.v[j], Q2_el)
            Q3 = np.dot(self.v[j], Q3_el)
            st = math.sin(theta)
            ct = math.cos(theta)
            # keep cos(theta) away from exactly zero (degenerate configuration)
            if abs(ct) < 1.0e-6:
                ct = math.copysign(1.0e-6, ct)
            # self.w[j]*(self.EI*thetad*self.dv[j] + ((self.EI-self.GJ)*psid*psid*st*ct -self.GJ*psid*phid*st)*self.v[j])
            gstheta += self.w[j] * (self.EI * thetad * self.dv[j] + ((self.GJ - self.EI) * phid**2 * st * ct - self.GJ * psid * phid * ct) * self.v[j])
            # self.w[j]*(((self.EI*st*st+self.GJ*(1.0+ct*ct))*psid +self.GJ*phid*ct)*self.dv[j])
            gspsi += self.GJ * self.w[j] * (psid - phid * st) * self.dv[j]
            gsphi += self.w[j] * (self.EI * phid * ct * ct + self.GJ * (phid * st * st - psid * st)) * self.dv[j]  # self.w[j]*(self.GJ*psid*ct*self.dv[j])
            # self.w[j]*((Q1*ct*math.cos(psi) + Q2*ct*math.sin(psi) - Q3*st)*self.v[j])
            gltheta += self.w[j] * (-Q1 * st * math.sin(phi) - Q2 * ct - Q3 * st * math.cos(phi)) * self.v[j]
            #glpsi += self.w[j]*((-Q1*st*math.sin(psi) + Q2*st*math.cos(psi))*self.v[j])
            glphi += self.w[j] * (Q1 * ct * math.cos(phi) - Q3 * ct * math.sin(phi)) * self.v[j]
            # self.w[j]*(self.EI*self.dvdv[j] +((self.EI-self.GJ)*(2.0*ct*ct-1.0)-self.GJ*psid*phid*ct)*self.vv[j])
            Ksthetatheta += self.w[j] * (self.EI * self.dvdv[j] + (self.GJ - self.EI) * (2.0 * ct * ct - 1.0)
                                         * phid * phid * self.vv[j] + self.GJ * psid * phid * st * self.vv[j])
            Ksthetapsi += -self.GJ * self.w[j] * phid * ct * self.vdv[j]  # self.w[j]*((2.0*(self.EI-self.GJ)*phid*st*ct -self.GJ*phid*st)*self.vdv[j])
            Ksthetaphi += self.w[j] * (2.0 * (self.GJ - self.EI) * phid * st * ct - self.GJ * psid * ct) * \
                self.vdv[j]  # self.w[j]*(-self.GJ*phid*st*self.vdv[j])
            Kspsipsi += self.GJ * self.w[j] * self.dvdv[j]  # self.w[j]*((self.EI*st*st + self.GJ*(1.0+ct*ct))*self.dvdv[j])
            Kspsiphi += -self.GJ * self.w[j] * st * self.dvdv[j]  # self.w[j]*(self.GJ*ct*self.dvdv[j])
            Ksphiphi += self.w[j] * (self.EI * ct * ct + self.GJ * st * st) * self.dvdv[j]
            Klthetatheta += self.w[j] * (-Q1 * ct * math.sin(phi) + Q2 * st - Q3 * ct * math.cos(phi)) * \
                self.vv[j]  # self.w[j]*((-Q1*st*math.cos(psi) -Q2*st*math.sin(psi) -Q3*ct)*self.vv[j])
            #Klthetapsi += self.w[j]*((-Q1*ct*math.sin(psi) + Q2*ct*math.cos(psi))*self.vv[j])
            #Klpsipsi += self.w[j]*((-Q1*st*math.cos(psi) - Q2*st*math.sin(psi))*self.vv[j])
            Klthetaphi += self.w[j] * (-Q1 * st * math.cos(phi) + Q3 * st * math.sin(phi)) * self.vv[j]
            Klphiphi += self.w[j] * (-Q1 * ct * math.sin(phi) - Q3 * ct * math.cos(phi)) * self.vv[j]
        # import pdb
        # pdb.set_trace()
        # scatter the element blocks into the global K and g; the stride-3
        # slices pick out the theta (offset 0), psi (+1) and phi (+2) DOFs.
        # 2/h is the Jacobian factor for derivative terms, h/2 for value terms.
        self.K[i * 6:i * 6 + 9:3, i * 6:i * 6 + 9:3] += 2.0 / self.h[i] * Ksthetatheta + 0.5 * self.h[i] * Klthetatheta
        self.K[i * 6:i * 6 + 9:3, i * 6 + 1:i * 6 + 9:3] += 2.0 / self.h[i] * Ksthetapsi + 0.5 * self.h[i] * Klthetapsi
        self.K[i * 6 + 1:i * 6 + 9:3, i * 6:i * 6 + 9:3] += np.transpose(2.0 / self.h[i] * Ksthetapsi + 0.5 * self.h[i] * Klthetapsi)
        self.K[i * 6:i * 6 + 9:3, i * 6 + 2:i * 6 + 9:3] += 2.0 / self.h[i] * Ksthetaphi + 0.5 * self.h[i] * Klthetaphi
        self.K[i * 6 + 2:i * 6 + 9:3, i * 6:i * 6 + 9:3] += np.transpose(2.0 / self.h[i] * Ksthetaphi + 0.5 * self.h[i] * Klthetaphi)
        self.K[i * 6 + 1:i * 6 + 9:3, i * 6 + 1:i * 6 + 9:3] += 2.0 / self.h[i] * Kspsipsi + 0.5 * self.h[i] * Klpsipsi
        self.K[i * 6 + 1:i * 6 + 9:3, i * 6 + 2:i * 6 + 9:3] += 2.0 / self.h[i] * Kspsiphi + 0.5 * self.h[i] * Klpsiphi
        self.K[i * 6 + 2:i * 6 + 9:3, i * 6 + 1:i * 6 + 9:3] += np.transpose(2.0 / self.h[i] * Kspsiphi + 0.5 * self.h[i] * Klpsiphi)
        self.K[i * 6 + 2:i * 6 + 9:3, i * 6 + 2:i * 6 + 9:3] += 2.0 / self.h[i] * Ksphiphi + 0.5 * self.h[i] * Klphiphi
        self.g[i * 6:i * 6 + 9:3] += 2.0 / self.h[i] * gstheta + 0.5 * self.h[i] * gltheta
        self.g[i * 6 + 1:i * 6 + 9:3] += 2.0 / self.h[i] * gspsi + 0.5 * self.h[i] * glpsi
        self.g[i * 6 + 2:i * 6 + 9:3] += 2.0 / self.h[i] * gsphi + 0.5 * self.h[i] * glphi
    #import pdb
    # pdb.set_trace()
def calculateResidual(self):
    """Solve ``K * Residual = g`` for the Newton step, store the step in
    ``self.Residual`` and its infinity-norm in ``self.error``, and return
    the error.

    NOTE(review): ``linalg`` is presumably numpy/scipy linalg imported at
    module level -- confirm at the file top.
    """
    # import numpy.linalg
    # ut, nt, vt = np.linalg.svd(self.K)
    # rank = np.sum(nt > 1e-15)
    # Kondition = np.linalg.cond(self.K)
    #import pdb
    # pdb.set_trace()
    # commented-out sparse-solver path kept for reference:
    # if self.useSparse:
    #     self.K=self.K.tocsr()
    #     self.Residual = spsolve(self.K,self.g)
    #     #self.Residual = cg(self.K,self.g,tol=1.0e-5)[0]
    # else:
    self.Residual = linalg.solve(self.K, self.g)
    #import pdb
    # pdb.set_trace()
    self.error = linalg.norm(self.Residual, np.inf)
    return self.error
def updateSolution(self):
    """Apply the Newton step to the free DOFs.

    The first three DOFs are never updated, and DOFs listed in
    ``self.deleteList`` are skipped; each skipped DOF shifts the index
    into the (reduced) residual vector down by one.
    """
    skipped = 0
    for dof in range(3, self.nDOF):
        if (dof - 3) in self.deleteList:
            skipped += 1
            continue
        self.Phi[dof] -= self.Residual[dof - 3 - skipped]
    #self.Phi[3::] -= self.Residual
def checkConvergence(self):
    """Return True while iteration should continue, False once the
    residual norm has dropped below the nonlinear tolerance."""
    converged = self.error < self.nlTol
    return not converged
def setBCs(self):
    """Impose the boundary conditions by dropping the first three DOFs
    (rows/columns of K, entries of g) from the system."""
    self.K = self.K[3:, 3:]
    self.g = self.g[3:]
def reduceOrder(self):
    """Reset the list of DOFs to be eliminated from the reduced system.

    Fix: the original body also copied ``self.g`` into a local ``newg``
    that was never used; that dead code has been removed.  Only
    ``self.deleteList`` is (re)initialised here.
    """
    # newK=self.K[:,:]
    self.deleteList = []
def getCoords_Qs_at_Quad(self):
    """Interpolate nodal coordinates and the force resultants F1/F2/F3 to
    the quadrature points of every element.

    Nodal values are interpolated with the shape-function values
    ``self.v[j]``; the element mid-node coordinate is taken as the average
    of the element end coordinates.  Returns
    ``(x, y, z, Q1, Q2, Q3, weights)`` where ``weights`` are the
    quadrature weights ``self.w``.
    """
    x_quad = np.zeros(self.quadOrder * self.nElements)
    y_quad = np.zeros(self.quadOrder * self.nElements)
    z_quad = np.zeros(self.quadOrder * self.nElements)
    Q1_quad = np.zeros(self.quadOrder * self.nElements)
    Q2_quad = np.zeros(self.quadOrder * self.nElements)
    Q3_quad = np.zeros(self.quadOrder * self.nElements)
    for i in range(self.nElements):
        # NOTE(review): resultants here come from self.F1/F2/F3, whereas
        # calculateGradient_Hessian uses self.Q1/Q2/Q3 -- confirm intended.
        Q1_el = np.array([self.F1[2 * i], self.F1[2 * i + 1], self.F1[2 * i + 2]])
        Q2_el = np.array([self.F2[2 * i], self.F2[2 * i + 1], self.F2[2 * i + 2]])
        Q3_el = np.array([self.F3[2 * i], self.F3[2 * i + 1], self.F3[2 * i + 2]])
        # 3-point element: the two ends plus their midpoint
        x_el = np.array([self.x[i], 0.5 * (self.x[i + 1] + self.x[i]), self.x[i + 1]])
        y_el = np.array([self.y[i], 0.5 * (self.y[i + 1] + self.y[i]), self.y[i + 1]])
        z_el = np.array([self.z[i], 0.5 * (self.z[i + 1] + self.z[i]), self.z[i + 1]])
        for j in range(self.quadOrder):
            Q1_quad[self.quadOrder * i + j] += np.dot(self.v[j], Q1_el)
            Q2_quad[self.quadOrder * i + j] += np.dot(self.v[j], Q2_el)
            Q3_quad[self.quadOrder * i + j] += np.dot(self.v[j], Q3_el)
            x_quad[self.quadOrder * i + j] += np.dot(self.v[j], x_el)
            y_quad[self.quadOrder * i + j] += np.dot(self.v[j], y_el)
            z_quad[self.quadOrder * i + j] += np.dot(self.v[j], z_el)
    weights = np.array(self.w)
    return x_quad[:], y_quad[:], z_quad[:], Q1_quad[:], Q2_quad[:], Q3_quad[:], weights
def getCoords_at_Quad(self):
    """Interpolate the nodal coordinates to the quadrature points of every
    element and return them as three flat arrays ``(x, y, z)``.

    Fix: the original also built a ``weights`` array from ``self.w`` that
    was never returned; that dead code is removed (use
    ``getCoords_Qs_at_Quad`` if the quadrature weights are needed).
    """
    x_quad = np.zeros(self.quadOrder * self.nElements)
    y_quad = np.zeros(self.quadOrder * self.nElements)
    z_quad = np.zeros(self.quadOrder * self.nElements)
    for i in range(self.nElements):
        # 3-point element: the two ends plus their midpoint
        x_el = np.array([self.x[i], 0.5 * (self.x[i + 1] + self.x[i]), self.x[i + 1]])
        y_el = np.array([self.y[i], 0.5 * (self.y[i + 1] + self.y[i]), self.y[i + 1]])
        z_el = np.array([self.z[i], 0.5 * (self.z[i + 1] + self.z[i]), self.z[i + 1]])
        for j in range(self.quadOrder):
            x_quad[self.quadOrder * i + j] += np.dot(self.v[j], x_el)
            y_quad[self.quadOrder * i + j] += np.dot(self.v[j], y_el)
            z_quad[self.quadOrder * i + j] += np.dot(self.v[j], z_el)
    return x_quad[:], y_quad[:], z_quad[:]
| mit |
thehackerwithin/berkeley | code_examples/numpyVectorization/diffusion.py | 9 | 5427 | ''' Functions for comparing vectorization performance of simplified
diffusion in 1D and 2D.
http://en.wikipedia.org/wiki/Finite_difference_method#Example:_The_heat_equation
'''
import numpy as np
from matplotlib import pyplot as plt
plt.ion()
def diff1d_loop(n_iter=200, rate=.5, n_x=100, plotUpdate=True,
                showPlots=True):
    ''' A simple finite difference diffusion example using for loops.
    This is the slow version.

    The inputs are:
    n_iter - the number of diffusion iterations
    rate - the diffusion rate
    n_x - number of grid points
    plotUpdate - if the plot should be updated at each iteration. this will
    slow the computation
    showPlots - accepted for interface parity with diff1d_vec; unused here

    Returns the full (n_x, n_iter) history array, matching diff1d_vec.
    (Fix: the original computed y_all but never returned it.)
    '''
    x = np.linspace(-30, 30, n_x)
    # set the initial conditions for the diffusion
    y_init = np.zeros(n_x)
    # y_init[n_x/2] = 1
    y_init[1] = 1
    y_init[-1] = 1
    y_all = np.zeros((n_x, n_iter))
    if plotUpdate:
        fig = plt.figure()
        lines = plt.step(x, y_init)
        plt.show()
    y_next = y_init.copy()
    for i in range(n_iter):
        y_last = y_next.copy()
        # record the state *before* this step's update
        y_all[:, i] = y_next
        for j in range(1, n_x - 1):
            y_next[j] = rate * .5 * (y_last[j + 1] + y_last[j - 1]) + (1 - rate) * y_last[j]
        # re-pin the source cells to their initial value every step
        y_next[y_init > 0] = y_init[y_init > 0]
        if plotUpdate:
            lines[0].set_data(x, y_next)
            fig.canvas.draw()
    return y_all
def diff1d_vec(n_iter=200, rate=.5, n_x=100, plotUpdate=True,
               showPlots=True):
    ''' A simple finite difference diffusion example using numpy vecorization.

    The inputs are:
    n_iter - the number of diffusion iterations
    rate - the diffusion rate
    n_x - number of grid points
    plotUpdate - if the plot should be updated at each iteration. this will
    slow the computation

    Returns the (n_x, n_iter) array of the state recorded at the start of
    every iteration.
    '''
    grid = np.linspace(-30, 30, n_x)
    # initial condition: fixed unit sources near both ends of the grid
    sources = np.zeros(n_x)
    # sources[n_x/2] = 1
    sources[1] = 1
    sources[-1] = 1
    history = np.zeros((n_x, n_iter))
    if plotUpdate:
        fig = plt.figure()
        lines = plt.step(grid, sources)
        plt.show()
    state = sources.copy()
    for step in range(n_iter):
        history[:, step] = state
        # vectorized explicit update of the interior points
        state[1:-1] = rate * (state[2:] + state[:-2]) + \
            (1 - 2 * rate) * state[1:-1]
        # keep the source cells pinned at their initial value
        state[sources > 0] = sources[sources > 0]
        if plotUpdate:
            lines[0].set_data(grid, state)
            fig.canvas.draw()
    if showPlots and not plotUpdate:
        fig = plt.figure()
        lines = plt.step(grid, state)
        plt.show()
    return history
def diff2d_loop(n_iter=100, rate=.5, n_x=100, plotUpdate=True,
                showPlots=True):
    ''' A 2-D finite difference diffusion example using nested for loops.

    The inputs are:
    n_iter - the number of diffusion iterations
    rate - the diffusion rate
    n_x - number of grid points
    plotUpdate - if the plot should be updated at each iteration. this will
    slow the computation

    Fixes vs original: the middle-row index uses integer division
    (n_x / 2 is a float in Python 3 and raises TypeError when indexing),
    and a premature `return` that made the showPlots branch unreachable
    has been removed (structure now matches diff2d_vec).
    '''
    y_init = np.zeros((n_x, n_x))
    # set the initial conditions: a line source along the middle row
    # y_init[n_x/2, n_x/2] = 1
    y_init[n_x // 2, ] = 1
    if plotUpdate:
        fig = plt.figure()
        im = plt.imshow(y_init)
        plt.show()
    y_next = y_init.copy()
    for i in range(n_iter):
        for j in range(1, n_x - 1):
            for k in range(1, n_x - 1):
                y_next[j, k] = rate * .25 * (y_next[j - 1, k] + y_next[j + 1, k] +
                                             y_next[j, k + 1] + y_next[j, k - 1]) + \
                    (1 - rate) * y_next[j, k]
        # keep the source cells pinned at their initial value
        y_next[y_init > 0] = y_init[y_init > 0]
        # y_next[50, 50] = 1
        if plotUpdate:
            im.set_data(y_next)
            fig.canvas.draw()
    if showPlots and not plotUpdate:
        fig = plt.figure()
        im = plt.imshow(y_next)
        plt.show()
    return y_next
def diff2d_vec(n_iter=100, rate=.5, n_x=100, plotUpdate=True,
               showPlots=True):
    ''' A 2-D finite difference diffusion example using numpy vecorization.

    The inputs are:
    n_iter - the number of diffusion iterations
    rate - the diffusion rate
    n_x - number of grid points
    plotUpdate - if the plot should be updated at each iteration. this will
    slow the computation

    Fix vs original: the middle-row index uses integer division
    (n_x / 2 is a float in Python 3 and raises TypeError when indexing).
    '''
    y_init = np.zeros((n_x, n_x))
    # set the initial conditions: a line source along the middle row
    # y_init[n_x/2, n_x/2] = 1
    y_init[n_x // 2, ] = 1
    if plotUpdate:
        fig = plt.figure()
        im = plt.imshow(y_init)
        plt.show()
    y_next = y_init.copy()
    for i in range(n_iter):
        y_next[1:-1, 1:-1] = rate * .25 * (y_next[2:, 1:-1] + y_next[:-2, 1:-1] +
                                           y_next[1:-1, 2:] + y_next[1:-1, :-2]) + \
            (1 - rate) * y_next[1:-1, 1:-1]
        # keep the source cells pinned at their initial value
        y_next[y_init > 0] = y_init[y_init > 0]
        # y_next[50, 50] = 1
        if plotUpdate:
            im.set_data(y_next)
            fig.canvas.draw()
    if showPlots and not plotUpdate:
        fig = plt.figure()
        im = plt.imshow(y_next)
        plt.show()
    return y_next
| bsd-3-clause |
mayblue9/bokeh | examples/charts/file/boxplot.py | 37 | 1117 | from collections import OrderedDict
import pandas as pd
from bokeh.charts import BoxPlot, output_file, show
from bokeh.sampledata.olympics2014 import data
# create a DataFrame with the sample data
# (pd.io.json.json_normalize was deprecated in pandas 0.25 and later removed;
# pd.json_normalize is the supported spelling)
df = pd.json_normalize(data['data'])

# filter by countries with at least one medal and sort
df = df[df['medals.total'] > 0]
# DataFrame.sort() was removed in pandas 0.20; sort_values is the replacement
df = df.sort_values("medals.total", ascending=False)

# get the countries and group the data by medal type
countries = df.abbr.values.tolist()
gold = df['medals.gold'].astype(float).values
silver = df['medals.silver'].astype(float).values
bronze = df['medals.bronze'].astype(float).values

# build a dict containing the grouped data
medals = OrderedDict(bronze=bronze, silver=silver, gold=gold)

# any of the following commented are valid BoxPlot inputs
#medals = pd.DataFrame(medals)
#medals = list(medals.values())
#medals = tuple(medals.values())
#medals = np.array(list(medals.values()))

output_file("boxplot.html")

boxplot = BoxPlot(
    medals, marker='circle', outliers=True, title="boxplot test",
    xlabel="medal type", ylabel="medal count", width=800, height=600)
show(boxplot) | bsd-3-clause |
cragis/SimpleSpectrumAnalyzer | SAv9.py | 1 | 9409 | #by Craig Howald 2017 to remain free from restriction
#mods added by Kevin Thomas
from matplotlib.figure import Figure
import numpy as np
import matplotlib.pyplot as plt
import rtlsdr
from matplotlib.mlab import psd
import tkinter as Tk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import rtlsdr
import csv # added by Kevin
# --- one-time setup: Tk window, RTL-SDR device, initial scan and figures ---
# The single-letter prints ("a".."s") are startup progress markers so a hang
# can be localised to a specific initialisation step.
print("a")
root = Tk.Tk()
print("b")
root.wm_title("Spectrum Analyzer")
print("c")
root.columnconfigure(0, weight=1)
print("d")
root.rowconfigure(0, weight=1)
print("e")
# open the RTL-SDR dongle (raises if no device is attached)
sdr = rtlsdr.RtlSdr()
print("f")
# added by Kevin; spreadsheet data scale
distance = 100 #cm
# configure device
print("g")
sdr.sample_rate = 2.048e6
sdr.center_freq = 1300e6-.3e6
sdr.gain = 40.2
# marker bin index and half-bandwidth (in FFT bins) for the marked band
cindex=600;
bw=50;
# NOTE(review): ilow/ihigh appear unused below -- confirm they are vestigial
ilow=2400
ihigh=2500
# number of points kept in the time-series (lower) plot
timelength=1001;
print("h")
#tsdata[:]=np.nan;
print("i")
NFFT = 256*4
NUM_SAMPLES_PER_SCAN = NFFT*64
centerindex=int(NFFT/2)
print("j")
fc=sdr.fc;
rs=sdr.rs;
print("j1")
# frequency axis in MHz for the NFFT bins
findex=np.arange(0,NFFT)
print("j2")
freq=((fc-rs/2)+findex*(rs/NFFT))/1e6
print("j3")
#run twice to lose bad starting data
samples0 = sdr.read_samples(NUM_SAMPLES_PER_SCAN)
print("j4")
samples = sdr.read_samples(NUM_SAMPLES_PER_SCAN)
print("j5")
psd_scan, f = psd(samples, NFFT=NFFT)
# blank the bins around DC (tuner LO leakage shows up at the centre)
psd_scan[centerindex-3:centerindex+3]=np.nan
graphdata=10*np.log10(psd_scan)-sdr.gain
graphdatasmoothed=graphdata
print("k")
# --- figure: spectrum (top axes) and in-band power history (bottom axes) ---
plt.minorticks_on();
fig, axarr = plt.subplots(2)
ax=axarr[0];
ax2=axarr[1];
ax.grid(True)
ax.set_title("RF Intensity")
ax.set_xlabel("Frequency (MHz)")
ax.set_ylabel("Signal (dB)")
print("l")
ax2.minorticks_on();
ax2.grid(b=True,which='major')
ax2.grid(b=True,which='minor')
ax2.set_xlabel("sample #")
ax2.set_ylabel("Peak Signal (dB)")
print("m")
fig.subplots_adjust(hspace=.5)
print("n")
# marker line plus the two grey band-edge guide lines
vline=ax.axvline(x=freq[cindex],color="#7700ff")
lindex=max(0,cindex-bw)
rindex=min(NFFT-1,cindex+bw)
vlinel=ax.axvline(x=freq[lindex],color='#b0b0b0')
vliner=ax.axvline(x=freq[rindex],color='#b0b0b0')
print("o")
tsdata=[]
print("p")
# line2 = smoothed trace (light), line1 = live trace, line3 = power history
line2,=ax.plot(freq,graphdatasmoothed,color="#e3ccff")
line1,=ax.plot(freq,graphdata)
tsplot=tsdata[0:10]
line3,=ax2.plot(np.zeros(timelength), color="#5e0da5")
point, =ax.plot(freq[cindex],graphdata[cindex],'rx')
ax.ticklabel_format(useOffset=False)
canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().grid(row=0,columnspan=6,sticky=Tk.NSEW)
print("q")
#toolbar = NavigationToolbar2TkAgg( canvas, root )
#toolbar.update()
canvas._tkcanvas.grid(row=0,columnspan=8)
print("r")
# readout labels populated each refresh by RealtimePlottter
var1 = Tk.StringVar(root,"")
var2 = Tk.StringVar(root,"")
var3 = Tk.StringVar(root,"")
var4 = Tk.StringVar(root,"")
var5 = Tk.StringVar(root,"")
var6 = Tk.StringVar(root,"")
print("s")
def __init__(self, master):
    # NOTE(review): stray top-level function -- it is never attached to a
    # class and appears to be never called; presumably left over from a
    # refactor.
    master.columnconfigure(0, weight=1)
    master.rowconfigure(0, weight=1)
    print("init")

def click(event):
    # Mouse handler: move the marker to the frequency bin nearest the
    # clicked x position.  (The print(<function>) calls in these handlers
    # print the function object itself -- presumably debug traces.)
    global cindex,lindex,rindex
    x=event.x
    y=event.y
    inv = ax.transData.inverted()
    datapos = inv.transform((x,y))
    cindex = np.argmin(np.abs(freq - datapos[0]))
    lindex=max(0,cindex-bw)
    rindex=min(NFFT-1,cindex+bw)
    # NOTE(review): unlike left()/right(), the band-edge guide lines
    # (vlinel/vliner) are not moved here -- confirm whether intentional.
    vline.set(xdata=freq[cindex])
    print(click)

def left(event):
    # Nudge the marker one bin down in frequency; move marker and guides.
    global cindex,bw,rindex,lindex
    cindex=cindex-1
    lindex=max(0,cindex-bw)
    rindex=min(NFFT-1,cindex+bw)
    vline.set(xdata=freq[cindex])
    vlinel.set(xdata=freq[lindex])
    vliner.set(xdata=freq[rindex])
    print(left)

def right(event):
    # Nudge the marker one bin up in frequency; move marker and guides.
    global cindex,bw,rindex,lindex
    cindex=cindex+1
    lindex=max(0,cindex-bw)
    rindex=min(NFFT-1,cindex+bw)
    vline.set(xdata=freq[cindex])
    vlinel.set(xdata=freq[lindex])
    vliner.set(xdata=freq[rindex])
    print(right)

def uparrow(event):
    # Retune the SDR centre frequency up by 50 MHz and rebuild the x axis.
    global sdr,NFFT,NUM_SAMPLES_PER_SCAN,freq
    sdr.fc=sdr.fc+50e6
    fc=sdr.fc;
    rs=sdr.rs;
    findex=np.arange(0,NFFT)
    freq=((fc-rs/2)+findex*(rs/NFFT))/1e6
    line1.set_xdata(freq)
    line2.set_xdata(freq)
    # NOTE(review): only this handler calls ax.autoscale(); the other
    # retune handlers do not -- confirm whether intentional.
    ax.autoscale()
    print(uparrow)

def downarrow(event):
    # Retune the SDR centre frequency down by 50 MHz and rebuild the x axis.
    global sdr,NFFT,NUM_SAMPLES_PER_SCAN,freq
    sdr.fc=sdr.fc-50e6
    fc=sdr.fc;
    rs=sdr.rs;
    findex=np.arange(0,NFFT)
    freq=((fc-rs/2)+findex*(rs/NFFT))/1e6
    line1.set_xdata(freq)
    line2.set_xdata(freq)
    print(downarrow)

def spacebar(event): #Changed by Kevin. Ideally, I would make my own keybind.
    # Append (distance, in-band peak level) to a CSV, then step the assumed
    # reflector distance down by 1 cm for the next sample.
    global graphdata, distance
    # I don't remember why I chose this path.
    with open("data_sampled.csv", "a", encoding="utf-8", newline="") as csvfile:
        # Seems overly complicated.
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow([distance, max(graphdata[lindex:rindex])])
    # It assumes that you move the reflector 1cm each time.
    distance = distance - 1
    print(spacebar)

def shiftdownarrow(event):
    # Fine retune: centre frequency down by 2 MHz.
    global sdr,NFFT,NUM_SAMPLES_PER_SCAN,freq
    sdr.fc=sdr.fc-2e6
    fc=sdr.fc;
    rs=sdr.rs;
    findex=np.arange(0,NFFT)
    freq=((fc-rs/2)+findex*(rs/NFFT))/1e6
    line1.set_xdata(freq)
    line2.set_xdata(freq)
    print(shiftdownarrow)

def shiftuparrow(event):
    # Fine retune: centre frequency up by 3 MHz.
    # NOTE(review): asymmetric with shiftdownarrow (-2 MHz) -- confirm
    # whether the 3 MHz step is intentional.
    global sdr,NFFT,NUM_SAMPLES_PER_SCAN,freq
    sdr.fc=sdr.fc+3e6
    fc=sdr.fc;
    rs=sdr.rs;
    findex=np.arange(0,NFFT)
    freq=((fc-rs/2)+findex*(rs/NFFT))/1e6
    line1.set_xdata(freq)
    line2.set_xdata(freq)
    print(shiftuparrow)
def GetData():
    # Acquisition loop: read one block of samples, update the spectrum
    # traces, append the in-band power to the time series, then re-schedule
    # itself via Tk's after() with the user-selected delay.
    global tsdata,graphdata,graphdatasmoothed,f,sdr,NUM_SAMPLES_PER_SCAN,NFFT,centerindex,tsplot,lindex,rindex
    samples = sdr.read_samples(NUM_SAMPLES_PER_SCAN)
    psd_scan, f = psd(samples, NFFT=NFFT)
    # blank the DC-leakage bins at the centre of the span
    psd_scan[centerindex-3:centerindex+3]=np.nan
    graphdata=10*np.log10(psd_scan)-sdr.gain
    # exponential smoothing of the displayed trace
    graphdatasmoothed=graphdatasmoothed*.8+graphdata*.2
    # NOTE(review): `window` is computed with the *previous* lindex/rindex;
    # the band limits are refreshed from the slider only afterwards --
    # confirm whether the one-update lag is intentional.
    window=10**(graphdata[lindex:rindex]/10)
    bw=wBW.get()
    lindex=max(0,cindex-bw)
    rindex=min(NFFT-1,cindex+bw)
    # total power (dB) integrated over the marked band
    BWpower=10*np.log10(sum(window))
    #y=max(graphdata[lindex:rindex])
    tsdata.append(BWpower);
    #tsdata.append(lindex);
    # keep only the most recent `timelength` points, zero-padded to length
    tsplot=tsdata[-timelength:];
    tsplot = tsplot + [0]*(timelength - len(tsplot))
    root.after(wScale2.get(),GetData)

def RealtimePlottter():
    # Display loop: push the latest data into the plot artists and the
    # readout labels, then re-schedule itself every 25 ms.
    global graphdata,line1,point,bw,tsdata,line3
    bw=wBW.get()
    freq_template = 'mark freq = %.4f MHz'
    v2_template = 'mark = %.2f dB, '
    v3_template = 'ave = %.2f dB'
    v4_template = 'peak = %.2f dB, '
    v5_template = 'ave = %.2f dB'
    v6_template = 'BW power = %.2f dB'
    ax.set_ylim([wVbottom.get(),wVtop.get()])
    ax2.set_ylim([wVbottom.get(),wVtop.get()])
    ax.set_xlim([min(freq),max(freq)])
    ax2.set_xlim([0,timelength-1])
    line1.set_ydata(graphdata)
    line2.set_ydata(graphdatasmoothed)
    line3.set_ydata(tsplot);
    lindex=max(0,cindex-bw)
    rindex=min(NFFT-1,cindex+bw)
    vline.set(xdata=freq[cindex])
    vlinel.set(xdata=freq[lindex])
    vliner.set(xdata=freq[rindex])
    var1.set(freq_template % (freq[cindex]))
    var2.set(v2_template % (graphdata[cindex]))
    var3.set(v3_template % (graphdatasmoothed[cindex]))
    var4.set(v4_template % (max(graphdata[lindex:rindex])))
    var5.set(v5_template % (max(graphdatasmoothed[lindex:rindex])))
    var6.set(v6_template % (tsdata[-1]))
    point.set_data(freq[cindex],graphdata[cindex])
    canvas.draw()
    root.after(25,RealtimePlottter)
def cleardata():
    # Reset the in-band power history (rebinds the module-global list).
    global tsdata
    tsdata=[]
    print(cleardata)

def _quit():
    # Save the collected time series and shut the GUI down cleanly.
    root.quit()     # stops mainloop
    np.savetxt('SAdata.txt', tsdata)
    root.destroy()  # this is necessary on Windows to prevent
                    # Fatal Python Error: PyEval_RestoreThread: NULL tstate
# --- widget creation, key bindings, grid layout and main loop ---
canvas._tkcanvas.bind('<Button-1>',click)
print("t")
button = Tk.Button(master=root, text='Self-destruct', command=_quit)
print("u")
clearbutton= Tk.Button(master=root, text='Clear data',command=cleardata)
print("v")
# display range sliders (dB) for both axes
wVtop = Tk.Scale(master=root,label="Signal Max", from_=0, to=-100,orient="vertical")
wVtop.set(-10)
wVbottom = Tk.Scale(master=root,label="Signal Min", from_=0, to=-100,orient="vertical")
wVbottom.set(-80)
print("w")
# acquisition delay (ms between GetData reschedules) and band half-width
wScale2 = Tk.Scale(master=root,label="Delay:", from_=1, to=200,sliderlength=30,orient=Tk.HORIZONTAL)
wScale2.set(10)
wBW = Tk.Scale(master=root,label="Bandwidth:", from_=1, to=128,sliderlength=20,length=200,orient=Tk.HORIZONTAL)
wBW.set(10)
print("x")
root.bind('<Left>',left)
root.bind('<Right>',right)
root.bind('<Up>',uparrow)
root.bind('<Down>',downarrow)
root.bind('<Shift-Up>',shiftuparrow)
root.bind('<Shift-Down>',shiftdownarrow)
root.bind('<space>',spacebar)
print("y")
wValue1= Tk.Label(master=root,textvariable=var1,font=("Helvetica", 24))
wValue2= Tk.Label(master=root,textvariable=var2,font=("Helvetica", 36))
wValue3= Tk.Label(master=root,textvariable=var3,font=("Helvetica", 36))
wValue4= Tk.Label(master=root,textvariable=var4,font=("Helvetica", 36))
wValue5= Tk.Label(master=root,textvariable=var5,font=("Helvetica", 36))
wValue6= Tk.Label(master=root,textvariable=var6,font=("Helvetica", 36))
print("z")
#figure is in 0,0
wScale2.grid(row=1,column=0)
print("aa")
wBW.grid(row=1,column=1,columnspan=2)
wVtop.grid(row=2,column=1)
wVbottom.grid(row=2,column=2)
wValue1.grid(row=1,column=3)
wValue2.grid(row=2,column=3)
wValue3.grid(row=2,column=4)
wValue4.grid(row=3,column=3)
wValue5.grid(row=3,column=4)
wValue6.grid(row=4,column=3)
button.grid(row=5,column=3) #quit button
clearbutton.grid(row=3,column=1)
print("ab")
root.protocol("WM_DELETE_WINDOW", _quit)
# kick off the acquisition and display loops, then hand control to Tk
root.after(100,GetData)
root.after(100,RealtimePlottter)
print("ac")
Tk.mainloop()
print("ad")
| gpl-3.0 |
mutirri/bokeh | bokeh/session.py | 42 | 20253 | ''' The session module provides the Session class, which encapsulates a
connection to a Document that resides on a Bokeh server.
The Session class provides methods for creating, loading and storing
documents and objects, as well as methods for user-authentication. These
are useful when the server is run in multi-user mode.
'''
from __future__ import absolute_import, print_function
#--------
# logging
#--------
import logging
logger = logging.getLogger(__name__)
#-------------
# standard lib
#-------------
import time
import json
from os import makedirs
from os.path import expanduser, exists, join
import tempfile
#------------
# third party
#------------
from six.moves.urllib.parse import urlencode
from requests.exceptions import ConnectionError
#---------
# optional
#---------
try:
import pandas as pd
import tables
has_pandas = True
except ImportError as e:
has_pandas = False
#--------
# project
#--------
from . import browserlib
from . import protocol
from .embed import autoload_server
from .exceptions import DataIntegrityException
from .util.notebook import publish_display_data
from .util.serialization import dump, get_json, urljoin
DEFAULT_SERVER_URL = "http://localhost:5006/"
class Session(object):
    """ Encapsulate a connection to a document stored on a Bokeh Server.

    Args:
        name (str, optional) : name of server
        root_url (str, optional) : root url of server
        userapikey (str, optional) : (default: "nokey")
        username (str, optional) : (default: "defaultuser")
        load_from_config (bool, optional) :
            Whether to load login information from config. (default: True)
            If False, then we may overwrite the user's config.
        configdir (str) : location of user configuration information

    Attributes:
        base_url (str) :
        configdir (str) :
        configfile (str) :
        http_session (requests.session) :
        userapikey (str) :
        userinfo (dict) :
        username (str) :

    """
    def __init__(
            self,
            name = DEFAULT_SERVER_URL,
            root_url = DEFAULT_SERVER_URL,
            userapikey = "nokey",
            username = "defaultuser",
            load_from_config = True,
            configdir = None,
            ):
        self.name = name
        if not root_url.endswith("/"):
            logger.warning("root_url should end with a /, adding one")
            root_url = root_url + "/"
        self.root_url = root_url
        # single user mode case
        self.userapikey = userapikey
        self.username = username
        self._configdir = None
        if configdir:
            self.configdir = configdir
        if load_from_config:
            # may overwrite root_url/username/userapikey from the saved config
            self.load()

    @property
    def http_session(self):
        # lazily create and cache one requests.Session for all calls
        if hasattr(self, "_http_session"):
            return self._http_session
        else:
            import requests
            self._http_session = requests.session()
            return self._http_session

    # username/userapikey are stored as HTTP headers on the session so that
    # every request made through it is automatically authenticated
    @property
    def username(self):
        return self.http_session.headers.get('BOKEHUSER')

    @username.setter
    def username(self, val):
        self.http_session.headers.update({'BOKEHUSER': val})

    @property
    def userapikey(self):
        return self.http_session.headers.get('BOKEHUSER-API-KEY')

    @userapikey.setter
    def userapikey(self, val):
        self.http_session.headers.update({'BOKEHUSER-API-KEY': val})

    @property
    def configdir(self):
        """ filename where our config are stored. """
        if self._configdir:
            return self._configdir
        # default to ~/.bokeh, creating it on first use
        bokehdir = join(expanduser("~"), ".bokeh")
        if not exists(bokehdir):
            makedirs(bokehdir)
        return bokehdir

    # for testing
    @configdir.setter
    def configdir(self, path):
        self._configdir = path

    @property
    def configfile(self):
        return join(self.configdir, "config.json")

    def load_dict(self):
        # read the whole config file as a dict ({} if it does not exist yet)
        configfile = self.configfile
        if not exists(configfile):
            data = {}
        else:
            with open(configfile, "r") as f:
                data = json.load(f)
        return data

    def load(self):
        """ Loads the server configuration information from disk

        Returns:
            None

        """
        config_info = self.load_dict().get(self.name, {})
        print("Using saved session configuration for %s" % self.name)
        print("To override, pass 'load_from_config=False' to Session")
        self.root_url = config_info.get('root_url', self.root_url)
        self.userapikey = config_info.get('userapikey', self.userapikey)
        self.username = config_info.get('username', self.username)

    def save(self):
        """ Save the server configuration information to JSON

        Returns:
            None

        """
        data = self.load_dict()
        # entries are keyed by session name so several servers can coexist
        data[self.name] = {'root_url': self.root_url,
                           'userapikey': self.userapikey,
                           'username': self.username}
        configfile = self.configfile
        with open(configfile, "w+") as f:
            json.dump(data, f)
def _submit_credentials(self, endpoint, username, password):
    """POST username/password to *endpoint*; on success store the username
    and returned API key on the session and persist them to the config.

    Raises:
        RuntimeError : on a non-200 response or a server-reported error
    """
    url = urljoin(self.root_url, endpoint)
    result = self.execute('post', url, data={
        'username': username,
        'password': password,
        'api': 'true'
    })
    if result.status_code != 200:
        raise RuntimeError("Unknown Error")
    result = get_json(result)
    if not result['status']:
        raise RuntimeError(result['error'])
    self.username = username
    self.userapikey = result['userapikey']
    self.save()

def register(self, username, password):
    ''' Register a new user with a bokeh server.

    .. note::
        This is useful in multi-user mode.

    Args:
        username (str) : user name to register
        password (str) : user password for account

    Returns:
        None

    '''
    # shared logic lives in _submit_credentials; register and login only
    # differ in the endpoint they hit
    self._submit_credentials("bokeh/register", username, password)

def login(self, username, password):
    ''' Log a user into a bokeh server.

    .. note::
        This is useful in multi-user mode.

    Args:
        username (str) : user name to log in
        password (str) : user password

    Returns:
        None

    '''
    # Fix vs original: login() called self.save() a second time after the
    # success branch (redundant on success, unreachable after the raise);
    # the single save now happens inside _submit_credentials.
    self._submit_credentials("bokeh/login", username, password)
def browser_login(self):
    """ Open a browser with a token that logs the user into a bokeh server.

    .. note::
        This is useful in multi-user mode.

    Return:
        None

    """
    controller = browserlib.get_browser_controller()
    url = urljoin(self.root_url, "bokeh/loginfromapikey")
    # credentials travel as query parameters of the login URL
    url += "?" + urlencode({'username': self.username,
                            'userapikey': self.userapikey})
    controller.open(url)

def data_source(self, name, data):
    """ Makes and uploads a server data source to the server.

    .. note::
        The server must be configured with a data directory.

    Args:
        name (str) : name for the data source object
        data (pd.DataFrame or np.array) : data to upload

    Returns:
        a ServerDataSource

    """
    # not supported by this Session implementation
    raise NotImplementedError

def list_data(self):
    """ Return all the data soruces on the server.

    Returns:
        sources : JSON

    """
    # not supported by this Session implementation
    raise NotImplementedError

def publish(self):
    """Publish the current document on the server."""
    url = urljoin(self.root_url, "/bokeh/%s/publish" % self.docid)
    self.post_json(url)

def execute(self, method, url, headers=None, **kwargs):
    """ Execute an HTTP request using the current session and return the
    response.

    Args:
        method (string) : 'get' or 'post'
        url (string) : url
        headers (dict, optional) : any extra HTTP headers

    Keyword Args:
        Any extra arguments to pass into the requests library

    Returns:
        response

    """
    import requests
    import warnings
    # dispatch to requests.Session.get / .post by name
    func = getattr(self.http_session, method)
    try:
        resp = func(url, headers=headers, **kwargs)
    except requests.exceptions.ConnectionError as e:
        warnings.warn("You need to start the bokeh-server to see this example.")
        raise e
    # 409: the document was modified concurrently on the server
    if resp.status_code == 409:
        raise DataIntegrityException
    if resp.status_code == 401:
        raise Exception('HTTP Unauthorized accessing')
    return resp
def execute_json(self, method, url, headers=None, **kwargs):
    """ Same as execute, except ensures the JSON content-type header is
    set and interprets and returns the JSON response.

    Fix vs original: the caller's ``headers`` dict is copied before the
    content-type is added, so the argument is never mutated as a side
    effect.
    """
    headers = {} if headers is None else dict(headers)
    headers['content-type'] = 'application/json'
    resp = self.execute(method, url, headers=headers, **kwargs)
    return get_json(resp)
def get_json(self, url, headers=None, **kwargs):
    """ Return the result of an HTTP 'get'.

    Args:
        url (str) : the URL for the 'get' request
        headers (dict, optional) : any extra HTTP headers

    Keyword Args:
        Any extra arguments to pass into the requests library

    Returns:
        response: JSON

    """
    return self.execute_json('get', url, headers=headers, **kwargs)

def post_json(self, url, headers=None, **kwargs):
    """ Return the result of an HTTP 'post'

    Args:
        url (str) : the URL for the 'get' request
        headers (dict, optional) : any extra HTTP headers

    Keyword Args:
        Any extra arguments to pass into the requests library

    Returns:
        response: JSON

    """
    return self.execute_json('post', url, headers=headers, **kwargs)

@property
def userinfo(self):
    # fetched once from the server, then cached on the instance
    if not hasattr(self, "_userinfo"):
        url = urljoin(self.root_url, 'bokeh/userinfo/')
        self._userinfo = self.get_json(url)
    return self._userinfo

@userinfo.setter
def userinfo(self, val):
    self._userinfo = val

@property
def base_url(self):
    # root of the backbone-style object API on the server
    return urljoin(self.root_url, "bokeh/bb/")

def get_api_key(self, docid):
    """ Retrieve the document API key from the server.

    Args:
        docid (string) : docid of the document to retrive API key for

    Returns:
        apikey : string

    """
    url = urljoin(self.root_url,"bokeh/getdocapikey/%s" % docid)
    apikey = self.get_json(url)
    # server returns either a read-write or a read-only key
    if 'apikey' in apikey:
        apikey = apikey['apikey']
        logger.info('got read write apikey')
    else:
        apikey = apikey['readonlyapikey']
        logger.info('got read only apikey')
    return apikey

def find_doc(self, name):
    """ Return the docid of the document with a title matching ``name``.

    .. note::
        Creates a new document with the given title if one is not found.

    Args:
        name (string) : name for the document

    Returns:
        docid : str

    """
    docs = self.userinfo.get('docs')
    matching = [x for x in docs if x.get('title') == name]
    if len(matching) == 0:
        logger.info("No documents found, creating new document '%s'" % name)
        # make_doc refreshes userinfo, so the recursive call will find it
        self.make_doc(name)
        return self.find_doc(name)
    elif len(matching) > 1:
        logger.warning("Multiple documents with name '%s'" % name)
    return matching[0]['docid']

def use_doc(self, name=None, docid=None):
    """ Configure the session to use a given document.

    Args:
        name (str, optional) : name of the document to use
        docid (str, optional) : id of the document to use

    .. note::
        only one of ``name`` or ``docid`` may be supplied.

        Creates a document for with the given name if one is not present on
        the server.

    Returns:
        None

    """
    if docid is not None and name is not None:
        raise ValueError("only one of 'name' or 'docid' can be supplied to use_doc(...)")
    if docid:
        self.docid = docid
    else:
        self.docid = self.find_doc(name)
    self.apikey = self.get_api_key(self.docid)

def make_doc(self, title):
    """ Makes a new document with the given title on the server

    .. note:: user information is reloaded

    Returns:
        None

    """
    url = urljoin(self.root_url,"bokeh/doc/")
    data = protocol.serialize_json({'title' : title})
    # the server responds with the refreshed user info, which we cache
    self.userinfo = self.post_json(url, data=data)

def pull(self, typename=None, objid=None):
    """ Pull JSON objects from the server.

    Returns a specific object if both ``typename`` and ``objid`` are
    supplied. Otherwise, returns all objects for the currently configured
    document.

    This is a low-level function.

    Args:
        typename (str, optional) : name of the type of object to pull
        objid (str, optional) : ID of the object to pull

    .. note::
        you must supply either ``typename`` AND ``objid`` or omit both.

    Returns:
        attrs : JSON

    """
    if typename is None and objid is None:
        # no selector: pull every object in the document
        url = urljoin(self.base_url, self.docid +"/")
        attrs = self.get_json(url)
    elif typename is None or objid is None:
        raise ValueError("typename and objid must both be None, or neither.")
    else:
        url = urljoin(
            self.base_url,
            self.docid + "/" + typename + "/" + objid + "/"
        )
        attr = self.get_json(url)
        # normalise the single-object response to the list-of-dicts shape
        attrs = [{
            'type': typename,
            'id': objid,
            'attributes': attr
        }]
    return attrs
    def push(self, *jsonobjs):
        """ Push JSON objects to the server.
        This is a low-level function.
        Args:
            *jsonobjs (JSON) : objects to push to the server
        Returns:
            None
        """
        data = protocol.serialize_json(jsonobjs)
        # NOTE(review): urljoin is called with three path segments; the
        # stdlib urljoin would treat the third argument as allow_fragments
        # and drop "bulkupsert" -- presumably this is a project helper that
        # joins multiple segments. Confirm which urljoin this file imports.
        url = urljoin(self.base_url, self.docid + "/", "bulkupsert")
        self.post_json(url, data=data)
    def gc(self):
        """Ask the server to garbage-collect the current document's storage."""
        # NOTE(review): three-argument urljoin, same assumption as in push():
        # presumed project helper joining all segments (stdlib would drop "gc").
        url = urljoin(self.base_url, self.docid + "/", "gc")
        self.post_json(url)
# convenience functions to use a session and store/fetch from server
def load_document(self, doc):
""" Loads data for the session and merge with the given document.
Args:
doc (Document) : document to load data into
Returns:
None
"""
self.gc()
json_objs = self.pull()
doc.merge(json_objs)
doc.docid = self.docid
def load_object(self, obj, doc):
""" Update an object in a document with data pulled from the server.
Args:
obj (PlotObject) : object to be updated
doc (Document) : the object's document
Returns:
None
"""
assert obj._id in doc._models
attrs = self.pull(typename=obj.__view_model__, objid=obj._id)
doc.load(*attrs)
def store_document(self, doc, dirty_only=True):
""" Store a document on the server.
Returns the models that were actually pushed.
Args:
doc (Document) : the document to store
dirty_only (bool, optional) : whether to store only dirty objects. (default: True)
Returns:
models : list[PlotObject]
"""
doc._add_all()
models = doc._models.values()
if dirty_only:
models = [x for x in models if getattr(x, '_dirty', False)]
json_objs = doc.dump(*models)
self.push(*json_objs)
for model in models:
model._dirty = False
return models
def store_objects(self, *objs, **kwargs):
""" Store objects on the server
Returns the objects that were actually stored.
Args:
*objs (PlotObject) : objects to store
Keywords Args:
dirty_only (bool, optional) : whether to store only dirty objects. (default: True)
Returns:
models : set[PlotObject]
"""
models = set()
for obj in objs:
models.update(obj.references())
if kwargs.pop('dirty_only', True):
models = list(models)
json_objs = dump(models, self.docid)
self.push(*json_objs)
for model in models:
model._dirty = False
return models
def object_link(self, obj):
""" Return a URL to a server page that will render the given object.
Args:
obj (PlotObject) : object to render
Returns:
URL string
"""
link = "bokeh/doc/%s/%s" % (self.docid, obj._id)
return urljoin(self.root_url, link)
def show(self, obj):
""" Display an object as HTML in IPython using its display protocol.
Args:
obj (PlotObject) : object to display
Returns:
None
"""
data = {'text/html': autoload_server(obj, self)}
publish_display_data(data)
def poll_document(self, document, interval=0.5):
""" Periodically ask the server for updates to the `document`. """
try:
while True:
self.load_document(document)
time.sleep(interval)
except KeyboardInterrupt:
print()
except ConnectionError:
print("Connection to bokeh-server was terminated")
    # helper methods
    def _prep_data_source_df(self, name, dataframe):
        """Write *dataframe* to a fresh temporary HDF5 file and return its path.
        Note: the *name* parameter is ignored -- it is immediately overwritten
        with a newly generated temporary file name below.
        """
        name = tempfile.NamedTemporaryFile(prefix="bokeh_data",
                                           suffix=".pandas").name
        store = pd.HDFStore(name)
        store.append("__data__", dataframe, format="table", data_columns=True)
        store.close()
        return name
    def _prep_data_source_numpy(self, name, arr):
        """Write array *arr* to a fresh temporary PyTables file and return its path.
        Note: the *name* parameter is ignored (overwritten below); uses the
        legacy PyTables ``createArray`` API.
        """
        name = tempfile.NamedTemporaryFile(prefix="bokeh_data",
                                           suffix=".table").name
        store = tables.File(name, 'w')
        store.createArray("/", "__data__", obj=arr)
        store.close()
        return name
class TestSession(Session):
    """Currently, register and login do not work, everything else should work
    in theory, but we'll have to test this as we go along and convert tests
    """
    def __init__(self, *args, **kwargs):
        # Tests never read the on-disk config unless explicitly requested.
        kwargs.setdefault('load_from_config', False)
        self.client = kwargs.pop('client')
        self.headers = {}
        super(TestSession, self).__init__(*args, **kwargs)
    @property
    def username(self):
        return self.headers.get('BOKEHUSER')
    @username.setter
    def username(self, val):
        self.headers['BOKEHUSER'] = val
    @property
    def userapikey(self):
        return self.headers.get('BOKEHUSER-API-KEY')
    @userapikey.setter
    def userapikey(self, val):
        self.headers['BOKEHUSER-API-KEY'] = val
    def execute(self, method, url, headers=None, **kwargs):
        # Dispatch through the injected test client instead of real HTTP.
        if headers is None:
            headers = {}
        do_request = getattr(self.client, method)
        resp = do_request(url, headers=headers, **kwargs)
        if resp.status_code == 409:
            raise DataIntegrityException
        if resp.status_code == 401:
            raise Exception('HTTP Unauthorized accessing')
        return resp
| bsd-3-clause |
rayNymous/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/dviread.py | 69 | 29920 | """
An experimental module for reading dvi files output by TeX. Several
limitations make this not (currently) useful as a general-purpose dvi
preprocessor.
Interface::
dvi = Dvi(filename, 72)
for page in dvi: # iterate over pages
w, h, d = page.width, page.height, page.descent
for x,y,font,glyph,width in page.text:
fontname = font.texname
pointsize = font.size
...
for x,y,height,width in page.boxes:
...
"""
import errno
import matplotlib
import matplotlib.cbook as mpl_cbook
import numpy as np
import struct
import subprocess
_dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
class Dvi(object):
    """
    A dvi ("device-independent") file, as produced by TeX.
    The current implementation only reads the first page and does not
    even attempt to verify the postamble.
    """
    # NOTE: this module is written for Python 2 (``raise E, msg`` syntax,
    # ``ord()`` on one-byte file reads).
    def __init__(self, filename, dpi):
        """
        Initialize the object. This takes the filename as input and
        opens the file; actually reading the file happens when
        iterating through the pages of the file.
        """
        matplotlib.verbose.report('Dvi: ' + filename, 'debug')
        self.file = open(filename, 'rb')
        self.dpi = dpi
        self.fonts = {}
        self.state = _dvistate.pre
    def __iter__(self):
        """
        Iterate through the pages of the file.
        Returns (text, pages) pairs, where:
        text is a list of (x, y, fontnum, glyphnum, width) tuples
        boxes is a list of (x, y, height, width) tuples
        The coordinates are transformed into a standard Cartesian
        coordinate system at the dpi value given when initializing.
        The coordinates are floating point numbers, but otherwise
        precision is not lost and coordinate values are not clipped to
        integers.
        """
        while True:
            have_page = self._read()
            if have_page:
                yield self._output()
            else:
                break
    def close(self):
        """
        Close the underlying file if it is open.
        """
        if not self.file.closed:
            self.file.close()
    def _output(self):
        """
        Output the text and boxes belonging to the most recent page.
        page = dvi._output()
        """
        # Compute the bounding box over all glyphs and rules on the page.
        minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
        maxy_pure = -np.inf
        for elt in self.text + self.boxes:
            if len(elt) == 4: # box
                x,y,h,w = elt
                e = 0 # zero depth
            else: # glyph
                x,y,font,g,w = elt
                h = _mul2012(font._scale, font._tfm.height[g])
                e = _mul2012(font._scale, font._tfm.depth[g])
            minx = min(minx, x)
            miny = min(miny, y - h)
            maxx = max(maxx, x + w)
            maxy = max(maxy, y + e)
            maxy_pure = max(maxy_pure, y)
        if self.dpi is None:
            # special case for ease of debugging: output raw dvi coordinates
            return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,
                                   width=maxx-minx, height=maxy_pure-miny,
                                   descent=maxy-maxy_pure)
        d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units
        text = [ ((x-minx)*d, (maxy-y)*d, f, g, w*d)
                 for (x,y,f,g,w) in self.text ]
        boxes = [ ((x-minx)*d, (maxy-y)*d, h*d, w*d) for (x,y,h,w) in self.boxes ]
        return mpl_cbook.Bunch(text=text, boxes=boxes,
                               width=(maxx-minx)*d,
                               height=(maxy_pure-miny)*d,
                               descent=(maxy-maxy_pure)*d)
    def _read(self):
        """
        Read one page from the file. Return True if successful,
        False if there were no more pages.
        """
        while True:
            byte = ord(self.file.read(1))
            self._dispatch(byte)
#             if self.state == _dvistate.inpage:
#                 matplotlib.verbose.report(
#                     'Dvi._read: after %d at %f,%f' %
#                     (byte, self.h, self.v),
#                     'debug-annoying')
            if byte == 140: # end of page
                return True
            if self.state == _dvistate.post_post: # end of file
                self.close()
                return False
    def _arg(self, nbytes, signed=False):
        """
        Read and return an integer argument "nbytes" long.
        Signedness is determined by the "signed" keyword.
        """
        # NOTE(review): ``str`` shadows the builtin here; harmless in this
        # scope, kept as-is in this legacy Python 2 module.
        str = self.file.read(nbytes)
        value = ord(str[0])
        if signed and value >= 0x80:
            value = value - 0x100
        for i in range(1, nbytes):
            value = 0x100*value + ord(str[i])
        return value
    def _dispatch(self, byte):
        """
        Based on the opcode "byte", read the correct kinds of
        arguments from the dvi file and call the method implementing
        that opcode with those arguments.
        """
        # Opcode table follows the DVI format specification.
        if 0 <= byte <= 127: self._set_char(byte)
        elif byte == 128: self._set_char(self._arg(1))
        elif byte == 129: self._set_char(self._arg(2))
        elif byte == 130: self._set_char(self._arg(3))
        elif byte == 131: self._set_char(self._arg(4, True))
        elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True))
        elif byte == 133: self._put_char(self._arg(1))
        elif byte == 134: self._put_char(self._arg(2))
        elif byte == 135: self._put_char(self._arg(3))
        elif byte == 136: self._put_char(self._arg(4, True))
        elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True))
        elif byte == 138: self._nop()
        elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)])
        elif byte == 140: self._eop()
        elif byte == 141: self._push()
        elif byte == 142: self._pop()
        elif byte == 143: self._right(self._arg(1, True))
        elif byte == 144: self._right(self._arg(2, True))
        elif byte == 145: self._right(self._arg(3, True))
        elif byte == 146: self._right(self._arg(4, True))
        elif byte == 147: self._right_w(None)
        elif byte == 148: self._right_w(self._arg(1, True))
        elif byte == 149: self._right_w(self._arg(2, True))
        elif byte == 150: self._right_w(self._arg(3, True))
        elif byte == 151: self._right_w(self._arg(4, True))
        elif byte == 152: self._right_x(None)
        elif byte == 153: self._right_x(self._arg(1, True))
        elif byte == 154: self._right_x(self._arg(2, True))
        elif byte == 155: self._right_x(self._arg(3, True))
        elif byte == 156: self._right_x(self._arg(4, True))
        elif byte == 157: self._down(self._arg(1, True))
        elif byte == 158: self._down(self._arg(2, True))
        elif byte == 159: self._down(self._arg(3, True))
        elif byte == 160: self._down(self._arg(4, True))
        elif byte == 161: self._down_y(None)
        elif byte == 162: self._down_y(self._arg(1, True))
        elif byte == 163: self._down_y(self._arg(2, True))
        elif byte == 164: self._down_y(self._arg(3, True))
        elif byte == 165: self._down_y(self._arg(4, True))
        elif byte == 166: self._down_z(None)
        elif byte == 167: self._down_z(self._arg(1, True))
        elif byte == 168: self._down_z(self._arg(2, True))
        elif byte == 169: self._down_z(self._arg(3, True))
        elif byte == 170: self._down_z(self._arg(4, True))
        elif 171 <= byte <= 234: self._fnt_num(byte-171)
        elif byte == 235: self._fnt_num(self._arg(1))
        elif byte == 236: self._fnt_num(self._arg(2))
        elif byte == 237: self._fnt_num(self._arg(3))
        elif byte == 238: self._fnt_num(self._arg(4, True))
        elif 239 <= byte <= 242:
            # xxx1..xxx4: "special" commands with a length prefix
            len = self._arg(byte-238)
            special = self.file.read(len)
            self._xxx(special)
        elif 243 <= byte <= 246:
            # fnt_def1..fnt_def4: font definitions
            k = self._arg(byte-242, byte==246)
            c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ]
            n = self.file.read(a+l)
            self._fnt_def(k, c, s, d, a, l, n)
        elif byte == 247:
            i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ]
            x = self.file.read(k)
            self._pre(i, num, den, mag, x)
        elif byte == 248: self._post()
        elif byte == 249: self._post_post()
        else:
            raise ValueError, "unknown command: byte %d"%byte
    def _pre(self, i, num, den, mag, comment):
        if self.state != _dvistate.pre:
            raise ValueError, "pre command in middle of dvi file"
        if i != 2:
            raise ValueError, "Unknown dvi format %d"%i
        if num != 25400000 or den != 7227 * 2**16:
            raise ValueError, "nonstandard units in dvi file"
            # meaning: TeX always uses those exact values, so it
            # should be enough for us to support those
            # (There are 72.27 pt to an inch so 7227 pt =
            # 7227 * 2**16 sp to 100 in. The numerator is multiplied
            # by 10^5 to get units of 10**-7 meters.)
        if mag != 1000:
            raise ValueError, "nonstandard magnification in dvi file"
            # meaning: LaTeX seems to frown on setting \mag, so
            # I think we can assume this is constant
        self.state = _dvistate.outer
    def _set_char(self, char):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced set_char in dvi file"
        # set_char typesets the glyph and advances h by its width.
        self._put_char(char)
        self.h += self.fonts[self.f]._width_of(char)
    def _set_rule(self, a, b):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced set_rule in dvi file"
        self._put_rule(a, b)
        self.h += b
    def _put_char(self, char):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced put_char in dvi file"
        font = self.fonts[self.f]
        if font._vf is None:
            self.text.append((self.h, self.v, font, char,
                              font._width_of(char)))
#             matplotlib.verbose.report(
#                 'Dvi._put_char: %d,%d %d' %(self.h, self.v, char),
#                 'debug-annoying')
        else:
            # Virtual font: expand the glyph's packet of sub-glyphs/boxes,
            # rescaling everything into this font's scale.
            scale = font._scale
            for x, y, f, g, w in font._vf[char].text:
                newf = DviFont(scale=_mul2012(scale, f._scale),
                               tfm=f._tfm, texname=f.texname, vf=f._vf)
                self.text.append((self.h + _mul2012(x, scale),
                                  self.v + _mul2012(y, scale),
                                  newf, g, newf._width_of(g)))
            self.boxes.extend([(self.h + _mul2012(x, scale),
                                self.v + _mul2012(y, scale),
                                _mul2012(a, scale), _mul2012(b, scale))
                               for x, y, a, b in font._vf[char].boxes])
    def _put_rule(self, a, b):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced put_rule in dvi file"
        if a > 0 and b > 0:
            self.boxes.append((self.h, self.v, a, b))
#             matplotlib.verbose.report(
#                 'Dvi._put_rule: %d,%d %d,%d' % (self.h, self.v, a, b),
#                 'debug-annoying')
    def _nop(self):
        pass
    def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
        # Begin-of-page: reset all position registers and page buffers.
        if self.state != _dvistate.outer:
            raise ValueError, \
                "misplaced bop in dvi file (state %d)" % self.state
        self.state = _dvistate.inpage
        self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
        self.stack = []
        self.text = []          # list of (x,y,fontnum,glyphnum)
        self.boxes = []         # list of (x,y,width,height)
    def _eop(self):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced eop in dvi file"
        self.state = _dvistate.outer
        del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
    def _push(self):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced push in dvi file"
        self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
    def _pop(self):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced pop in dvi file"
        self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
    def _right(self, b):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced right in dvi file"
        self.h += b
    def _right_w(self, new_w):
        # w command: move right by w, optionally updating the w register.
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced w in dvi file"
        if new_w is not None:
            self.w = new_w
        self.h += self.w
    def _right_x(self, new_x):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced x in dvi file"
        if new_x is not None:
            self.x = new_x
        self.h += self.x
    def _down(self, a):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced down in dvi file"
        self.v += a
    def _down_y(self, new_y):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced y in dvi file"
        if new_y is not None:
            self.y = new_y
        self.v += self.y
    def _down_z(self, new_z):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced z in dvi file"
        if new_z is not None:
            self.z = new_z
        self.v += self.z
    def _fnt_num(self, k):
        if self.state != _dvistate.inpage:
            raise ValueError, "misplaced fnt_num in dvi file"
        self.f = k
    def _xxx(self, special):
        # Specials are ignored except for debug logging (non-printable
        # bytes are hex-escaped).
        matplotlib.verbose.report(
            'Dvi._xxx: encountered special: %s'
            % ''.join([(32 <= ord(ch) < 127) and ch
                       or '<%02x>' % ord(ch)
                       for ch in special]),
            'debug')
    def _fnt_def(self, k, c, s, d, a, l, n):
        # n holds directory (a bytes) + font name (l bytes); use the name.
        tfm = _tfmfile(n[-l:])
        if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
            raise ValueError, 'tfm checksum mismatch: %s'%n
        # It seems that the assumption behind the following check is incorrect:
        #if d != tfm.design_size:
        #    raise ValueError, 'tfm design size mismatch: %d in dvi, %d in %s'%\
        #        (d, tfm.design_size, n)
        vf = _vffile(n[-l:])
        self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
    def _post(self):
        if self.state != _dvistate.outer:
            raise ValueError, "misplaced post in dvi file"
        self.state = _dvistate.post_post
        # TODO: actually read the postamble and finale?
        # currently post_post just triggers closing the file
    def _post_post(self):
        raise NotImplementedError
class DviFont(object):
    """
    Object that holds a font's texname and size, supports comparison,
    and knows the widths of glyphs in the same units as the AFM file.
    There are also internal attributes (for use by dviread.py) that
    are _not_ used for comparison.
    The size is in Adobe points (converted from TeX points).
    """
    __slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
    def __init__(self, scale, tfm, texname, vf):
        self._scale, self._tfm, self.texname, self._vf = \
            scale, tfm, texname, vf
        # Convert TeX scaled points to Adobe points.
        self.size = scale * (72.0 / (72.27 * 2**16))
        try:
            # max() of an empty iterator raises ValueError (Python 2 API).
            nchars = max(tfm.width.iterkeys())
        except ValueError:
            nchars = 0
        # NOTE(review): range(nchars) stops at nchars-1, so the glyph with
        # the highest code is excluded from widths -- looks like an
        # off-by-one (nchars+1 expected); confirm against upstream dviread.
        self.widths = [ (1000*tfm.width.get(char, 0)) >> 20
                        for char in range(nchars) ]
    def __eq__(self, other):
        # Fonts compare equal on texname and size only; internal attributes
        # (_scale, _tfm, _vf) intentionally do not participate.
        return self.__class__ == other.__class__ and \
            self.texname == other.texname and self.size == other.size
    def __ne__(self, other):
        return not self.__eq__(other)
    def _width_of(self, char):
        """
        Width of char in dvi units. For internal use by dviread.py.
        """
        width = self._tfm.width.get(char, None)
        if width is not None:
            return _mul2012(width, self._scale)
        # Unknown glyph: log and treat as zero-width rather than failing.
        matplotlib.verbose.report(
            'No width for char %d in font %s' % (char, self.texname),
            'debug')
        return 0
class Vf(Dvi):
    """
    A virtual font (\*.vf file) containing subroutines for dvi files.
    Usage::
      vf = Vf(filename)
      glyph = vf[code]
      glyph.text, glyph.boxes, glyph.width
    """
    def __init__(self, filename):
        # A vf file is parsed eagerly: dpi=0 because coordinates stay in
        # raw dvi units until expanded by the including Dvi object.
        Dvi.__init__(self, filename, 0)
        self._first_font = None
        self._chars = {}        # char code -> Bunch(text, boxes, width)
        self._packet_ends = None
        self._read()
        self.close()
    def __getitem__(self, code):
        return self._chars[code]
    def _dispatch(self, byte):
        # If we are in a packet, execute the dvi instructions
        if self.state == _dvistate.inpage:
            byte_at = self.file.tell()-1
            if byte_at == self._packet_ends:
                self._finalize_packet()
                # fall through
            elif byte_at > self._packet_ends:
                raise ValueError, "Packet length mismatch in vf file"
            else:
                if byte in (139, 140) or byte >= 243:
                    raise ValueError, "Inappropriate opcode %d in vf file" % byte
                Dvi._dispatch(self, byte)
                return
        # We are outside a packet
        if byte < 242:          # a short packet (length given by byte)
            cc, tfm = self._arg(1), self._arg(3)
            self._init_packet(byte, cc, tfm)
        elif byte == 242:       # a long packet
            pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ]
            self._init_packet(pl, cc, tfm)
        elif 243 <= byte <= 246:
            # font definitions are shared with the Dvi opcode handler
            Dvi._dispatch(self, byte)
        elif byte == 247:       # preamble
            i, k = self._arg(1), self._arg(1)
            x = self.file.read(k)
            cs, ds = self._arg(4), self._arg(4)
            self._pre(i, x, cs, ds)
        elif byte == 248:       # postamble (just some number of 248s)
            self.state = _dvistate.post_post
        else:
            raise ValueError, "unknown vf opcode %d" % byte
    def _init_packet(self, pl, cc, tfm):
        # Begin a glyph packet of pl bytes for char code cc with width tfm.
        if self.state != _dvistate.outer:
            raise ValueError, "Misplaced packet in vf file"
        self.state = _dvistate.inpage
        self._packet_ends = self.file.tell() + pl
        self._packet_char = cc
        self._packet_width = tfm
        self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
        self.stack, self.text, self.boxes = [], [], []
        self.f = self._first_font
    def _finalize_packet(self):
        self._chars[self._packet_char] = mpl_cbook.Bunch(
            text=self.text, boxes=self.boxes, width = self._packet_width)
        self.state = _dvistate.outer
    def _pre(self, i, x, cs, ds):
        if self.state != _dvistate.pre:
            raise ValueError, "pre command in middle of vf file"
        if i != 202:
            raise ValueError, "Unknown vf format %d" % i
        if len(x):
            matplotlib.verbose.report('vf file comment: ' + x, 'debug')
        self.state = _dvistate.outer
        # cs = checksum, ds = design size
    def _fnt_def(self, k, *args):
        Dvi._fnt_def(self, k, *args)
        # Remember the first defined font: it is the default for packets.
        if self._first_font is None:
            self._first_font = k
def _fix2comp(num):
"""
Convert from two's complement to negative.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
class Tfm(object):
    """
    A TeX Font Metric file. This implementation covers only the bare
    minimum needed by the Dvi class.
    Attributes:
      checksum: for verifying against dvi file
      design_size: design size of the font (in what units?)
      width[i]: width of character \#i, needs to be scaled
        by the factor specified in the dvi file
        (this is a dict because indexing may not start from 0)
      height[i], depth[i]: height and depth of character \#i
    """
    __slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
    def __init__(self, filename):
        matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
        file = open(filename, 'rb')
        try:
            # Words 2-7 of the header give the six table sizes we need:
            # header length, smallest/largest char code, and the number of
            # width/height/depth entries.
            header1 = file.read(24)
            lh, bc, ec, nw, nh, nd = \
                struct.unpack('!6H', header1[2:14])
            matplotlib.verbose.report(
                'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
                    lh, bc, ec, nw, nh, nd), 'debug')
            header2 = file.read(4*lh)
            self.checksum, self.design_size = \
                struct.unpack('!2I', header2[:8])
            # there is also encoding information etc.
            char_info = file.read(4*(ec-bc+1))
            widths = file.read(4*nw)
            heights = file.read(4*nh)
            depths = file.read(4*nd)
        finally:
            file.close()
        self.width, self.height, self.depth = {}, {}, {}
        widths, heights, depths = \
            [ struct.unpack('!%dI' % (len(x)/4), x)
              for x in (widths, heights, depths) ]
        # BUGFIX: the tfm format defines characters bc..ec *inclusive*
        # (char_info holds ec-bc+1 entries, as read above), but the loop
        # previously used range(ec-bc), silently dropping the metrics of
        # the last character (code ec).
        for i in range(ec-bc+1):
            self.width[bc+i] = _fix2comp(widths[ord(char_info[4*i])])
            self.height[bc+i] = _fix2comp(heights[ord(char_info[4*i+1]) >> 4])
            self.depth[bc+i] = _fix2comp(depths[ord(char_info[4*i+1]) & 0xf])
class PsfontsMap(object):
    """
    A psfonts.map formatted file, mapping TeX fonts to PS fonts.
    Usage: map = PsfontsMap('.../psfonts.map'); map['cmr10']
    For historical reasons, TeX knows many Type-1 fonts by different
    names than the outside world. (For one thing, the names have to
    fit in eight characters.) Also, TeX's native fonts are not Type-1
    but Metafont, which is nontrivial to convert to PostScript except
    as a bitmap. While high-quality conversions to Type-1 format exist
    and are shipped with modern TeX distributions, we need to know
    which Type-1 fonts are the counterparts of which native fonts. For
    these reasons a mapping is needed from internal font names to font
    file names.
    A texmf tree typically includes mapping files called e.g.
    psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by
    dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm.
    psfonts.map might avoid embedding the 35 PostScript fonts, while
    the pdf-related files perhaps only avoid the "Base 14" pdf fonts.
    But the user may have configured these files differently.
    """
    __slots__ = ('_font',)
    def __init__(self, filename):
        self._font = {}         # texname -> Bunch(texname, psname, ...)
        file = open(filename, 'rt')
        try:
            self._parse(file)
        finally:
            file.close()
    def __getitem__(self, texname):
        result = self._font[texname]
        fn, enc = result.filename, result.encoding
        # Resolve relative file names lazily through kpsewhich; absolute
        # paths (starting with '/') are used as-is.
        if fn is not None and not fn.startswith('/'):
            result.filename = find_tex_file(fn)
        if enc is not None and not enc.startswith('/'):
            result.encoding = find_tex_file(result.encoding)
        return result
    def _parse(self, file):
        """Parse each line into words."""
        for line in file:
            line = line.strip()
            if line == '' or line.startswith('%'):
                continue
            words, pos = [], 0
            while pos < len(line):
                if line[pos] == '"': # double quoted word
                    pos += 1
                    end = line.index('"', pos)
                    words.append(line[pos:end])
                    pos = end + 1
                else: # ordinary word
                    end = line.find(' ', pos+1)
                    if end == -1: end = len(line)
                    words.append(line[pos:end])
                    pos = end
                # skip the whitespace separating words
                while pos < len(line) and line[pos] == ' ':
                    pos += 1
            self._register(words)
    def _register(self, words):
        """Register a font described by "words".
        The format is, AFAIK: texname fontname [effects and filenames]
        Effects are PostScript snippets like ".177 SlantFont",
        filenames begin with one or two less-than signs. A filename
        ending in enc is an encoding file, other filenames are font
        files. This can be overridden with a left bracket: <[foobar
        indicates an encoding file named foobar.
        There is some difference between <foo.pfb and <<bar.pfb in
        subsetting, but I have no example of << in my TeX installation.
        """
        texname, psname = words[:2]
        effects, encoding, filename = [], None, None
        for word in words[2:]:
            if not word.startswith('<'):
                effects.append(word)
            else:
                word = word.lstrip('<')
                if word.startswith('['):
                    # explicit encoding-file marker: <[name
                    assert encoding is None
                    encoding = word[1:]
                elif word.endswith('.enc'):
                    assert encoding is None
                    encoding = word
                else:
                    assert filename is None
                    filename = word
        self._font[texname] = mpl_cbook.Bunch(
            texname=texname, psname=psname, effects=effects,
            encoding=encoding, filename=filename)
class Encoding(object):
    """
    Parses a \*.enc file referenced from a psfonts.map style file.
    The format this class understands is a very limited subset of
    PostScript.
    Usage (subject to change)::
      for name in Encoding(filename):
          whatever(name)
    """
    __slots__ = ('encoding',)
    def __init__(self, filename):
        file = open(filename, 'rt')
        try:
            matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying')
            self.encoding = self._parse(file)
            matplotlib.verbose.report('Result: ' + `self.encoding`, 'debug-annoying')
        finally:
            file.close()
    def __iter__(self):
        for name in self.encoding:
            yield name
    def _parse(self, file):
        # Tiny state machine: state 0 = looking for the opening '[',
        # state 1 = collecting /names, state 2 = past the closing ']'.
        result = []
        state = 0
        for line in file:
            comment_start = line.find('%')
            if comment_start > -1:
                line = line[:comment_start]
            line = line.strip()
            if state == 0:
                # Expecting something like /FooEncoding [
                if '[' in line:
                    state = 1
                    line = line[line.index('[')+1:].strip()
            if state == 1:
                if ']' in line: # ] def
                    line = line[:line.index(']')]
                    state = 2
                words = line.split()
                for w in words:
                    if w.startswith('/'):
                        # Allow for /abc/def/ghi
                        subwords = w.split('/')
                        result.extend(subwords[1:])
                    else:
                        raise ValueError, "Broken name in encoding file: " + w
        return result
def find_tex_file(filename, format=None):
    """
    Call :program:`kpsewhich` to find a file in the texmf tree.
    If format is not None, it is used as the value for the --format option.
    See the kpathsea documentation for more information.
    Apparently most existing TeX distributions on Unix-like systems
    use kpathsea. I hear MikTeX (a popular distribution on Windows)
    doesn't use kpathsea, so what do we do? (TODO)
    """
    # Build the kpsewhich command line piece by piece.
    cmd = ['kpsewhich']
    if format is not None:
        cmd.append('--format=' + format)
    cmd.append(filename)
    matplotlib.verbose.report('find_tex_file(%s): %s' \
                              % (filename,cmd), 'debug')
    pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    # kpsewhich prints the resolved path (or nothing) followed by a newline.
    result = pipe.communicate()[0].rstrip()
    matplotlib.verbose.report('find_tex_file result: %s' % result,
                              'debug')
    return result
def _read_nointr(pipe, bufsize=-1):
    """Read from *pipe*, retrying reads interrupted by a signal (EINTR)."""
    while True:
        try:
            return pipe.read(bufsize)
        except OSError, e:
            # Python 2 syntax; retry only on interrupted system calls.
            if e.errno == errno.EINTR:
                continue
            else:
                raise
# With multiple text objects per figure (e.g. tick labels) we may end
# up reading the same tfm and vf files many times, so we implement a
# simple cache. TODO: is this worth making persistent?
_tfmcache = {}   # texname -> Tfm instance, or None if the file was not found
_vfcache = {}    # texname -> Vf instance, or None if the file was not found
def _fontfile(texname, class_, suffix, cache):
    """Look up *texname* in *cache*, loading it via *class_* on a miss.

    Caches None when kpsewhich cannot locate the file, so missing fonts
    are only searched for once.
    """
    if texname in cache:
        return cache[texname]
    filename = find_tex_file(texname + suffix)
    result = class_(filename) if filename else None
    cache[texname] = result
    return result
def _tfmfile(texname):
    # Cached loader for TeX font metric (*.tfm) files.
    return _fontfile(texname, Tfm, '.tfm', _tfmcache)
def _vffile(texname):
    # Cached loader for virtual font (*.vf) files.
    return _fontfile(texname, Vf, '.vf', _vfcache)
# Debug driver: dump the glyphs and boxes of a dvi file given on the
# command line (optionally followed by a dpi value). Python 2 syntax.
if __name__ == '__main__':
    import sys
    matplotlib.verbose.set_level('debug-annoying')
    fname = sys.argv[1]
    try: dpi = float(sys.argv[2])
    except IndexError: dpi = None
    dvi = Dvi(fname, dpi)
    fontmap = PsfontsMap(find_tex_file('pdftex.map'))
    for page in dvi:
        print '=== new page ==='
        fPrev = None
        for x,y,f,c,w in page.text:
            if f != fPrev:
                print 'font', f.texname, 'scaled', f._scale/pow(2.0,20)
                fPrev = f
            print x,y,c, 32 <= c < 128 and chr(c) or '.', w
        for x,y,w,h in page.boxes:
            print x,y,'BOX',w,h
| agpl-3.0 |
gclenaghan/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
    def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
                 tol=0, max_iter=None, path_method='auto',
                 neighbors_algorithm='auto'):
        # Store constructor parameters verbatim (sklearn convention).
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.path_method = path_method
        self.neighbors_algorithm = neighbors_algorithm
        # NOTE(review): instantiating the NearestNeighbors helper here from
        # the raw arguments deviates from the sklearn convention that
        # __init__ only stores parameters (set_params() will not refresh
        # nbrs_) -- confirm against the project's estimator guidelines.
        self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
                                      algorithm=neighbors_algorithm)
    def _fit_transform(self, X):
        # Shared fitting routine for fit() and fit_transform():
        # 1) fit the nearest-neighbors index, 2) build the k-NN distance
        # graph, 3) compute all-pairs geodesic distances on it, 4) embed the
        # isomap kernel (-0.5 * D^2) with precomputed-kernel KernelPCA.
        X = check_array(X)
        self.nbrs_.fit(X)
        # NOTE(review): relies on the private _fit_X attribute of
        # NearestNeighbors to recover the training data.
        self.training_data_ = self.nbrs_._fit_X
        self.kernel_pca_ = KernelPCA(n_components=self.n_components,
                                     kernel="precomputed",
                                     eigen_solver=self.eigen_solver,
                                     tol=self.tol, max_iter=self.max_iter)
        kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
                               mode='distance')
        # Geodesic distances = shortest paths through the k-NN graph,
        # treated as undirected.
        self.dist_matrix_ = graph_shortest_path(kng,
                                                method=self.path_method,
                                                directed=False)
        # Isomap kernel: K = -0.5 * D^2 (centering is done by KernelPCA).
        G = self.dist_matrix_ ** 2
        G *= -0.5
        self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
    def fit(self, X, y=None):
        """Compute the embedding vectors for data X
        Parameters
        ----------
        X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
            Sample data, shape = (n_samples, n_features), in the form of a
            numpy array, precomputed tree, or NearestNeighbors
            object.
        y : ignored
            Present only for scikit-learn estimator API compatibility.
        Returns
        -------
        self : returns an instance of self.
        """
        self._fit_transform(X)
        return self
    def fit_transform(self, X, y=None):
        """Fit the model from data in X and transform X.
        Parameters
        ----------
        X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.
        y : ignored
            Present only for scikit-learn estimator API compatibility.
        Returns
        -------
        X_new: array-like, shape (n_samples, n_components)
        """
        # embedding_ is produced as a side effect of _fit_transform.
        self._fit_transform(X)
        return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
| bsd-3-clause |
mverleg/paxsync_backups | cleanup.py | 2 | 3773 |
from argparse import ArgumentParser
from collections import OrderedDict
from datetime import datetime, timedelta
from os import listdir
from os.path import join
from random import randint
from re import findall
from shutil import rmtree
def find_backups(dir, dry=True):
    """Yield (name, timestamp) pairs for backup directories inside *dir*.

    Entries must be named ``backup_YY-MM-DD-HH-MM-SS`` (two-digit year,
    interpreted as 2000+YY).  When *dry* is true the name component is
    replaced by ``None``, which turns later removal steps into no-ops.
    """
    pattern = r'backup_(\d+)-(\d+)-(\d+)-(\d+)-(\d+)-(\d+)'
    for entry in listdir(dir):
        groups = findall(pattern, entry)
        if not groups:
            continue
        yy, month, day, hour, minute, second = (int(p) for p in groups[0])
        stamp = datetime(2000 + yy, month, day, hour, minute, second)
        yield (None, stamp) if dry else (entry, stamp)
def remove(backups, which, dir, is_removed):
    """Remove backup number *which* from disk and from the bookkeeping.

    A ``None`` name (dry-run entry) skips the filesystem deletion; the entry
    is still marked removed in *is_removed* and popped from *backups*.
    """
    name, stamp = backups[which]
    print('remove {0:}'.format(name or stamp))
    if name:
        rmtree(join(dir, name))
    is_removed[(name, stamp)] = True
    backups.pop(which)
def get_scores(backups, now):
    """Return (ages_in_days, redundancy_scores) for a list of backups.

    Each backup's score sums ``1e6 / |seconds apart|`` over every other
    backup (closer neighbours mean more redundancy), weighted by the square
    root of its age in days so that old, crowded backups are pruned first.
    """
    stamps = [stamp for _, stamp in backups]
    ages = [abs((now - stamp).total_seconds()) / 86400 for stamp in stamps]
    scores = []
    for stamp, age in zip(stamps, ages):
        crowding = 0
        for other in stamps:
            gap = abs(int((stamp - other).total_seconds()))
            if gap > 0:
                crowding += 1.e6 / gap
        scores.append(crowding * age ** 0.5)
    return ages, scores
def get_top(collection):
    """Return ``(index, value)`` of the largest element; first one wins ties.

    Raises AssertionError when no element exceeds -infinity (e.g. an empty
    collection), mirroring the original contract.
    """
    best_index = None
    best_value = -float('inf')
    for index, value in enumerate(collection):
        if value > best_value:
            best_index, best_value = index, value
    if best_index is None:
        raise AssertionError("didn't find a maximum in {0:}".format(collection))
    return best_index, best_value
def get_args():
    """Parse and validate command-line options for the pruning script."""
    parser = ArgumentParser(description='Remove old backup files.')
    parser.add_argument('directory', type=str, help='Directory where backups are stored (should be named e.g. backup_16-01-09-20-31-24)')
    parser.add_argument('--maxage', type=int, default=90, help='Maximum age in days (older backups are always removed).')
    parser.add_argument('--keep', type=int, default=10, help='How many backups to keep (any excess ones are removed).')
    parser.add_argument('--plot', action='store_true', help='Show a plot of backups and scores.')
    parser.add_argument('--demo', type=int, default=None, help='Use X demo data points instead (also implies --dry).')
    parser.add_argument('--dry', action='store_true', help='Just show what to remove but don\'t actually do it.')
    args = parser.parse_args()
    # At least one backup must always survive pruning.
    assert args.keep >= 1
    return args
def prune(args):
    """Delete backups that are too old or too redundant, then report.

    Steps: gather backups (or synthesize demo data), drop everything older
    than --maxage, then repeatedly remove the highest-scoring (most
    redundant) backup until only --keep remain.  Optionally plots
    before/after scores with matplotlib.
    """
    now = datetime.now()
    if args.demo is not None:
        # Synthetic timestamps skewed toward recent dates; demo entries have
        # name None so remove() never touches the filesystem.
        backups = [(None, now - timedelta(days=int((0.03 * randint(0, int(100)))**4), seconds=randint(0, 86400))) for k in range(args.demo)]
    else:
        backups = list(find_backups(args.directory, args.dry))
    # Ordered oldest-first; values flip to True as backups are removed.
    is_removed = OrderedDict((backup, False) for backup in sorted(backups, key=lambda obj: obj[1]))
    fig = ax = mp = None
    if args.plot:
        # Import lazily so matplotlib is only required with --plot.
        from matplotlib.pyplot import subplots
        fig, ax = subplots(figsize=(8, 4))
        ax.set_xlabel('Days ago')
        ax.set_ylabel('Redundancy score')
        ax.set_yscale('log')
        original_ago, original_scores = get_scores(backups, now)
        ax.scatter(original_ago, original_scores, color='red')
        mp = {ago: score for ago, score in zip(original_ago, original_scores)}
    # Pass 1: hard age cutoff (half-day grace); iterate in reverse so that
    # pop(k) inside remove() does not shift pending indices.
    for k in reversed(range(len(backups))):
        if (now - backups[k][1]).total_seconds() > 24 * 60 * 60 * (args.maxage + 0.5):
            remove(backups, k, args.directory, is_removed)
    # Pass 2: drop the most redundant backup until only --keep remain;
    # scores are recomputed each round since removals change crowding.
    while len(backups) > args.keep:
        ago, scores = get_scores(backups, now)
        topk, topscore = get_top(scores)
        remove(backups, topk, args.directory, is_removed)
    print('keep name')
    for backup, status in is_removed.items():
        print('  {1:2s} {0:20s}'.format(backup[1].strftime('%Y-%m-%d %H:%M'), '' if status else '>>'))
    if fig and ax and mp:
        from matplotlib.pyplot import show
        # Overlay the survivors (blue) on the original scores (red).
        ago = get_scores(backups, now)[0]
        ax.scatter(ago, tuple(mp[a] for a in ago), color='blue')
        ax.set_ylim([min(mp.values()), max(mp.values())])
        show()
# Script entry point: parse CLI arguments and run one pruning pass.
if __name__ == '__main__':
    prune(get_args())
| mit |
Just-CJ/SketchRetrieval | python/GFHOG.py | 1 | 3062 | # coding=utf-8
import cv2
import numpy as np
import sklearn
# Input-kind flags for GFHOG.compute(): hand-drawn sketch vs. photograph.
SKETCH = 1
IMAGE = 2
# NOTE(review): module-level scratch array; appears unused in this file.
test = np.array([])
class GFHOG:
    """Gradient-Field HOG descriptor for sketch-based image retrieval.

    Sketches are used directly as edge maps; photographs are first reduced
    to edges via blurred Canny.  Either way, a dense gradient-orientation
    field is built and fed to an OpenCV HOG descriptor loaded from
    ``hog.xml``.
    """
    # Cached gradient field from the last compute_* call.
    __gradient = None
    # OpenCV HOG descriptor configured from hog.xml.
    __hog = None
    def __init__(self):
        self.__hog = cv2.HOGDescriptor('hog.xml')
    def compute(self, img, t=SKETCH):
        """Dispatch to the sketch or photograph pipeline depending on *t*."""
        if t == SKETCH:
            self.compute_sketch(img)
        else:
            self.compute_image(img)
    def compute_sketch(self, img):
        """Compute the descriptor for a sketch (already edge-like) image."""
        # Convert to grayscale in place if the image has color channels.
        if img.ndim > 2:
            cv2.cvtColor(img, cv2.COLOR_BGR2GRAY, img)
        # Compute the gradient field directly from the sketch strokes.
        self.compute_gradient(img)
    def compute_image(self, img):
        """Compute the descriptor for a photograph via edge extraction."""
        # Convert to grayscale.
        if img.ndim > 2:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Extract edges: try progressively smaller Gaussian blurs until the
        # Canny edge density exceeds 2% of the image area.
        edge = np.zeros(img.shape, dtype=np.uint8)
        for s in range(19, 1, -2):
            cv2.GaussianBlur(img, (s, s), 0, edge)
            cv2.Canny(edge, 100, 200, edge)
            sum = cv2.sumElems(edge)
            area = 1.0*sum[0] / (edge.size*255)
            if area > 0.02:
                break
        # Invert so edges are dark on white, matching sketch input.
        cv2.bitwise_not(edge, edge)
        # cv2.imshow("edge", edge)
        # cv2.waitKey()
        # Compute the gradient field from the extracted edge map.
        self.compute_gradient(edge)
    def compute_gradient(self, edge):
        """Build the gradient-orientation field and run HOG on it."""
        # NOTE(review): this zeros allocation is immediately overwritten by
        # the next line and is therefore dead.
        gradient = np.zeros(edge.shape, dtype=np.float32)
        gradient = edge * np.float32(1.0 / 255.0)
        # Compute the gradient field (normalized orientation per pixel).
        gradient = self.gradient_field(gradient)
        cv2.bitwise_not(edge, edge)
        self.__gradient = np.copy(gradient)
        tmp = gradient*255.0
        # Debug output: writes a file and opens a blocking GUI window.
        cv2.imwrite("5_gradient.jpg", np.array(tmp, dtype=np.uint8))
        cv2.imshow("gradient", np.array(tmp, dtype=np.uint8))
        cv2.waitKey()
        # Compute the HOG descriptor.
        # NOTE(review): the returned descriptor is discarded here.
        self.__hog.compute(gradient)
    def gradient_field(self, gradient):
        """Return per-pixel gradient orientation scaled to [0, 1)."""
        # Default mask (all ones); currently unused — see commented code.
        mask = np.zeros(gradient.shape, dtype=np.uint8)
        cv2.bitwise_not(mask, mask)
        # NOTE(review): ww/hh are computed but never used.
        ww = gradient.shape[1]
        hh = gradient.shape[0]
        dx = np.zeros(gradient.shape, dtype=np.float32)
        dy = np.zeros(gradient.shape, dtype=np.float32)
        # Horizontal and vertical derivatives (Sobel).
        cv2.Sobel(gradient, cv2.CV_32F, 1, 0, dx)
        cv2.Sobel(gradient, cv2.CV_32F, 0, 1, dy)
        # Multiply the result by the mask (disabled).
        # dx = np.multiply(mask, dx)
        # dy = np.multiply(mask, dy)
        # dx = cv2.multiply(mask, dx)
        # dy = cv2.multiply(mask, dy)
        mag = np.zeros(gradient.shape, dtype=np.float32)
        ang = np.zeros(gradient.shape, dtype=np.float32)
        # Convert to polar coordinates; only the angle is kept and it is
        # normalized from [0, 2*pi) to [0, 1).
        cv2.cartToPolar(dx, dy, mag, ang)
        mag = ang * np.float32(1.0/(2.0*np.pi))
        return mag
    def gradient(self):
        """Return the gradient field cached by the last compute_* call."""
        return self.__gradient
if __name__ == '__main__':
    # Demo: compute the gradient-field HOG of a sample sketch image.
    im = cv2.imread('img/5.png')
    # im = cv2.resize(im, (100, im.shape[0]*100/im.shape[1]))
    # cv2.imwrite("5_scale.jpg", im)
    # cv2.waitKey()
    gfhog = GFHOG()
    gfhog.compute(im, SKETCH)
harshaneelhg/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
    """Return the shortest path length from source to all reachable nodes.
    Returns a dictionary of shortest path lengths keyed by target.
    Parameters
    ----------
    graph: sparse matrix or 2D array (preferably LIL matrix)
        Adjacency matrix of the graph
    source : node label
        Starting node for path
    cutoff : integer, optional
        Depth to stop the search - only
        paths of length <= cutoff are returned.
    Examples
    --------
    >>> from sklearn.utils.graph import single_source_shortest_path_length
    >>> import numpy as np
    >>> graph = np.array([[ 0, 1, 0, 0],
    ...                   [ 1, 0, 1, 0],
    ...                   [ 0, 1, 0, 1],
    ...                   [ 0, 0, 1, 0]])
    >>> single_source_shortest_path_length(graph, 0)
    {0: 0, 1: 1, 2: 2, 3: 3}
    >>> single_source_shortest_path_length(np.ones((6, 6)), 2)
    {0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
    """
    # LIL format exposes per-row neighbor lists via graph.rows.
    if sparse.isspmatrix(graph):
        graph = graph.tolil()
    else:
        graph = sparse.lil_matrix(graph)
    levels = {}        # node -> hop count from source (BFS order)
    depth = 0          # current BFS depth
    frontier = [source]
    while frontier:
        # Swap in the current frontier and start collecting the next one.
        current, frontier = frontier, set()
        for node in current:
            if node not in levels:
                levels[node] = depth
                frontier.update(graph.rows[node])
        # Stop expanding once the cutoff depth has been recorded.
        if cutoff is not None and cutoff <= depth:
            break
        depth += 1
    return levels
# Prefer scipy's connected_components when this scipy version provides it;
# otherwise fall back to the locally bundled sparsetools implementation.
if hasattr(sparse, 'connected_components'):
    connected_components = sparse.connected_components
else:
    from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
    """ Return the Laplacian matrix of a directed graph.
    For non-symmetric graphs the out-degree is used in the computation.
    Parameters
    ----------
    csgraph : array_like or sparse matrix, 2 dimensions
        compressed-sparse graph, with shape (N, N).
    normed : bool, optional
        If True, then compute normalized Laplacian.
    return_diag : bool, optional
        If True, then return diagonal as well as laplacian.
    Returns
    -------
    lap : ndarray
        The N x N laplacian matrix of graph.
    diag : ndarray
        The length-N diagonal of the laplacian matrix.
        diag is returned only if return_diag is True.
    Notes
    -----
    The Laplacian matrix of a graph is sometimes referred to as the
    "Kirchoff matrix" or the "admittance matrix", and is useful in many
    parts of spectral graph theory. In particular, the eigen-decomposition
    of the laplacian matrix can give insight into many properties of the graph.
    For non-symmetric directed graphs, the laplacian is computed using the
    out-degree of each node.
    """
    if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
        raise ValueError('csgraph must be a square matrix or array')
    # The normalized Laplacian needs floating-point arithmetic, so promote
    # integer graphs to float64 first.  ``np.integer`` covers both signed
    # and unsigned dtypes; the former ``np.int``/``np.uint`` spelling broke
    # on NumPy >= 1.24, where the ``np.int`` alias was removed (it raises
    # AttributeError at import-free call time).
    if normed and np.issubdtype(csgraph.dtype, np.integer):
        csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
    # Dispatch on representation; both helpers share the same contract.
    if sparse.isspmatrix(csgraph):
        return _laplacian_sparse(csgraph, normed=normed,
                                 return_diag=return_diag)
    else:
        return _laplacian_dense(csgraph, normed=normed,
                                return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
    """Sparse (COO-based) implementation behind graph_laplacian.

    Negates the adjacency matrix so off-diagonal entries become -weight,
    then fills the diagonal with node degrees (or the normalized variant).
    """
    n_nodes = graph.shape[0]
    if not graph.format == 'coo':
        lap = (-graph).tocoo()
    else:
        lap = -graph.copy()
    diag_mask = (lap.row == lap.col)
    if not diag_mask.sum() == n_nodes:
        # The sparsity pattern of the matrix has holes on the diagonal,
        # we need to fix that
        diag_idx = lap.row[diag_mask]
        diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
        # Append explicit (hole, hole) entries (value 1, overwritten below)
        # so every diagonal position exists in the COO structure and can be
        # assigned through lap.data.
        new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
        new_row = np.concatenate([lap.row, diagonal_holes])
        new_col = np.concatenate([lap.col, diagonal_holes])
        lap = sparse.coo_matrix((new_data, (new_row, new_col)),
                                shape=lap.shape)
        diag_mask = (lap.row == lap.col)
    # Zero the diagonal first: row sums of the negated adjacency then give
    # the (positive) node degrees.
    lap.data[diag_mask] = 0
    w = -np.asarray(lap.sum(axis=1)).squeeze()
    if normed:
        # Normalized Laplacian: scale rows and columns by 1/sqrt(degree).
        w = np.sqrt(w)
        w_zeros = (w == 0)
        # Isolated nodes: use 1 to avoid division by zero; their diagonal
        # entry becomes 0 via (1 - w_zeros) below.
        w[w_zeros] = 1
        lap.data /= w[lap.row]
        lap.data /= w[lap.col]
        lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
            lap.data.dtype)
    else:
        lap.data[diag_mask] = w[lap.row[diag_mask]]
    if return_diag:
        return lap, w
    return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
CallaJun/hackprince | indico/matplotlib/tests/test_axes.py | 9 | 108913 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from nose.tools import assert_equal, assert_raises, assert_false, assert_true
import datetime
import numpy as np
from numpy import ma
import matplotlib
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
from numpy.testing import assert_array_equal
import warnings
@image_comparison(baseline_images=['formatter_ticker_001',
'formatter_ticker_002',
'formatter_ticker_003',
'formatter_ticker_004',
'formatter_ticker_005',
])
def test_formatter_ticker():
import matplotlib.testing.jpl_units as units
units.register()
# This should affect the tick size. (Tests issue #543)
matplotlib.rcParams['lines.markeredgewidth'] = 30
# This essentially test to see if user specified labels get overwritten
# by the auto labeler functionality of the axes.
xdata = [x*units.sec for x in range(10)]
ydata1 = [(1.5*y - 0.5)*units.km for y in range(10)]
ydata2 = [(1.75*y - 1.0)*units.km for y in range(10)]
fig = plt.figure()
ax = plt.subplot(111)
ax.set_xlabel("x-label 001")
fig = plt.figure()
ax = plt.subplot(111)
ax.set_xlabel("x-label 001")
ax.plot(xdata, ydata1, color='blue', xunits="sec")
fig = plt.figure()
ax = plt.subplot(111)
ax.set_xlabel("x-label 001")
ax.plot(xdata, ydata1, color='blue', xunits="sec")
ax.set_xlabel("x-label 003")
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(xdata, ydata1, color='blue', xunits="sec")
ax.plot(xdata, ydata2, color='green', xunits="hour")
ax.set_xlabel("x-label 004")
# See SF bug 2846058
# https://sourceforge.net/tracker/?func=detail&aid=2846058&group_id=80706&atid=560720
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(xdata, ydata1, color='blue', xunits="sec")
ax.plot(xdata, ydata2, color='green', xunits="hour")
ax.set_xlabel("x-label 005")
ax.autoscale_view()
@image_comparison(baseline_images=["formatter_large_small"])
def test_formatter_large_small():
# github issue #617, pull #619
fig, ax = plt.subplots(1)
x = [0.500000001, 0.500000002]
y = [1e64, 1.1e64]
ax.plot(x, y)
@image_comparison(baseline_images=["twin_axis_locaters_formatters"])
def test_twin_axis_locaters_formatters():
vals = np.linspace(0, 1, num=5, endpoint=True)
locs = np.sin(np.pi * vals / 2.0)
majl = plt.FixedLocator(locs)
minl = plt.FixedLocator([0.1, 0.2, 0.3])
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ax1.plot([0.1, 100], [0, 1])
ax1.yaxis.set_major_locator(majl)
ax1.yaxis.set_minor_locator(minl)
ax1.yaxis.set_major_formatter(plt.FormatStrFormatter('%08.2lf'))
ax1.yaxis.set_minor_formatter(plt.FixedFormatter(['tricks', 'mind', 'jedi']))
ax1.xaxis.set_major_locator(plt.LinearLocator())
ax1.xaxis.set_minor_locator(plt.FixedLocator([15, 35, 55, 75]))
ax1.xaxis.set_major_formatter(plt.FormatStrFormatter('%05.2lf'))
ax1.xaxis.set_minor_formatter(plt.FixedFormatter(['c', '3', 'p', 'o']))
ax2 = ax1.twiny()
ax3 = ax1.twinx()
@cleanup
def test_twinx_cla():
    # Clearing a twinned axes must preserve the shared-frame bookkeeping:
    # a twinx axes hides its x-axis and patch but keeps its y-axis visible,
    # while a twiny-of-twinx axes keeps only its x-axis visible.  The host
    # axes stays fully visible throughout.
    fig, ax = plt.subplots()
    ax2 = ax.twinx()
    ax3 = ax2.twiny()
    plt.draw()
    assert_false(ax2.xaxis.get_visible())
    assert_false(ax2.patch.get_visible())
    # cla() must not resurrect the hidden parts of the twinned axes.
    ax2.cla()
    ax3.cla()
    assert_false(ax2.xaxis.get_visible())
    assert_false(ax2.patch.get_visible())
    assert_true(ax2.yaxis.get_visible())
    assert_true(ax3.xaxis.get_visible())
    assert_false(ax3.patch.get_visible())
    assert_false(ax3.yaxis.get_visible())
    assert_true(ax.xaxis.get_visible())
    assert_true(ax.patch.get_visible())
    assert_true(ax.yaxis.get_visible())
@image_comparison(baseline_images=["autoscale_tiny_range"], remove_text=True)
def test_autoscale_tiny_range():
# github pull #904
fig, ax = plt.subplots(2, 2)
ax = ax.flatten()
for i in xrange(4):
y1 = 10**(-11 - i)
ax[i].plot([0, 1], [1, 1 + y1])
@image_comparison(baseline_images=['offset_points'],
remove_text=True)
def test_basic_annotate():
# Setup some data
t = np.arange(0.0, 5.0, 0.01)
s = np.cos(2.0*np.pi * t)
# Offset Points
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1, 5), ylim=(-3, 5))
line, = ax.plot(t, s, lw=3, color='purple')
ax.annotate('local max', xy=(3, 1), xycoords='data',
xytext=(3, 3), textcoords='offset points')
@image_comparison(baseline_images=['polar_axes'])
def test_polar_annotations():
# you can specify the xypoint and the xytext in different
# positions and coordinate systems, and optionally turn on a
# connecting line and mark the point with a marker. Annotations
# work on polar axes too. In the example below, the xy point is
# in native coordinates (xycoords defaults to 'data'). For a
# polar axes, this is in (theta, radius) space. The text in this
# example is placed in the fractional figure coordinate system.
# Text keyword args like horizontal and vertical alignment are
# respected
# Setup some data
r = np.arange(0.0, 1.0, 0.001)
theta = 2.0 * 2.0 * np.pi * r
fig = plt.figure()
ax = fig.add_subplot(111, polar=True)
line, = ax.plot(theta, r, color='#ee8d18', lw=3)
line, = ax.plot((0, 0), (0, 1), color="#0000ff", lw=1)
ind = 800
thisr, thistheta = r[ind], theta[ind]
ax.plot([thistheta], [thisr], 'o')
ax.annotate('a polar annotation',
xy=(thistheta, thisr), # theta, radius
xytext=(0.05, 0.05), # fraction, fraction
textcoords='figure fraction',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='left',
verticalalignment='baseline',
)
@image_comparison(baseline_images=['polar_coords'],
remove_text=True)
def test_polar_coord_annotations():
# You can also use polar notation on a catesian axes. Here the
# native coordinate system ('data') is cartesian, so you need to
# specify the xycoords and textcoords as 'polar' if you want to
# use (theta, radius)
from matplotlib.patches import Ellipse
el = Ellipse((0, 0), 10, 20, facecolor='r', alpha=0.5)
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax.add_artist(el)
el.set_clip_box(ax.bbox)
ax.annotate('the top',
xy=(np.pi/2., 10.), # theta, radius
xytext=(np.pi/3, 20.), # theta, radius
xycoords='polar',
textcoords='polar',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='left',
verticalalignment='baseline',
clip_on=True, # clip to the axes bounding box
)
ax.set_xlim(-20, 20)
ax.set_ylim(-20, 20)
@image_comparison(baseline_images=['fill_units'], tol=18, extensions=['png'],
savefig_kwarg={'dpi': 60})
def test_fill_units():
from datetime import datetime
import matplotlib.testing.jpl_units as units
units.register()
# generate some data
t = units.Epoch("ET", dt=datetime(2009, 4, 27))
value = 10.0 * units.deg
day = units.Duration("ET", 24.0 * 60.0 * 60.0)
fig = plt.figure()
# Top-Left
ax1 = fig.add_subplot(221)
ax1.plot([t], [value], yunits='deg', color='red')
ax1.fill([733525.0, 733525.0, 733526.0, 733526.0],
[0.0, 0.0, 90.0, 0.0], 'b')
# Top-Right
ax2 = fig.add_subplot(222)
ax2.plot([t], [value], yunits='deg', color='red')
ax2.fill([t, t, t+day, t+day],
[0.0, 0.0, 90.0, 0.0], 'b')
# Bottom-Left
ax3 = fig.add_subplot(223)
ax3.plot([t], [value], yunits='deg', color='red')
ax3.fill([733525.0, 733525.0, 733526.0, 733526.0],
[0*units.deg, 0*units.deg, 90*units.deg, 0*units.deg], 'b')
# Bottom-Right
ax4 = fig.add_subplot(224)
ax4.plot([t], [value], yunits='deg', color='red')
ax4.fill([t, t, t+day, t+day],
[0*units.deg, 0*units.deg, 90*units.deg, 0*units.deg],
facecolor="blue")
fig.autofmt_xdate()
@image_comparison(baseline_images=['single_point'])
def test_single_point():
# Issue #1796: don't let lines.marker affect the grid
matplotlib.rcParams['lines.marker'] = 'o'
matplotlib.rcParams['axes.grid'] = True
fig = plt.figure()
plt.subplot(211)
plt.plot([0], [0], 'o')
plt.subplot(212)
plt.plot([1], [1], 'o')
@image_comparison(baseline_images=['single_date'])
def test_single_date():
time1 = [721964.0]
data1 = [-65.54]
fig = plt.figure()
plt.subplot(211)
plt.plot_date(time1, data1, 'o', color='r')
plt.subplot(212)
plt.plot(time1, data1, 'o', color='r')
@image_comparison(baseline_images=['shaped_data'])
def test_shaped_data():
xdata = np.array([[0.53295185, 0.23052951, 0.19057629, 0.66724975, 0.96577916,
0.73136095, 0.60823287, 0.01792100, 0.29744742, 0.27164665],
[0.27980120, 0.25814229, 0.02818193, 0.12966456, 0.57446277,
0.58167607, 0.71028245, 0.69112737, 0.89923072, 0.99072476],
[0.81218578, 0.80464528, 0.76071809, 0.85616314, 0.12757994,
0.94324936, 0.73078663, 0.09658102, 0.60703967, 0.77664978],
[0.28332265, 0.81479711, 0.86985333, 0.43797066, 0.32540082,
0.43819229, 0.92230363, 0.49414252, 0.68168256, 0.05922372],
[0.10721335, 0.93904142, 0.79163075, 0.73232848, 0.90283839,
0.68408046, 0.25502302, 0.95976614, 0.59214115, 0.13663711],
[0.28087456, 0.33127607, 0.15530412, 0.76558121, 0.83389773,
0.03735974, 0.98717738, 0.71432229, 0.54881366, 0.86893953],
[0.77995937, 0.99555600, 0.29688434, 0.15646162, 0.05184800,
0.37161935, 0.12998491, 0.09377296, 0.36882507, 0.36583435],
[0.37851836, 0.05315792, 0.63144617, 0.25003433, 0.69586032,
0.11393988, 0.92362096, 0.88045438, 0.93530252, 0.68275072],
[0.86486596, 0.83236675, 0.82960664, 0.57796630, 0.25724233,
0.84841095, 0.90862812, 0.64414887, 0.35652720, 0.71026066],
[0.01383268, 0.34060930, 0.76084285, 0.70800694, 0.87634056,
0.08213693, 0.54655021, 0.98123181, 0.44080053, 0.86815815]])
y1 = np.arange(10)
y1.shape = 1, 10
y2 = np.arange(10)
y2.shape = 10, 1
fig = plt.figure()
plt.subplot(411)
plt.plot(y1)
plt.subplot(412)
plt.plot(y2)
plt.subplot(413)
assert_raises(ValueError, plt.plot, (y1, y2))
plt.subplot(414)
plt.plot(xdata[:, 1], xdata[1, :], 'o')
@image_comparison(baseline_images=['const_xy'])
def test_const_xy():
fig = plt.figure()
plt.subplot(311)
plt.plot(np.arange(10), np.ones((10,)))
plt.subplot(312)
plt.plot(np.ones((10,)), np.arange(10))
plt.subplot(313)
plt.plot(np.ones((10,)), np.ones((10,)), 'o')
@image_comparison(baseline_images=['polar_wrap_180',
'polar_wrap_360',
])
def test_polar_wrap():
D2R = np.pi / 180.0
fig = plt.figure()
plt.subplot(111, polar=True)
plt.polar([179*D2R, -179*D2R], [0.2, 0.1], "b.-")
plt.polar([179*D2R, 181*D2R], [0.2, 0.1], "g.-")
plt.rgrids([0.05, 0.1, 0.15, 0.2, 0.25, 0.3])
assert len(fig.axes) == 1, 'More than one polar axes created.'
fig = plt.figure()
plt.subplot(111, polar=True)
plt.polar([2*D2R, -2*D2R], [0.2, 0.1], "b.-")
plt.polar([2*D2R, 358*D2R], [0.2, 0.1], "g.-")
plt.polar([358*D2R, 2*D2R], [0.2, 0.1], "r.-")
plt.rgrids([0.05, 0.1, 0.15, 0.2, 0.25, 0.3])
@image_comparison(baseline_images=['polar_units', 'polar_units_2'])
def test_polar_units():
import matplotlib.testing.jpl_units as units
from nose.tools import assert_true
units.register()
pi = np.pi
deg = units.UnitDbl(1.0, "deg")
km = units.UnitDbl(1.0, "km")
x1 = [pi/6.0, pi/4.0, pi/3.0, pi/2.0]
x2 = [30.0*deg, 45.0*deg, 60.0*deg, 90.0*deg]
y1 = [1.0, 2.0, 3.0, 4.0]
y2 = [4.0, 3.0, 2.0, 1.0]
fig = plt.figure()
plt.polar(x2, y1, color="blue")
# polar(x2, y1, color = "red", xunits="rad")
# polar(x2, y2, color = "green")
fig = plt.figure()
# make sure runits and theta units work
y1 = [y*km for y in y1]
plt.polar(x2, y1, color="blue", thetaunits="rad", runits="km")
assert_true(isinstance(plt.gca().get_xaxis().get_major_formatter(), units.UnitDblFormatter))
@image_comparison(baseline_images=['polar_rmin'])
def test_polar_rmin():
r = np.arange(0, 3.0, 0.01)
theta = 2*np.pi*r
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
ax.plot(theta, r)
ax.set_rmax(2.0)
ax.set_rmin(0.5)
@image_comparison(baseline_images=['polar_theta_position'])
def test_polar_theta_position():
r = np.arange(0, 3.0, 0.01)
theta = 2*np.pi*r
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
ax.plot(theta, r)
ax.set_theta_zero_location("NW")
ax.set_theta_direction('clockwise')
@image_comparison(baseline_images=['polar_rlabel_position'])
def test_polar_rlabel_position():
fig = plt.figure()
ax = fig.add_subplot(111, projection='polar')
ax.set_rlabel_position(315)
@image_comparison(baseline_images=['axvspan_epoch'])
def test_axvspan_epoch():
from datetime import datetime
import matplotlib.testing.jpl_units as units
units.register()
# generate some data
t0 = units.Epoch("ET", dt=datetime(2009, 1, 20))
tf = units.Epoch("ET", dt=datetime(2009, 1, 21))
dt = units.Duration("ET", units.day.convert("sec"))
fig = plt.figure()
plt.axvspan(t0, tf, facecolor="blue", alpha=0.25)
ax = plt.gca()
ax.set_xlim(t0 - 5.0*dt, tf + 5.0*dt)
@image_comparison(baseline_images=['axhspan_epoch'])
def test_axhspan_epoch():
from datetime import datetime
import matplotlib.testing.jpl_units as units
units.register()
# generate some data
t0 = units.Epoch("ET", dt=datetime(2009, 1, 20))
tf = units.Epoch("ET", dt=datetime(2009, 1, 21))
dt = units.Duration("ET", units.day.convert("sec"))
fig = plt.figure()
plt.axhspan(t0, tf, facecolor="blue", alpha=0.25)
ax = plt.gca()
ax.set_ylim(t0 - 5.0*dt, tf + 5.0*dt)
@image_comparison(baseline_images=['hexbin_extent'],
remove_text=True, extensions=['png'])
def test_hexbin_extent():
# this test exposes sf bug 2856228
fig = plt.figure()
ax = fig.add_subplot(111)
data = np.arange(2000.)/2000.
data.shape = 2, 1000
x, y = data
ax.hexbin(x, y, extent=[.1, .3, .6, .7])
@cleanup
def test_hexbin_pickable():
    # From #1973: Test that picking a hexbin collection works
    class FauxMouseEvent:
        # Minimal stand-in for a backend mouse event: Artist.contains()
        # only reads display-space .x and .y attributes.
        def __init__(self, x, y):
            self.x = x
            self.y = y
    fig = plt.figure()
    ax = fig.add_subplot(111)
    data = np.arange(200.)/200.
    data.shape = 2, 100
    x, y = data
    # picker=1 gives a 1-point picking tolerance.
    hb = ax.hexbin(x, y, extent=[.1, .3, .6, .7], picker=1)
    # (400, 300) is the display-space center of the default figure.
    assert hb.contains(FauxMouseEvent(400, 300))[0]
@image_comparison(baseline_images=['hexbin_log'],
remove_text=True,
extensions=['png'])
def test_hexbin_log():
# Issue #1636
fig = plt.figure()
np.random.seed(0)
n = 100000
x = np.random.standard_normal(n)
y = 2.0 + 3.0 * x + 4.0 * np.random.standard_normal(n)
y = np.power(2, y * 0.5)
ax = fig.add_subplot(111)
ax.hexbin(x, y, yscale='log')
@cleanup
def test_inverted_limits():
    # Test gh:1553
    # Calling invert_xaxis prior to plotting should not disable autoscaling
    # while still maintaining the inverted direction
    fig = plt.figure()
    ax = fig.gca()
    ax.invert_xaxis()
    ax.plot([-5, -3, 2, 4], [1, 2, -3, 5])
    # Limits autoscaled to the data, but x reported high-to-low (inverted).
    assert ax.get_xlim() == (4, -5)
    assert ax.get_ylim() == (-3, 5)
    plt.close()
    fig = plt.figure()
    ax = fig.gca()
    ax.invert_yaxis()
    ax.plot([-5, -3, 2, 4], [1, 2, -3, 5])
    # Same check for the y-axis: autoscaled but inverted.
    assert ax.get_xlim() == (-5, 4)
    assert ax.get_ylim() == (5, -3)
    plt.close()
@image_comparison(baseline_images=['nonfinite_limits'])
def test_nonfinite_limits():
x = np.arange(0., np.e, 0.01)
olderr = np.seterr(divide='ignore') # silence divide by zero warning from log(0)
try:
y = np.log(x)
finally:
np.seterr(**olderr)
x[len(x)//2] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
@image_comparison(baseline_images=['imshow'],
remove_text=True)
def test_imshow():
#Create a NxN image
N = 100
(x, y) = np.indices((N, N))
x -= N//2
y -= N//2
r = np.sqrt(x**2+y**2-x*y)
#Create a contour plot at N/4 and extract both the clip path and transform
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(r)
@image_comparison(baseline_images=['imshow_clip'])
def test_imshow_clip():
# As originally reported by Gellule Xg <gellule.xg@free.fr>
#Create a NxN image
N = 100
(x, y) = np.indices((N, N))
x -= N//2
y -= N//2
r = np.sqrt(x**2+y**2-x*y)
#Create a contour plot at N/4 and extract both the clip path and transform
fig = plt.figure()
ax = fig.add_subplot(111)
c = ax.contour(r, [N/4])
x = c.collections[0]
clipPath = x.get_paths()[0]
clipTransform = x.get_transform()
from matplotlib.transforms import TransformedPath
clip_path = TransformedPath(clipPath, clipTransform)
#Plot the image clipped by the contour
ax.imshow(r, clip_path=clip_path)
@image_comparison(baseline_images=['polycollection_joinstyle'],
remove_text=True)
def test_polycollection_joinstyle():
# Bug #2890979 reported by Matthew West
from matplotlib import collections as mcoll
fig = plt.figure()
ax = fig.add_subplot(111)
verts = np.array([[1, 1], [1, 2], [2, 2], [2, 1]])
c = mcoll.PolyCollection([verts], linewidths=40)
ax.add_collection(c)
ax.set_xbound(0, 3)
ax.set_ybound(0, 3)
@image_comparison(baseline_images=['fill_between_interpolate'],
remove_text=True)
def test_fill_between_interpolate():
x = np.arange(0.0, 2, 0.02)
y1 = np.sin(2*np.pi*x)
y2 = 1.2*np.sin(4*np.pi*x)
fig = plt.figure()
ax = fig.add_subplot(211)
ax.plot(x, y1, x, y2, color='black')
ax.fill_between(x, y1, y2, where=y2 >= y1, facecolor='white', hatch='/', interpolate=True)
ax.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red', interpolate=True)
# Test support for masked arrays.
y2 = np.ma.masked_greater(y2, 1.0)
# Test that plotting works for masked arrays with the first element masked
y2[0] = np.ma.masked
ax1 = fig.add_subplot(212, sharex=ax)
ax1.plot(x, y1, x, y2, color='black')
ax1.fill_between(x, y1, y2, where=y2 >= y1, facecolor='green', interpolate=True)
ax1.fill_between(x, y1, y2, where=y2 <= y1, facecolor='red', interpolate=True)
@image_comparison(baseline_images=['symlog'])
def test_symlog():
x = np.array([0, 1, 2, 4, 6, 9, 12, 24])
y = np.array([1000000, 500000, 100000, 100, 5, 0, 0, 0])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
ax.set_yscale('symlog')
ax.set_xscale = ('linear')
ax.set_ylim(-1, 10000000)
@image_comparison(baseline_images=['symlog2'],
remove_text=True)
def test_symlog2():
# Numbers from -50 to 50, with 0.1 as step
x = np.arange(-50, 50, 0.001)
fig = plt.figure()
ax = fig.add_subplot(511)
# Plots a simple linear function 'f(x) = x'
ax.plot(x, x)
ax.set_xscale('symlog', linthreshx=20.0)
ax.grid(True)
ax = fig.add_subplot(512)
# Plots a simple linear function 'f(x) = x'
ax.plot(x, x)
ax.set_xscale('symlog', linthreshx=2.0)
ax.grid(True)
ax = fig.add_subplot(513)
# Plots a simple linear function 'f(x) = x'
ax.plot(x, x)
ax.set_xscale('symlog', linthreshx=1.0)
ax.grid(True)
ax = fig.add_subplot(514)
# Plots a simple linear function 'f(x) = x'
ax.plot(x, x)
ax.set_xscale('symlog', linthreshx=0.1)
ax.grid(True)
ax = fig.add_subplot(515)
# Plots a simple linear function 'f(x) = x'
ax.plot(x, x)
ax.set_xscale('symlog', linthreshx=0.01)
ax.grid(True)
ax.set_ylim(-0.1, 0.1)
@image_comparison(baseline_images=['pcolormesh'], remove_text=True)
def test_pcolormesh():
    """Image test: pcolormesh with edge colors, multiple edge colors, and
    Gouraud shading, on a curvilinear (non-rectilinear) grid."""
    n = 12
    x = np.linspace(-1.5, 1.5, n)
    y = np.linspace(-1.5, 1.5, n*2)
    X, Y = np.meshgrid(x, y)
    Qx = np.cos(Y) - np.cos(X)
    Qz = np.sin(Y) + np.sin(X)
    Qx = (Qx + 1.1)
    Z = np.sqrt(X**2 + Y**2)/5
    Z = (Z - Z.min()) / (Z.max() - Z.min())
    # The color array can include masked values:
    Zm = ma.masked_where(np.fabs(Qz) < 0.5*np.amax(Qz), Z)
    fig = plt.figure()
    ax = fig.add_subplot(131)
    ax.pcolormesh(Qx, Qz, Z, lw=0.5, edgecolors='k')
    ax = fig.add_subplot(132)
    ax.pcolormesh(Qx, Qz, Z, lw=2, edgecolors=['b', 'w'])
    ax = fig.add_subplot(133)
    ax.pcolormesh(Qx, Qz, Z, shading="gouraud")
@image_comparison(baseline_images=['pcolormesh_datetime_axis'],
                  extensions=['png'], remove_text=False)
def test_pcolormesh_datetime_axis():
    """Image test: pcolormesh with datetime x values, for both 1-D and 2-D
    coordinate arrays and for both the N and N+1 edge conventions."""
    fig = plt.figure()
    fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15)
    base = datetime.datetime(2013, 1, 1)
    x = np.array([base + datetime.timedelta(days=d) for d in range(21)])
    y = np.arange(21)
    z1, z2 = np.meshgrid(np.arange(20), np.arange(20))
    z = z1 * z2
    plt.subplot(221)
    plt.pcolormesh(x[:-1], y[:-1], z)
    plt.subplot(222)
    plt.pcolormesh(x, y, z)
    # Repeat with 2-D coordinate arrays.
    x = np.repeat(x[np.newaxis], 21, axis=0)
    y = np.repeat(y[:, np.newaxis], 21, axis=1)
    plt.subplot(223)
    plt.pcolormesh(x[:-1, :-1], y[:-1, :-1], z)
    plt.subplot(224)
    plt.pcolormesh(x, y, z)
    # Rotate date labels so they do not overlap.
    for ax in fig.get_axes():
        for label in ax.get_xticklabels():
            label.set_ha('right')
            label.set_rotation(30)
@image_comparison(baseline_images=['pcolor_datetime_axis'],
                  extensions=['png'], remove_text=False)
def test_pcolor_datetime_axis():
    """Image test: same exercise as ``test_pcolormesh_datetime_axis`` but
    through ``pcolor`` instead of ``pcolormesh``."""
    fig = plt.figure()
    fig.subplots_adjust(hspace=0.4, top=0.98, bottom=.15)
    base = datetime.datetime(2013, 1, 1)
    x = np.array([base + datetime.timedelta(days=d) for d in range(21)])
    y = np.arange(21)
    z1, z2 = np.meshgrid(np.arange(20), np.arange(20))
    z = z1 * z2
    plt.subplot(221)
    plt.pcolor(x[:-1], y[:-1], z)
    plt.subplot(222)
    plt.pcolor(x, y, z)
    # Repeat with 2-D coordinate arrays.
    x = np.repeat(x[np.newaxis], 21, axis=0)
    y = np.repeat(y[:, np.newaxis], 21, axis=1)
    plt.subplot(223)
    plt.pcolor(x[:-1, :-1], y[:-1, :-1], z)
    plt.subplot(224)
    plt.pcolor(x, y, z)
    # Rotate date labels so they do not overlap.
    for ax in fig.get_axes():
        for label in ax.get_xticklabels():
            label.set_ha('right')
            label.set_rotation(30)
@cleanup
def test_pcolorargs():
    """pcolormesh must raise TypeError for coordinate/value shape
    mismatches, including the stricter shape rules of Gouraud shading."""
    n = 12
    x = np.linspace(-1.5, 1.5, n)
    y = np.linspace(-1.5, 1.5, n*2)
    X, Y = np.meshgrid(x, y)
    Z = np.sqrt(X**2 + Y**2)/5
    _, ax = plt.subplots()
    assert_raises(TypeError, ax.pcolormesh, y, x, Z)
    assert_raises(TypeError, ax.pcolormesh, X, Y, Z.T)
    assert_raises(TypeError, ax.pcolormesh, x, y, Z[:-1, :-1],
                  shading="gouraud")
    assert_raises(TypeError, ax.pcolormesh, X, Y, Z[:-1, :-1],
                  shading="gouraud")
@image_comparison(baseline_images=['canonical'])
def test_canonical():
    """Smoke image test: the simplest possible line plot."""
    figure, axes = plt.subplots()
    data = [1, 2, 3]
    axes.plot(data)
@image_comparison(baseline_images=['arc_ellipse'],
                  remove_text=True)
def test_arc_ellipse():
    """Image test: a rotated Arc patch over a filled ellipse outline, once
    with aspect='auto' and once with aspect='equal'."""
    from matplotlib import patches
    xcenter, ycenter = 0.38, 0.52
    width, height = 1e-1, 3e-1
    angle = -30
    # Sample the ellipse boundary, then rotate it by `angle` degrees.
    theta = np.arange(0.0, 360.0, 1.0)*np.pi/180.0
    x = width/2. * np.cos(theta)
    y = height/2. * np.sin(theta)
    rtheta = angle*np.pi/180.
    R = np.array([
        [np.cos(rtheta), -np.sin(rtheta)],
        [np.sin(rtheta), np.cos(rtheta)],
        ])
    x, y = np.dot(R, np.array([x, y]))
    x += xcenter
    y += ycenter
    fig = plt.figure()
    ax = fig.add_subplot(211, aspect='auto')
    ax.fill(x, y, alpha=0.2, facecolor='yellow', edgecolor='yellow', linewidth=1, zorder=1)
    e1 = patches.Arc((xcenter, ycenter), width, height,
                     angle=angle, linewidth=2, fill=False, zorder=2)
    ax.add_patch(e1)
    ax = fig.add_subplot(212, aspect='equal')
    ax.fill(x, y, alpha=0.2, facecolor='green', edgecolor='green', zorder=1)
    e2 = patches.Arc((xcenter, ycenter), width, height,
                     angle=angle, linewidth=2, fill=False, zorder=2)
    ax.add_patch(e2)
@image_comparison(baseline_images=['units_strings'])
def test_units_strings():
    # Make sure passing in sequences of strings doesn't cause the unit
    # conversion registry to recurse infinitely
    Id = ['50', '100', '150', '200', '250']
    pout = ['0', '7.4', '11.4', '14.2', '16.3']
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(Id, pout)
@image_comparison(baseline_images=['markevery'],
                  remove_text=True)
def test_markevery():
    """Image test: markevery on a marker-only plot (None, int, tuple)."""
    x = np.linspace(0, 10, 100)
    y = np.sin(x) * np.sqrt(x/10 + 0.5)
    # check marker only plot
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, y, 'o', label='default')
    ax.plot(x, y, 'd', markevery=None, label='mark all')
    ax.plot(x, y, 's', markevery=10, label='mark every 10')
    # NOTE(review): markevery=(5, 20) means start at index 5, step 20; the
    # label text is misleading but kept to preserve the baseline image.
    ax.plot(x, y, '+', markevery=(5, 20), label='mark every 5 starting at 10')
    ax.legend()
@image_comparison(baseline_images=['markevery_line'],
                  remove_text=True)
def test_markevery_line():
    """Image test: same markevery cases, with line + marker styles."""
    x = np.linspace(0, 10, 100)
    y = np.sin(x) * np.sqrt(x/10 + 0.5)
    # check line/marker combos
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, y, '-o', label='default')
    ax.plot(x, y, '-d', markevery=None, label='mark all')
    ax.plot(x, y, '-s', markevery=10, label='mark every 10')
    # NOTE(review): label text vs markevery=(5, 20) mismatch, as above.
    ax.plot(x, y, '-+', markevery=(5, 20), label='mark every 5 starting at 10')
    ax.legend()
@image_comparison(baseline_images=['markevery_linear_scales'],
                  remove_text=True)
def test_markevery_linear_scales():
    """Image test: the full matrix of markevery argument types on a linear
    scale (None, int, tuple, list, slice, floats, float tuples)."""
    cases = [None,
             8,
             (30, 8),
             [16, 24, 30], [0,-1],
             slice(100, 200, 3),
             0.1, 0.3, 1.5,
             (0.0, 0.1), (0.45, 0.1)]
    cols = 3
    gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
    delta = 0.11
    x = np.linspace(0, 10 - 2 * delta, 200) + delta
    y = np.sin(x) + 1.0 + delta
    for i, case in enumerate(cases):
        row = (i // cols)
        col = i % cols
        plt.subplot(gs[row, col])
        plt.title('markevery=%s' % str(case))
        plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
@image_comparison(baseline_images=['markevery_linear_scales_zoomed'],
                  remove_text=True)
def test_markevery_linear_scales_zoomed():
    """Image test: same markevery matrix, but zoomed in — float markevery
    values depend on the visible data range."""
    cases = [None,
             8,
             (30, 8),
             [16, 24, 30], [0,-1],
             slice(100, 200, 3),
             0.1, 0.3, 1.5,
             (0.0, 0.1), (0.45, 0.1)]
    cols = 3
    gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
    delta = 0.11
    x = np.linspace(0, 10 - 2 * delta, 200) + delta
    y = np.sin(x) + 1.0 + delta
    for i, case in enumerate(cases):
        row = (i // cols)
        col = i % cols
        plt.subplot(gs[row, col])
        plt.title('markevery=%s' % str(case))
        plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
        plt.xlim((6, 6.7))
        plt.ylim((1.1, 1.7))
@image_comparison(baseline_images=['markevery_log_scales'],
                  remove_text=True)
def test_markevery_log_scales():
    """Image test: the markevery matrix on log-log axes."""
    cases = [None,
             8,
             (30, 8),
             [16, 24, 30], [0,-1],
             slice(100, 200, 3),
             0.1, 0.3, 1.5,
             (0.0, 0.1), (0.45, 0.1)]
    cols = 3
    gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
    delta = 0.11
    x = np.linspace(0, 10 - 2 * delta, 200) + delta
    y = np.sin(x) + 1.0 + delta
    for i, case in enumerate(cases):
        row = (i // cols)
        col = i % cols
        plt.subplot(gs[row, col])
        plt.title('markevery=%s' % str(case))
        plt.xscale('log')
        plt.yscale('log')
        plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
@image_comparison(baseline_images=['markevery_polar'],
                  remove_text=True)
def test_markevery_polar():
    """Image test: the markevery matrix on polar axes (a spiral)."""
    cases = [None,
             8,
             (30, 8),
             [16, 24, 30], [0,-1],
             slice(100, 200, 3),
             0.1, 0.3, 1.5,
             (0.0, 0.1), (0.45, 0.1)]
    cols = 3
    gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)
    r = np.linspace(0, 3.0, 200)
    theta = 2 * np.pi * r
    for i, case in enumerate(cases):
        row = (i // cols)
        col = i % cols
        plt.subplot(gs[row, col], polar = True)
        plt.title('markevery=%s' % str(case))
        plt.plot(theta, r, 'o', ls='-', ms=4, markevery=case)
@image_comparison(baseline_images=['marker_edges'],
                  remove_text=True, tol=3)
def test_marker_edges():
    """Image test: marker edge width (mew) 0, 1 and 2 with edge colors."""
    x = np.linspace(0, 1, 10)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, np.sin(x), 'y.', ms=30.0, mew=0, mec='r')
    ax.plot(x+0.1, np.sin(x), 'y.', ms=30.0, mew=1, mec='r')
    ax.plot(x+0.2, np.sin(x), 'y.', ms=30.0, mew=2, mec='b')
@image_comparison(baseline_images=['hist_log'],
                  remove_text=True)
def test_hist_log():
    """Image test: unfilled histogram with a log-scaled count axis."""
    data0 = np.linspace(0, 1, 200)**3
    data = np.r_[1-data0, 1+data0]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist(data, fill=False, log=True)
@image_comparison(baseline_images=['hist_steplog'], remove_text=True)
def test_hist_steplog():
    """Image test: log-scaled stepfilled histograms.

    Covers data spanning zero, strictly positive data, tiny weights, and a
    horizontal orientation.
    """
    np.random.seed(0)
    data = np.random.standard_normal(2000)
    data += -2.0 - np.min(data)
    data_pos = data + 2.1
    data_big = data_pos + 30
    weights = np.ones_like(data) * 1.e-5
    # The Axes returned by plt.subplot were never used, so the dead `ax`
    # bindings from the original have been dropped.
    plt.subplot(4, 1, 1)
    plt.hist(data, 100, histtype='stepfilled', log=True)
    plt.subplot(4, 1, 2)
    plt.hist(data_pos, 100, histtype='stepfilled', log=True)
    plt.subplot(4, 1, 3)
    plt.hist(data, 100, weights=weights, histtype='stepfilled', log=True)
    plt.subplot(4, 1, 4)
    plt.hist(data_big, 100, histtype='stepfilled', log=True, orientation='horizontal')
def contour_dat():
    """Return sample ``(x, y, z)`` arrays for the contour image tests.

    ``x`` has 150 points and ``y`` 120 points on [-3, 5]; ``z`` is the
    (120, 150) broadcast sum ``sin(y) + cos(x)``.
    """
    xs = np.linspace(-3, 5, 150)
    ys = np.linspace(-3, 5, 120)
    zs = np.sin(ys)[:, np.newaxis] + np.cos(xs)
    return xs, ys, zs
@image_comparison(baseline_images=['contour_hatching'])
def test_contour_hatching():
    """Image test: filled contours with per-level hatch patterns."""
    x, y, z = contour_dat()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cs = ax.contourf(x, y, z, hatches=['-', '/', '\\', '//'],
                     cmap=plt.get_cmap('gray'),
                     extend='both', alpha=0.5)
@image_comparison(baseline_images=['contour_colorbar'])
def test_contour_colorbar():
    """Image test: filled contours plus a colorbar with added contour
    lines (``add_lines``, including ``erase=False``)."""
    x, y, z = contour_dat()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cs = ax.contourf(x, y, z, levels=np.arange(-1.8, 1.801, 0.2),
                     cmap=plt.get_cmap('RdBu'),
                     vmin=-0.6,
                     vmax=0.6,
                     extend='both')
    cs1 = ax.contour(x, y, z, levels=np.arange(-2.2, -0.599, 0.2),
                     colors=['y'],
                     linestyles='solid',
                     linewidths=2)
    cs2 = ax.contour(x, y, z, levels=np.arange(0.6, 2.2, 0.2),
                     colors=['c'],
                     linewidths=2)
    cbar = fig.colorbar(cs, ax=ax)
    cbar.add_lines(cs1)
    cbar.add_lines(cs2, erase=False)
@image_comparison(baseline_images=['hist2d'])
def test_hist2d():
    """Image test: basic 2-D histogram."""
    np.random.seed(0)
    # Make the data asymmetric in case the x and y axes are swapped.
    x = np.random.randn(100)*2+5
    y = np.random.randn(100)-2
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist2d(x, y, bins=10)
@image_comparison(baseline_images=['hist2d_transpose'])
def test_hist2d_transpose():
    """Image test: constant-x data exercises the transposition of
    ``np.histogram`` output before it is passed to pcolorfast."""
    np.random.seed(0)
    # Make sure the output from np.histogram is transposed before
    # passing to pcolorfast.
    x = np.array([5]*100)
    y = np.random.randn(100)-2
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist2d(x, y, bins=10)
@image_comparison(baseline_images=['scatter'])
def test_scatter_plot():
    """Image test: scatter with per-point colors and sizes."""
    ax = plt.axes()
    ax.scatter([3, 4, 2, 6], [2, 5, 2, 3], c=['r', 'y', 'b', 'lime'], s=[24, 15, 19, 29])
@cleanup
def test_as_mpl_axes_api():
    """Exercise the ``_as_mpl_axes`` protocol: objects exposing it can be
    passed as ``projection=`` to ``plt.axes``/``plt.gca``/``plt.subplot``.

    Also checks gca's reuse rules: an equal (==) projection returns the
    existing axes, a different one creates new axes.
    """
    # tests the _as_mpl_axes api
    from matplotlib.projections.polar import PolarAxes
    import matplotlib.axes as maxes
    class Polar(object):
        # Minimal stand-in projection object with a configurable offset.
        def __init__(self):
            self.theta_offset = 0
        def _as_mpl_axes(self):
            # implement the matplotlib axes interface
            return PolarAxes, {'theta_offset': self.theta_offset}
    prj = Polar()
    prj2 = Polar()
    prj2.theta_offset = np.pi
    prj3 = Polar()
    # testing axes creation with plt.axes
    ax = plt.axes([0, 0, 1, 1], projection=prj)
    assert type(ax) == PolarAxes, \
        'Expected a PolarAxes, got %s' % type(ax)
    ax_via_gca = plt.gca(projection=prj)
    assert ax_via_gca is ax
    plt.close()
    # testing axes creation with gca
    ax = plt.gca(projection=prj)
    assert type(ax) == maxes._subplots._subplot_classes[PolarAxes], \
        'Expected a PolarAxesSubplot, got %s' % type(ax)
    ax_via_gca = plt.gca(projection=prj)
    assert ax_via_gca is ax
    # try getting the axes given a different polar projection
    ax_via_gca = plt.gca(projection=prj2)
    assert ax_via_gca is not ax
    assert ax.get_theta_offset() == 0, ax.get_theta_offset()
    assert ax_via_gca.get_theta_offset() == np.pi, ax_via_gca.get_theta_offset()
    # try getting the axes given an == (not is) polar projection
    ax_via_gca = plt.gca(projection=prj3)
    assert ax_via_gca is ax
    plt.close()
    # testing axes creation with subplot
    ax = plt.subplot(121, projection=prj)
    assert type(ax) == maxes._subplots._subplot_classes[PolarAxes], \
        'Expected a PolarAxesSubplot, got %s' % type(ax)
    plt.close()
@image_comparison(baseline_images=['log_scales'])
def test_log_scales():
    """Image test: non-default log bases on both axes plus an inverted
    y axis."""
    fig = plt.figure()
    ax = plt.gca()
    plt.plot(np.log(np.linspace(0.1, 100)))
    ax.set_yscale('log', basey=5.5)
    ax.invert_yaxis()
    ax.set_xscale('log', basex=9.0)
@image_comparison(baseline_images=['stackplot_test_image'])
def test_stackplot():
    """Image test: basic three-series stackplot."""
    fig = plt.figure()
    x = np.linspace(0, 10, 10)
    y1 = 1.0 * x
    y2 = 2.0 * x + 1
    y3 = 3.0 * x + 2
    ax = fig.add_subplot(1, 1, 1)
    ax.stackplot(x, y1, y2, y3)
    ax.set_xlim((0, 10))
    ax.set_ylim((0, 70))
@image_comparison(baseline_images=['stackplot_test_baseline'],
                  remove_text=True)
def test_stackplot_baseline():
    """Image test: all four stackplot ``baseline`` modes on random
    bump-shaped layers."""
    np.random.seed(0)
    def layers(n, m):
        # Build an (m, n) array where each column is the sum of several
        # random Gaussian "bumps".
        def bump(a):
            x = 1 / (.1 + np.random.random())
            y = 2 * np.random.random() - .5
            z = 10 / (.1 + np.random.random())
            for i in range(m):
                w = (i / float(m) - y) * z
                a[i] += x * np.exp(-w * w)
        a = np.zeros((m, n))
        for i in range(n):
            for j in range(5):
                bump(a[:, i])
        return a
    d = layers(3, 100)
    fig = plt.figure()
    plt.subplot(2, 2, 1)
    plt.stackplot(list(xrange(100)), d.T, baseline='zero')
    plt.subplot(2, 2, 2)
    plt.stackplot(list(xrange(100)), d.T, baseline='sym')
    plt.subplot(2, 2, 3)
    plt.stackplot(list(xrange(100)), d.T, baseline='wiggle')
    plt.subplot(2, 2, 4)
    plt.stackplot(list(xrange(100)), d.T, baseline='weighted_wiggle')
# The bxp tests below all draw box plots from precomputed statistics
# (matplotlib.cbook.boxplot_stats) on the same seeded lognormal data,
# varying exactly one ax.bxp() option per test.
@image_comparison(baseline_images=['bxp_baseline'],
                  extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_baseline():
    """Image test: bxp with all defaults."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats)
@image_comparison(baseline_images=['bxp_rangewhis'],
                  extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_rangewhis():
    """Image test: whiskers at the full data range (whis='range')."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4)),
        whis='range'
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats)
# NOTE(review): 'precentile' is a typo for 'percentile', but the function
# and baseline-image names must match, so both are left as-is.
@image_comparison(baseline_images=['bxp_precentilewhis'],
                  extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_precentilewhis():
    """Image test: whiskers at the 5th/95th percentiles."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4)),
        whis=[5, 95]
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats)
@image_comparison(baseline_images=['bxp_with_xlabels'],
                  extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_with_xlabels():
    """Image test: per-box labels on the x axis."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    for stats, label in zip(logstats, list('ABCD')):
        stats['label'] = label
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats)
@image_comparison(baseline_images=['bxp_horizontal'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_horizontal():
    """Image test: horizontal boxes (vert=False)."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_xscale('log')
    ax.bxp(logstats, vert=False)
@image_comparison(baseline_images=['bxp_with_ylabels'],
                  extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_with_ylabels():
    """Image test: per-box labels on the y axis (horizontal boxes)."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    for stats, label in zip(logstats, list('ABCD')):
        stats['label'] = label
    fig, ax = plt.subplots()
    ax.set_xscale('log')
    ax.bxp(logstats, vert=False)
@image_comparison(baseline_images=['bxp_patchartist'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_patchartist():
    """Image test: boxes drawn as Patch artists."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats, patch_artist=True)
@image_comparison(baseline_images=['bxp_custompatchartist'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_custompatchartist():
    """Image test: patch-artist boxes with custom face/edge properties."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    boxprops = dict(facecolor='yellow', edgecolor='green', linestyle='dotted')
    ax.bxp(logstats, patch_artist=True, boxprops=boxprops)
@image_comparison(baseline_images=['bxp_customoutlier'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_customoutlier():
    """Image test: custom flier (outlier) marker properties."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    flierprops = dict(linestyle='none', marker='d', markerfacecolor='g')
    ax.bxp(logstats, flierprops=flierprops)
@image_comparison(baseline_images=['bxp_withmean_custompoint'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_showcustommean():
    """Image test: show means as points with custom marker properties."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    meanprops = dict(linestyle='none', marker='d', markerfacecolor='green')
    ax.bxp(logstats, showmeans=True, meanprops=meanprops)
@image_comparison(baseline_images=['bxp_custombox'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_custombox():
    """Image test: custom box line properties."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    boxprops = dict(linestyle='--', color='b', linewidth=3)
    ax.bxp(logstats, boxprops=boxprops)
@image_comparison(baseline_images=['bxp_custommedian'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_custommedian():
    """Image test: custom median line properties."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    medianprops = dict(linestyle='--', color='b', linewidth=3)
    ax.bxp(logstats, medianprops=medianprops)
@image_comparison(baseline_images=['bxp_customcap'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_customcap():
    """Image test: custom whisker-cap line properties."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    capprops = dict(linestyle='--', color='g', linewidth=3)
    ax.bxp(logstats, capprops=capprops)
@image_comparison(baseline_images=['bxp_customwhisker'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_customwhisker():
    """Image test: custom whisker line properties."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    whiskerprops = dict(linestyle='-', color='m', linewidth=3)
    ax.bxp(logstats, whiskerprops=whiskerprops)
@image_comparison(baseline_images=['bxp_withnotch'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_shownotches():
    """Image test: notched boxes."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats, shownotches=True)
@image_comparison(baseline_images=['bxp_nocaps'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_nocaps():
    """Image test: whiskers without end caps."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats, showcaps=False)
@image_comparison(baseline_images=['bxp_nobox'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_nobox():
    """Image test: whiskers only, no boxes."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats, showbox=False)
@image_comparison(baseline_images=['bxp_withmean_point'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_showmean():
    """Image test: means drawn as points (meanline=False)."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats, showmeans=True, meanline=False)
@image_comparison(baseline_images=['bxp_withmean_line'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_showmeanasline():
    """Image test: means drawn as lines (meanline=True)."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats, showmeans=True, meanline=True)
@image_comparison(baseline_images=['bxp_scalarwidth'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_scalarwidth():
    """Image test: one scalar width applied to every box."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats, widths=0.25)
@image_comparison(baseline_images=['bxp_customwidths'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_customwidths():
    """Image test: one width per box."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats, widths=[0.10, 0.25, 0.65, 0.85])
@image_comparison(baseline_images=['bxp_custompositions'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_bxp_custompositions():
    """Image test: explicit, non-uniform box positions."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.bxp(logstats, positions=[1, 5, 6, 7])
@cleanup
def test_bxp_bad_widths():
    """bxp must raise ValueError when len(widths) != number of boxes."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    assert_raises(ValueError, ax.bxp, logstats, widths=[1])
@cleanup
def test_bxp_bad_positions():
    """bxp must raise ValueError when len(positions) != number of boxes."""
    np.random.seed(937)
    logstats = matplotlib.cbook.boxplot_stats(
        np.random.lognormal(mean=1.25, sigma=1., size=(37, 4))
    )
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    assert_raises(ValueError, ax.bxp, logstats, positions=[2, 3])
@image_comparison(baseline_images=['boxplot'])
def test_boxplot():
    """Image test: notched boxplot with bootstrapped confidence intervals
    and outliers at +/-25."""
    x = np.linspace(-7, 7, 140)
    x = np.hstack([-25, x, 25])
    fig, ax = plt.subplots()
    ax.boxplot([x, x], bootstrap=10000, notch=1)
    ax.set_ylim((-30, 30))
@image_comparison(baseline_images=['boxplot_sym2'],
                  remove_text=True, extensions=['png'])
def test_boxplot_sym2():
    """Image test: flier symbol given as marker only ('^') vs color only
    ('g')."""
    x = np.linspace(-7, 7, 140)
    x = np.hstack([-25, x, 25])
    fig, [ax1, ax2] = plt.subplots(1, 2)
    ax1.boxplot([x, x], bootstrap=10000, sym='^')
    ax1.set_ylim((-30, 30))
    ax2.boxplot([x, x], bootstrap=10000, sym='g')
    ax2.set_ylim((-30, 30))
@image_comparison(baseline_images=['boxplot_sym'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_boxplot_sym():
    """Image test: flier symbol given as color+marker ('gs')."""
    x = np.linspace(-7, 7, 140)
    x = np.hstack([-25, x, 25])
    fig, ax = plt.subplots()
    ax.boxplot([x, x], sym='gs')
    ax.set_ylim((-30, 30))
@image_comparison(baseline_images=['boxplot_autorange_whiskers'])
def test_boxplot_autorange_whiskers():
    """Image test: degenerate data (all values equal except the extremes)
    exercises the automatic whisker range."""
    x = np.ones(140)
    x = np.hstack([0, x, 2])
    fig, ax = plt.subplots()
    ax.boxplot([x, x], bootstrap=10000, notch=1)
    ax.set_ylim((-5, 5))
@image_comparison(baseline_images=['boxplot_with_CIarray'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_boxplot_with_CIarray():
    """Image test: user-supplied medians and confidence intervals
    overriding the computed ones (None entries keep the computed value)."""
    x = np.linspace(-7, 7, 140)
    x = np.hstack([-25, x, 25])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    CIs = np.array([[-1.5, 3.], [-1., 3.5]])
    # show 1 boxplot with mpl medians/conf. interfals, 1 with manual values
    ax.boxplot([x, x], bootstrap=10000, usermedians=[None, 1.0],
               conf_intervals=CIs, notch=1)
    ax.set_ylim((-30, 30))
@image_comparison(baseline_images=['boxplot_no_inverted_whisker'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_boxplot_no_weird_whisker():
    """Image test: widely-spread log-scaled data must not produce an
    inverted whisker."""
    x = np.array([3, 9000, 150, 88, 350, 200000, 1400, 960],
                 dtype=np.float64)
    ax1 = plt.axes()
    ax1.boxplot(x)
    ax1.set_yscale('log')
    ax1.yaxis.grid(False, which='minor')
    ax1.xaxis.grid(False)
@cleanup
def test_boxplot_bad_medians_1():
    """boxplot must raise ValueError when usermedians has the wrong length."""
    x = np.linspace(-7, 7, 140)
    x = np.hstack([-25, x, 25])
    fig, ax = plt.subplots()
    assert_raises(ValueError, ax.boxplot, x, usermedians=[1, 2])
@cleanup
def test_boxplot_bad_medians_2():
    """boxplot must raise ValueError when usermedians entries are not
    scalars."""
    x = np.linspace(-7, 7, 140)
    x = np.hstack([-25, x, 25])
    fig, ax = plt.subplots()
    assert_raises(ValueError, ax.boxplot, [x, x], usermedians=[[1, 2], [1, 2]])
@cleanup
def test_boxplot_bad_ci_1():
    """boxplot must raise ValueError when conf_intervals has the wrong
    length."""
    x = np.linspace(-7, 7, 140)
    x = np.hstack([-25, x, 25])
    fig, ax = plt.subplots()
    assert_raises(ValueError, ax.boxplot, [x, x],
                  conf_intervals=[[1, 2]])
@cleanup
def test_boxplot_bad_ci_2():
    """boxplot must raise ValueError when a conf_intervals entry does not
    have exactly two values."""
    x = np.linspace(-7, 7, 140)
    x = np.hstack([-25, x, 25])
    fig, ax = plt.subplots()
    assert_raises(ValueError, ax.boxplot, [x, x],
                  conf_intervals=[[1, 2], [1]])
@image_comparison(baseline_images=['boxplot_mod_artists_after_plotting'],
                  remove_text=True, extensions=['png'],
                  savefig_kwarg={'dpi': 40})
def test_boxplot_mod_artist_after_plotting():
    """Image test: the artists returned by boxplot stay live and can be
    restyled after the call."""
    x = [0.15, 0.11, 0.06, 0.06, 0.12, 0.56, -0.56]
    fig, ax = plt.subplots()
    bp = ax.boxplot(x, sym="o")
    for key in bp:
        for obj in bp[key]:
            obj.set_color('green')
# The violinplot tests below each toggle one rendering option; every test
# uses its own deterministic seed (noted as digits of an irrational number)
# so the baseline images are reproducible.
@image_comparison(baseline_images=['violinplot_vert_baseline'],
                  extensions=['png'])
def test_vert_violinplot_baseline():
    """Image test: vertical violins, no overlays."""
    # First 9 digits of frac(sqrt(2))
    np.random.seed(414213562)
    data = [np.random.normal(size=100) for i in range(4)]
    ax = plt.axes()
    ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
                  showmedians=0)
@image_comparison(baseline_images=['violinplot_vert_showmeans'],
                  extensions=['png'])
def test_vert_violinplot_showmeans():
    """Image test: vertical violins with mean lines."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(3))
    np.random.seed(732050807)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), showmeans=1, showextrema=0,
                  showmedians=0)
@image_comparison(baseline_images=['violinplot_vert_showextrema'],
                  extensions=['png'])
def test_vert_violinplot_showextrema():
    """Image test: vertical violins with extrema markers."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(5))
    np.random.seed(236067977)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), showmeans=0, showextrema=1,
                  showmedians=0)
@image_comparison(baseline_images=['violinplot_vert_showmedians'],
                  extensions=['png'])
def test_vert_violinplot_showmedians():
    """Image test: vertical violins with median lines."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(7))
    np.random.seed(645751311)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
                  showmedians=1)
@image_comparison(baseline_images=['violinplot_vert_showall'],
                  extensions=['png'])
def test_vert_violinplot_showall():
    """Image test: vertical violins with all overlays enabled."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(11))
    np.random.seed(316624790)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), showmeans=1, showextrema=1,
                  showmedians=1)
@image_comparison(baseline_images=['violinplot_vert_custompoints_10'],
                  extensions=['png'])
def test_vert_violinplot_custompoints_10():
    """Image test: coarse KDE evaluation (points=10)."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(13))
    np.random.seed(605551275)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
                  showmedians=0, points=10)
@image_comparison(baseline_images=['violinplot_vert_custompoints_200'],
                  extensions=['png'])
def test_vert_violinplot_custompoints_200():
    """Image test: fine KDE evaluation (points=200)."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(17))
    np.random.seed(123105625)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), showmeans=0, showextrema=0,
                  showmedians=0, points=200)
@image_comparison(baseline_images=['violinplot_horiz_baseline'],
                  extensions=['png'])
def test_horiz_violinplot_baseline():
    """Image test: horizontal violins, no overlays."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(19))
    np.random.seed(358898943)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
                  showextrema=0, showmedians=0)
@image_comparison(baseline_images=['violinplot_horiz_showmedians'],
                  extensions=['png'])
def test_horiz_violinplot_showmedians():
    """Image test: horizontal violins with median lines."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(23))
    np.random.seed(795831523)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
                  showextrema=0, showmedians=1)
@image_comparison(baseline_images=['violinplot_horiz_showmeans'],
                  extensions=['png'])
def test_horiz_violinplot_showmeans():
    """Image test: horizontal violins with mean lines."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(29))
    np.random.seed(385164807)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), vert=False, showmeans=1,
                  showextrema=0, showmedians=0)
@image_comparison(baseline_images=['violinplot_horiz_showextrema'],
                  extensions=['png'])
def test_horiz_violinplot_showextrema():
    """Image test: horizontal violins with extrema markers."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(31))
    np.random.seed(567764362)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
                  showextrema=1, showmedians=0)
@image_comparison(baseline_images=['violinplot_horiz_showall'],
                  extensions=['png'])
def test_horiz_violinplot_showall():
    """Image test: horizontal violins with all overlays enabled."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(37))
    np.random.seed(82762530)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), vert=False, showmeans=1,
                  showextrema=1, showmedians=1)
@image_comparison(baseline_images=['violinplot_horiz_custompoints_10'],
                  extensions=['png'])
def test_horiz_violinplot_custompoints_10():
    """Image test: horizontal violins, coarse KDE (points=10)."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(41))
    np.random.seed(403124237)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
                  showextrema=0, showmedians=0, points=10)
@image_comparison(baseline_images=['violinplot_horiz_custompoints_200'],
                  extensions=['png'])
def test_horiz_violinplot_custompoints_200():
    """Image test: horizontal violins, fine KDE (points=200)."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(43))
    np.random.seed(557438524)
    data = [np.random.normal(size=100) for i in range(4)]
    ax.violinplot(data, positions=range(4), vert=False, showmeans=0,
                  showextrema=0, showmedians=0, points=200)
@cleanup
def test_violinplot_bad_positions():
    """violinplot must raise ValueError when len(positions) != len(data)."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(47))
    np.random.seed(855654600)
    data = [np.random.normal(size=100) for i in range(4)]
    assert_raises(ValueError, ax.violinplot, data, positions=range(5))
@cleanup
def test_violinplot_bad_widths():
    """violinplot must raise ValueError when len(widths) != len(data)."""
    ax = plt.axes()
    # First 9 digits of frac(sqrt(53))
    np.random.seed(280109889)
    data = [np.random.normal(size=100) for i in range(4)]
    assert_raises(ValueError, ax.violinplot, data, positions=range(4),
                  widths=[1, 2, 3])
@cleanup
def test_manage_xticks():
    """With manage_xticks=False, boxplot must leave the x limits alone."""
    _, ax = plt.subplots()
    ax.set_xlim(0, 4)
    old_xlim = ax.get_xlim()
    np.random.seed(0)
    y1 = np.random.normal(10, 3, 20)
    y2 = np.random.normal(3, 1, 20)
    ax.boxplot([y1, y2], positions = [1,2],
               manage_xticks=False)
    new_xlim = ax.get_xlim()
    assert_array_equal(old_xlim, new_xlim)
@image_comparison(baseline_images=['errorbar_basic', 'errorbar_mixed'])
def test_errorbar():
    """Image test (two figures): basic errorbars, then a 2x2 grid of
    symmetric, asymmetric, alpha-blended, and log-scale variants."""
    x = np.arange(0.1, 4, 0.5)
    y = np.exp(-x)
    yerr = 0.1 + 0.2*np.sqrt(x)
    xerr = 0.1 + yerr
    # First illustrate basic pyplot interface, using defaults where possible.
    fig = plt.figure()
    ax = fig.gca()
    ax.errorbar(x, y, xerr=0.2, yerr=0.4)
    ax.set_title("Simplest errorbars, 0.2 in x, 0.4 in y")
    # Now switch to a more OO interface to exercise more features.
    fig, axs = plt.subplots(nrows=2, ncols=2, sharex=True)
    ax = axs[0, 0]
    ax.errorbar(x, y, yerr=yerr, fmt='o')
    ax.set_title('Vert. symmetric')
    # With 4 subplots, reduce the number of axis ticks to avoid crowding.
    ax.locator_params(nbins=4)
    ax = axs[0, 1]
    ax.errorbar(x, y, xerr=xerr, fmt='o', alpha=0.4)
    ax.set_title('Hor. symmetric w/ alpha')
    ax = axs[1, 0]
    ax.errorbar(x, y, yerr=[yerr, 2*yerr], xerr=[xerr, 2*xerr], fmt='--o')
    ax.set_title('H, V asymmetric')
    ax = axs[1, 1]
    ax.set_yscale('log')
    # Here we have to be careful to keep all y values positive:
    ylower = np.maximum(1e-2, y - yerr)
    yerr_lower = y - ylower
    ax.errorbar(x, y, yerr=[yerr_lower, 2*yerr], xerr=xerr,
                fmt='o', ecolor='g', capthick=2)
    ax.set_title('Mixed sym., log y')
    fig.suptitle('Variable errorbars')
@image_comparison(baseline_images=['errorbar_limits'])
def test_errorbar_limits():
    """Image test: errorbars with upper/lower limit indicators in y, then
    in x, then both combined."""
    x = np.arange(0.5, 5.5, 0.5)
    y = np.exp(-x)
    xerr = 0.1
    yerr = 0.2
    ls = 'dotted'
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    # standard error bars
    plt.errorbar(x, y, xerr=xerr, yerr=yerr, ls=ls, color='blue')
    # including upper limits
    uplims = np.zeros(x.shape)
    uplims[[1, 5, 9]] = True
    plt.errorbar(x, y+0.5, xerr=xerr, yerr=yerr, uplims=uplims, ls=ls,
                 color='green')
    # including lower limits
    lolims = np.zeros(x.shape)
    lolims[[2, 4, 8]] = True
    plt.errorbar(x, y+1.0, xerr=xerr, yerr=yerr, lolims=lolims, ls=ls,
                 color='red')
    # including upper and lower limits
    plt.errorbar(x, y+1.5, marker='o', ms=8, xerr=xerr, yerr=yerr,
                 lolims=lolims, uplims=uplims, ls=ls, color='magenta')
    # including xlower and xupper limits
    xerr = 0.2
    yerr = np.zeros(x.shape) + 0.2
    yerr[[3, 6]] = 0.3
    xlolims = lolims
    xuplims = uplims
    lolims = np.zeros(x.shape)
    uplims = np.zeros(x.shape)
    lolims[[6]] = True
    uplims[[3]] = True
    plt.errorbar(x, y+2.1, marker='o', ms=8, xerr=xerr, yerr=yerr,
                 xlolims=xlolims, xuplims=xuplims, uplims=uplims,
                 lolims=lolims, ls='none', mec='blue', capsize=0,
                 color='cyan')
    ax.set_xlim((0, 5.5))
    ax.set_title('Errorbar upper and lower limits')
@image_comparison(baseline_images=['hist_stacked_stepfilled'])
def test_hist_stacked_stepfilled():
    """Render a stacked, stepfilled histogram of two datasets."""
    # make some data
    d1 = np.linspace(1, 3, 20)
    d2 = np.linspace(0, 10, 50)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist((d1, d2), histtype="stepfilled", stacked=True)
@image_comparison(baseline_images=['hist_offset'])
def test_hist_offset():
    """Render histograms raised off the x-axis via the ``bottom`` kwarg."""
    # make some data
    d1 = np.linspace(0, 10, 50)
    d2 = np.linspace(1, 3, 20)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist(d1, bottom=5)
    ax.hist(d2, bottom=15)
@image_comparison(baseline_images=['hist_step'], extensions=['png'], remove_text=True)
def test_hist_step():
    """Render a single step-type histogram with fixed axis limits."""
    # make some data
    d1 = np.linspace(1, 3, 20)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist(d1, histtype="step")
    # Fixed limits so the baseline image is independent of autoscaling.
    ax.set_ylim(0, 10)
    ax.set_xlim(-1, 5)
@image_comparison(baseline_images=['hist_step_horiz'], extensions=['png'])
def test_hist_step_horiz():
    """Render step-type histograms with horizontal orientation."""
    # make some data
    d1 = np.linspace(0, 10, 50)
    d2 = np.linspace(1, 3, 20)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist((d1, d2), histtype="step", orientation="horizontal")
@image_comparison(baseline_images=['hist_stacked_weights'])
def test_hist_stacked_weighted():
    """Render a stacked histogram with per-sample weights."""
    # make some data
    d1 = np.linspace(0, 10, 50)
    d2 = np.linspace(1, 3, 20)
    # One weight per sample, matching the dataset lengths (50 and 20).
    w1 = np.linspace(0.01, 3.5, 50)
    w2 = np.linspace(0.05, 2., 20)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist((d1, d2), weights=(w1, w2), histtype="stepfilled", stacked=True)
@cleanup
def test_stem_args():
    """Exercise each supported positional call signature of Axes.stem."""
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    xs = list(xrange(10))
    ys = list(xrange(10))
    # Test the call signatures: y only; x, y; x, y, linefmt; basefmt kwarg.
    ax.stem(ys)
    ax.stem(xs, ys)
    ax.stem(xs, ys, 'r--')
    ax.stem(xs, ys, 'r--', basefmt='b--')
@cleanup
def test_stem_dates():
    """stem should accept datetime values on the x-axis."""
    from dateutil import parser
    fig, ax = plt.subplots(1, 1)
    # Two timestamps one hour apart, with arbitrary heights.
    t0 = parser.parse("2013-9-28 11:00:00")
    t1 = parser.parse("2013-9-28 12:00:00")
    ax.stem([t0, t1], [100, 200], "*-")
@image_comparison(baseline_images=['hist_stacked_stepfilled_alpha'])
def test_hist_stacked_stepfilled_alpha():
    """Render a stacked stepfilled histogram with translucent fills."""
    # make some data
    d1 = np.linspace(1, 3, 20)
    d2 = np.linspace(0, 10, 50)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist((d1, d2), histtype="stepfilled", stacked=True, alpha=0.5)
@image_comparison(baseline_images=['hist_stacked_step'])
def test_hist_stacked_step():
    """Render a stacked histogram drawn with unfilled step outlines."""
    # make some data
    d1 = np.linspace(1, 3, 20)
    d2 = np.linspace(0, 10, 50)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist((d1, d2), histtype="step", stacked=True)
@image_comparison(baseline_images=['hist_stacked_normed'])
def test_hist_stacked_normed():
    """Render a stacked histogram normalized to unit area."""
    # make some data
    d1 = np.linspace(1, 3, 20)
    d2 = np.linspace(0, 10, 50)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist((d1, d2), stacked=True, normed=True)
@image_comparison(baseline_images=['hist_step_bottom'], extensions=['png'], remove_text=True)
def test_hist_step_bottom():
    """Render a stepfilled histogram with a per-bin ``bottom`` array."""
    # make some data
    d1 = np.linspace(1, 3, 20)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # One bottom value per bin (default 10 bins).
    ax.hist(d1, bottom=np.arange(10), histtype="stepfilled")
@image_comparison(baseline_images=['hist_stacked_bar'])
def test_hist_stacked_bar():
    """Render a barstacked histogram of six ragged datasets with explicit
    colors and a legend."""
    # make some data: six datasets of differing lengths
    d = [[100, 100, 100, 100, 200, 320, 450, 80, 20, 600, 310, 800],
         [20, 23, 50, 11, 100, 420], [120, 120, 120, 140, 140, 150, 180],
         [60, 60, 60, 60, 300, 300, 5, 5, 5, 5, 10, 300],
         [555, 555, 555, 30, 30, 30, 30, 30, 100, 100, 100, 100, 30, 30],
         [30, 30, 30, 30, 400, 400, 400, 400, 400, 400, 400, 400]]
    # One RGB tuple per dataset (six entries).
    colors = [(0.5759849696758961, 1.0, 0.0), (0.0, 1.0, 0.350624650815206),
              (0.0, 1.0, 0.6549834156005998), (0.0, 0.6569064625276622, 1.0),
              (0.28302699607823545, 0.0, 1.0), (0.6849123462299822, 0.0, 1.0)]
    # NOTE(review): only 5 labels for 6 datasets, so the last dataset is
    # unlabeled in the legend — confirm this is intentional.
    labels = ['green', 'orange', ' yellow', 'magenta', 'black']
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist(d, bins=10, histtype='barstacked', align='mid', color=colors, label=labels)
    ax.legend(loc='upper right', bbox_to_anchor=(1.0, 1.0), ncol=1)
@cleanup
def test_hist_emptydata():
    """hist must cope with an empty dataset mixed in with non-empty ones."""
    fig = plt.figure()
    axes = fig.add_subplot(111)
    datasets = [[], range(10), range(10)]
    axes.hist(datasets, histtype="step")
@image_comparison(baseline_images=['transparent_markers'], remove_text=True)
def test_transparent_markers():
    """Render large diamond markers with an unfilled face (mfc='none')."""
    np.random.seed(0)
    data = np.random.random(50)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(data, 'D', mfc='none', markersize=100)
@image_comparison(baseline_images=['mollweide_grid'], remove_text=True)
def test_mollweide_grid():
    """Render the default grid on a Mollweide projection."""
    # test that both horizontal and vertical gridlines appear on the Mollweide
    # projection
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='mollweide')
    ax.grid()
@cleanup
def test_mollweide_forward_inverse_closure():
    """Forward-then-inverse Mollweide projection should be ~identity."""
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='mollweide')
    # 1-degree (lon, lat) grid covering the full sphere.
    lons = np.linspace(-np.pi, np.pi, 360)
    lats = np.linspace(-np.pi / 2.0, np.pi / 2.0, 180)
    grid_lon, grid_lat = np.meshgrid(lons, lats)
    pts = np.column_stack((grid_lon.ravel(), grid_lat.ravel()))
    # Round-trip through the projection and back, then compare to 3 decimals.
    forward = ax.transProjection.transform(pts)
    roundtrip = ax.transProjection.inverted().transform(forward)
    np.testing.assert_array_almost_equal(pts, roundtrip, 3)
@cleanup
def test_mollweide_inverse_forward_closure():
    """Inverse-then-forward Mollweide projection should be ~identity."""
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='mollweide')
    # Dense grid of display-space points in the unit square.
    axis_pts = np.linspace(0, 1, 500)
    grid_x, grid_y = np.meshgrid(axis_pts, axis_pts)
    pts = np.column_stack((grid_x.ravel(), grid_y.ravel()))
    # Round-trip: display -> (lon, lat) -> display, compare to 3 decimals.
    lonlat = ax.transProjection.inverted().transform(pts)
    roundtrip = ax.transProjection.transform(lonlat)
    np.testing.assert_array_almost_equal(pts, roundtrip, 3)
@image_comparison(baseline_images=['test_alpha'], remove_text=True)
def test_alpha():
    """Render lines/markers mixing alpha set via RGBA colors vs the alpha kwarg."""
    np.random.seed(0)
    data = np.random.random(50)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # alpha=.5 markers, solid line
    ax.plot(data, '-D', color=[1, 0, 0], mfc=[1, 0, 0, .5],
            markersize=20, lw=10)
    # everything solid by kwarg (alpha kwarg overrides the RGBA alphas)
    ax.plot(data + 2, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0, .5],
            markersize=20, lw=10,
            alpha=1)
    # everything alpha=.5 by kwarg
    ax.plot(data + 4, '-D', color=[1, 0, 0], mfc=[1, 0, 0],
            markersize=20, lw=10,
            alpha=.5)
    # everything alpha=.5 by colors
    ax.plot(data + 6, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0, .5],
            markersize=20, lw=10)
    # alpha=.5 line, solid markers
    ax.plot(data + 8, '-D', color=[1, 0, 0, .5], mfc=[1, 0, 0],
            markersize=20, lw=10)
@image_comparison(baseline_images=['eventplot'], remove_text=True)
def test_eventplot():
    '''
    test that eventplot produces the correct output
    '''
    np.random.seed(0)
    # 32 uniformly-styled series plus 6 individually-styled ones.
    data1 = np.random.random([32, 20]).tolist()
    data2 = np.random.random([6, 20]).tolist()
    data = data1 + data2
    num_datasets = len(data)
    colors1 = [[0, 1, .7]] * len(data1)
    colors2 = [[1, 0, 0],
               [0, 1, 0],
               [0, 0, 1],
               [1, .75, 0],
               [1, 0, 1],
               [0, 1, 1]]
    colors = colors1 + colors2
    # Dense, evenly-spaced offsets for the first group; scattered ones for
    # the second.
    lineoffsets1 = 12 + np.arange(0, len(data1)) * .33
    lineoffsets2 = [-15, -3, 1, 1.5, 6, 10]
    lineoffsets = lineoffsets1.tolist() + lineoffsets2
    linelengths1 = [.33] * len(data1)
    linelengths2 = [5, 2, 1, 1, 3, 1.5]
    linelengths = linelengths1 + linelengths2
    fig = plt.figure()
    axobj = fig.add_subplot(111)
    colls = axobj.eventplot(data, colors=colors, lineoffsets=lineoffsets,
                            linelengths=linelengths)
    # One EventCollection must come back per dataset.
    num_collections = len(colls)
    np.testing.assert_equal(num_collections, num_datasets)
@image_comparison(baseline_images=['test_eventplot_defaults'], extensions=['png'], remove_text=True)
def test_eventplot_defaults():
    '''
    test that eventplot produces the correct output given the default params
    (see bug #3728)
    '''
    np.random.seed(0)
    data1 = np.random.random([32, 20]).tolist()
    data2 = np.random.random([6, 20]).tolist()
    data = data1 + data2
    fig = plt.figure()
    axobj = fig.add_subplot(111)
    # Return value intentionally unused; the check is the rendered image.
    colls = axobj.eventplot(data)
@cleanup
def test_empty_eventplot():
    """An eventplot of a single empty series must draw without crashing."""
    fig, axes = plt.subplots(1, 1)
    axes.eventplot([[]], colors=[(0.0, 0.0, 0.0, 0.0)])
    plt.draw()
@image_comparison(baseline_images=['vertex_markers'], extensions=['png'],
                  remove_text=True)
def test_vertex_markers():
    """Render custom vertex markers given as a tuple and as a list of vertices."""
    data = list(xrange(10))
    # Same square marker expressed two ways; both forms must be accepted.
    marker_as_tuple = ((-1, -1), (1, -1), (1, 1), (-1, 1))
    marker_as_list = [(-1, -1), (1, -1), (1, 1), (-1, 1)]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(data, linestyle='', marker=marker_as_tuple, mfc='k')
    ax.plot(data[::-1], linestyle='', marker=marker_as_list, mfc='b')
    ax.set_xlim([-1, 10])
    ax.set_ylim([-1, 10])
@image_comparison(baseline_images=['vline_hline_zorder',
                                   'errorbar_zorder'])
def test_eb_line_zorder():
    """Render zorder interactions for axh/axvline and for errorbar artists."""
    x = list(xrange(10))
    # First illustrate basic pyplot interface, using defaults where possible.
    fig = plt.figure()
    ax = fig.gca()
    ax.plot(x, lw=10, zorder=5)
    # Lines below/above the zorder-5 plot line to exercise stacking.
    ax.axhline(1, color='red', lw=10, zorder=1)
    ax.axhline(5, color='green', lw=10, zorder=10)
    ax.axvline(7, color='m', lw=10, zorder=7)
    ax.axvline(2, color='k', lw=10, zorder=3)
    ax.set_title("axvline and axhline zorder test")
    # Now switch to a more OO interface to exercise more features.
    fig = plt.figure()
    ax = fig.gca()
    x = list(xrange(10))
    y = np.zeros(10)
    yerr = list(xrange(10))
    ax.errorbar(x, y, yerr=yerr, zorder=5, lw=5, color='r')
    for j in range(10):
        # Horizontal lines at increasing zorders straddling the errorbar.
        ax.axhline(j, lw=5, color='k', zorder=j)
        ax.axhline(-j, lw=5, color='k', zorder=j)
    ax.set_title("errorbar zorder test")
@image_comparison(baseline_images=['step_linestyle'], remove_text=True)
def test_step_linestyle():
    """Render step plots with every linestyle and every ``where`` mode."""
    x = y = np.arange(10)
    # First illustrate basic pyplot interface, using defaults where possible.
    fig, ax_lst = plt.subplots(2, 2)
    ax_lst = ax_lst.flatten()
    ln_styles = ['-', '--', '-.', ':']
    # One subplot per linestyle, each showing pre/mid/post stepping.
    for ax, ls in zip(ax_lst, ln_styles):
        ax.step(x, y, lw=5, linestyle=ls, where='pre')
        ax.step(x, y + 1, lw=5, linestyle=ls, where='mid')
        ax.step(x, y + 2, lw=5, linestyle=ls, where='post')
        ax.set_xlim([-1, 5])
        ax.set_ylim([-1, 7])
@image_comparison(baseline_images=['mixed_collection'], remove_text=True)
def test_mixed_collection():
    """Render PatchCollections that the PDF backend can and cannot optimize."""
    from matplotlib import patches
    from matplotlib import collections
    x = list(xrange(10))
    # First illustrate basic pyplot interface, using defaults where possible.
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    c = patches.Circle((8, 8), radius=4, facecolor='none', edgecolor='green')
    # PDF can optimize this one
    p1 = collections.PatchCollection([c], match_original=True)
    p1.set_offsets([[0, 0], [24, 24]])
    p1.set_linewidths([1, 5])
    # PDF can't optimize this one, because the alpha of the edge changes
    p2 = collections.PatchCollection([c], match_original=True)
    p2.set_offsets([[48, 0], [-32, -16]])
    p2.set_linewidths([1, 5])
    p2.set_edgecolors([[0, 0, 0.1, 1.0], [0, 0, 0.1, 0.5]])
    # Grey background so the translucent edges are visible.
    ax.patch.set_color('0.5')
    ax.add_collection(p1)
    ax.add_collection(p2)
    ax.set_xlim(0, 16)
    ax.set_ylim(0, 16)
@cleanup
def test_subplot_key_hash():
    """Non-integer subplot arguments must normalize to a truncated geometry key."""
    axes = plt.subplot(np.float64(5.5), np.int64(1), np.float64(1.2))
    # twinx shares the subplot spec; the geometry lookup must still resolve.
    axes.twinx()
    geometry = axes.get_subplotspec().get_geometry()
    assert_equal((5, 1, 0, None), geometry)
@image_comparison(baseline_images=['specgram_freqs',
                                   'specgram_freqs_linear'],
                  remove_text=True, extensions=['png'])
def test_specgram_freqs():
    '''test axes.specgram in default (psd) mode with sinusoidal stimuli'''
    n = 10000
    Fs = 100.
    # Two groups of stimulus frequencies, concatenated back-to-back below.
    fstims1 = [Fs/4, Fs/5, Fs/11]
    fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
    NFFT = int(1000 * Fs / min(fstims1 + fstims2))
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    x = np.arange(0, n, 1/Fs)
    y1 = np.zeros(x.size)
    y2 = np.zeros(x.size)
    for fstim1, fstim2 in zip(fstims1, fstims2):
        y1 += np.sin(fstim1 * x * np.pi * 2)
        y2 += np.sin(fstim2 * x * np.pi * 2)
    y = np.hstack([y1, y2])
    # fig1: default (dB) scale; fig2: linear scale with a LogNorm colormap.
    fig1 = plt.figure()
    fig2 = plt.figure()
    ax11 = fig1.add_subplot(3, 1, 1)
    ax12 = fig1.add_subplot(3, 1, 2)
    ax13 = fig1.add_subplot(3, 1, 3)
    ax21 = fig2.add_subplot(3, 1, 1)
    ax22 = fig2.add_subplot(3, 1, 2)
    ax23 = fig2.add_subplot(3, 1, 3)
    # Return values unused; the check is the rendered baseline image.
    spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default')
    spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided')
    spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided')
    spec21 = ax21.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default',
                           scale='linear', norm=matplotlib.colors.LogNorm())
    spec22 = ax22.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided',
                           scale='linear', norm=matplotlib.colors.LogNorm())
    spec23 = ax23.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided',
                           scale='linear', norm=matplotlib.colors.LogNorm())
@image_comparison(baseline_images=['specgram_noise',
                                   'specgram_noise_linear'],
                  remove_text=True, extensions=['png'])
def test_specgram_noise():
    '''test axes.specgram in default (psd) mode with noise stimuli'''
    np.random.seed(0)
    n = 10000
    Fs = 100.
    NFFT = int(1000 * Fs / 11)
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    # Gaussian noise followed by uniform noise, concatenated.
    y1 = np.random.standard_normal(n)
    y2 = np.random.rand(n)
    y = np.hstack([y1, y2])
    # fig1: default (dB) scale; fig2: linear scale with a LogNorm colormap.
    fig1 = plt.figure()
    fig2 = plt.figure()
    ax11 = fig1.add_subplot(3, 1, 1)
    ax12 = fig1.add_subplot(3, 1, 2)
    ax13 = fig1.add_subplot(3, 1, 3)
    ax21 = fig2.add_subplot(3, 1, 1)
    ax22 = fig2.add_subplot(3, 1, 2)
    ax23 = fig2.add_subplot(3, 1, 3)
    # Return values unused; the check is the rendered baseline image.
    spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default')
    spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided')
    spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided')
    spec21 = ax21.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default',
                           scale='linear', norm=matplotlib.colors.LogNorm())
    spec22 = ax22.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided',
                           scale='linear', norm=matplotlib.colors.LogNorm())
    spec23 = ax23.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided',
                           scale='linear', norm=matplotlib.colors.LogNorm())
@image_comparison(baseline_images=['specgram_magnitude_freqs',
                                   'specgram_magnitude_freqs_linear'],
                  remove_text=True, extensions=['png'])
def test_specgram_magnitude_freqs():
    '''test axes.specgram in magnitude mode with sinusoidal stimuli'''
    n = 10000
    Fs = 100.
    fstims1 = [Fs/4, Fs/5, Fs/11]
    fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
    NFFT = int(1000 * Fs / min(fstims1 + fstims2))
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    x = np.arange(0, n, 1/Fs)
    y1 = np.zeros(x.size)
    y2 = np.zeros(x.size)
    for i, (fstim1, fstim2) in enumerate(zip(fstims1, fstims2)):
        y1 += np.sin(fstim1 * x * np.pi * 2)
        y2 += np.sin(fstim2 * x * np.pi * 2)
        # Force the final sample of each signal to exactly 1.0.
        y1[-1] = y1[-1]/y1[-1]
        y2[-1] = y2[-1]/y2[-1]
    y = np.hstack([y1, y2])
    # fig1: default (dB) scale; fig2: linear scale with a LogNorm colormap.
    fig1 = plt.figure()
    fig2 = plt.figure()
    ax11 = fig1.add_subplot(3, 1, 1)
    ax12 = fig1.add_subplot(3, 1, 2)
    ax13 = fig1.add_subplot(3, 1, 3)
    ax21 = fig2.add_subplot(3, 1, 1)
    ax22 = fig2.add_subplot(3, 1, 2)
    ax23 = fig2.add_subplot(3, 1, 3)
    # Return values unused; the check is the rendered baseline image.
    spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default', mode='magnitude')
    spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided', mode='magnitude')
    spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided', mode='magnitude')
    spec21 = ax21.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default', mode='magnitude',
                           scale='linear', norm=matplotlib.colors.LogNorm())
    spec22 = ax22.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided', mode='magnitude',
                           scale='linear', norm=matplotlib.colors.LogNorm())
    spec23 = ax23.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided', mode='magnitude',
                           scale='linear', norm=matplotlib.colors.LogNorm())
@image_comparison(baseline_images=['specgram_magnitude_noise',
                                   'specgram_magnitude_noise_linear'],
                  remove_text=True, extensions=['png'])
def test_specgram_magnitude_noise():
    '''test axes.specgram in magnitude mode with noise stimuli'''
    np.random.seed(0)
    n = 10000
    Fs = 100.
    NFFT = int(1000 * Fs / 11)
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    # Gaussian noise followed by uniform noise, concatenated.
    y1 = np.random.standard_normal(n)
    y2 = np.random.rand(n)
    y = np.hstack([y1, y2])
    # fig1: default (dB) scale; fig2: linear scale with a LogNorm colormap.
    fig1 = plt.figure()
    fig2 = plt.figure()
    ax11 = fig1.add_subplot(3, 1, 1)
    ax12 = fig1.add_subplot(3, 1, 2)
    ax13 = fig1.add_subplot(3, 1, 3)
    ax21 = fig2.add_subplot(3, 1, 1)
    ax22 = fig2.add_subplot(3, 1, 2)
    ax23 = fig2.add_subplot(3, 1, 3)
    # Return values unused; the check is the rendered baseline image.
    spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default', mode='magnitude')
    spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided', mode='magnitude')
    spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided', mode='magnitude')
    spec21 = ax21.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default', mode='magnitude',
                           scale='linear', norm=matplotlib.colors.LogNorm())
    spec22 = ax22.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided', mode='magnitude',
                           scale='linear', norm=matplotlib.colors.LogNorm())
    spec23 = ax23.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided', mode='magnitude',
                           scale='linear', norm=matplotlib.colors.LogNorm())
@image_comparison(baseline_images=['specgram_angle_freqs'],
                  remove_text=True, extensions=['png'])
def test_specgram_angle_freqs():
    '''test axes.specgram in angle mode with sinusoidal stimuli'''
    n = 10000
    Fs = 100.
    fstims1 = [Fs/4, Fs/5, Fs/11]
    fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
    NFFT = int(1000 * Fs / min(fstims1 + fstims2))
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    x = np.arange(0, n, 1/Fs)
    y1 = np.zeros(x.size)
    y2 = np.zeros(x.size)
    for i, (fstim1, fstim2) in enumerate(zip(fstims1, fstims2)):
        y1 += np.sin(fstim1 * x * np.pi * 2)
        y2 += np.sin(fstim2 * x * np.pi * 2)
        # Force the final sample of each signal to exactly 1.0.
        y1[-1] = y1[-1]/y1[-1]
        y2[-1] = y2[-1]/y2[-1]
    y = np.hstack([y1, y2])
    fig1 = plt.figure()
    ax11 = fig1.add_subplot(3, 1, 1)
    ax12 = fig1.add_subplot(3, 1, 2)
    ax13 = fig1.add_subplot(3, 1, 3)
    ax11.hold(True)
    ax12.hold(True)
    ax13.hold(True)
    # Return values unused; the check is the rendered baseline image.
    spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default', mode='angle')
    spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided', mode='angle')
    spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided', mode='angle')
    # scale='dB' is invalid in phase mode and must raise.
    assert_raises(ValueError, ax11.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='default',
                  mode='phase', scale='dB')
    assert_raises(ValueError, ax12.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='onesided',
                  mode='phase', scale='dB')
    assert_raises(ValueError, ax13.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='twosided',
                  mode='phase', scale='dB')
@image_comparison(baseline_images=['specgram_angle_noise'],
                  remove_text=True, extensions=['png'])
def test_specgram_noise_angle():
    '''test axes.specgram in angle mode with noise stimuli'''
    np.random.seed(0)
    n = 10000
    Fs = 100.
    NFFT = int(1000 * Fs / 11)
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    # Gaussian noise followed by uniform noise, concatenated.
    y1 = np.random.standard_normal(n)
    y2 = np.random.rand(n)
    y = np.hstack([y1, y2])
    fig1 = plt.figure()
    ax11 = fig1.add_subplot(3, 1, 1)
    ax12 = fig1.add_subplot(3, 1, 2)
    ax13 = fig1.add_subplot(3, 1, 3)
    ax11.hold(True)
    ax12.hold(True)
    ax13.hold(True)
    # Return values unused; the check is the rendered baseline image.
    spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default', mode='angle')
    spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided', mode='angle')
    spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided', mode='angle')
    # scale='dB' is invalid in phase mode and must raise.
    assert_raises(ValueError, ax11.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='default',
                  mode='phase', scale='dB')
    assert_raises(ValueError, ax12.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='onesided',
                  mode='phase', scale='dB')
    assert_raises(ValueError, ax13.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='twosided',
                  mode='phase', scale='dB')
@image_comparison(baseline_images=['specgram_phase_freqs'],
                  remove_text=True, extensions=['png'])
def test_specgram_freqs_phase():
    '''test axes.specgram in phase mode with sinusoidal stimuli'''
    n = 10000
    Fs = 100.
    fstims1 = [Fs/4, Fs/5, Fs/11]
    fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
    NFFT = int(1000 * Fs / min(fstims1 + fstims2))
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    x = np.arange(0, n, 1/Fs)
    y1 = np.zeros(x.size)
    y2 = np.zeros(x.size)
    for i, (fstim1, fstim2) in enumerate(zip(fstims1, fstims2)):
        y1 += np.sin(fstim1 * x * np.pi * 2)
        y2 += np.sin(fstim2 * x * np.pi * 2)
        # Force the final sample of each signal to exactly 1.0.
        y1[-1] = y1[-1]/y1[-1]
        y2[-1] = y2[-1]/y2[-1]
    y = np.hstack([y1, y2])
    fig1 = plt.figure()
    ax11 = fig1.add_subplot(3, 1, 1)
    ax12 = fig1.add_subplot(3, 1, 2)
    ax13 = fig1.add_subplot(3, 1, 3)
    ax11.hold(True)
    ax12.hold(True)
    ax13.hold(True)
    # Return values unused; the check is the rendered baseline image.
    spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default', mode='phase')
    spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided', mode='phase')
    spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided', mode='phase')
    # scale='dB' is invalid in phase mode and must raise.
    assert_raises(ValueError, ax11.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='default',
                  mode='phase', scale='dB')
    assert_raises(ValueError, ax12.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='onesided',
                  mode='phase', scale='dB')
    assert_raises(ValueError, ax13.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='twosided',
                  mode='phase', scale='dB')
@image_comparison(baseline_images=['specgram_phase_noise'],
                  remove_text=True, extensions=['png'])
def test_specgram_noise_phase():
    '''test axes.specgram in phase mode with noise stimuli'''
    np.random.seed(0)
    n = 10000
    Fs = 100.
    NFFT = int(1000 * Fs / 11)
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    # Gaussian noise followed by uniform noise, concatenated.
    y1 = np.random.standard_normal(n)
    y2 = np.random.rand(n)
    y = np.hstack([y1, y2])
    fig1 = plt.figure()
    ax11 = fig1.add_subplot(3, 1, 1)
    ax12 = fig1.add_subplot(3, 1, 2)
    ax13 = fig1.add_subplot(3, 1, 3)
    ax11.hold(True)
    ax12.hold(True)
    ax13.hold(True)
    # Return values unused; the check is the rendered baseline image.
    spec11 = ax11.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default',
                           mode='phase', )
    spec12 = ax12.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided',
                           mode='phase', )
    spec13 = ax13.specgram(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='twosided',
                           mode='phase', )
    # scale='dB' is invalid in phase mode and must raise.
    assert_raises(ValueError, ax11.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='default',
                  mode='phase', scale='dB')
    assert_raises(ValueError, ax12.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='onesided',
                  mode='phase', scale='dB')
    assert_raises(ValueError, ax13.specgram, y, NFFT=NFFT, Fs=Fs,
                  noverlap=noverlap, pad_to=pad_to, sides='twosided',
                  mode='phase', scale='dB')
@image_comparison(baseline_images=['psd_freqs'], remove_text=True,
                  extensions=['png'])
def test_psd_freqs():
    '''test axes.psd with sinusoidal stimuli'''
    n = 10000
    Fs = 100.
    fstims1 = [Fs/4, Fs/5, Fs/11]
    fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
    NFFT = int(1000 * Fs / min(fstims1 + fstims2))
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    x = np.arange(0, n, 1/Fs)
    y1 = np.zeros(x.size)
    y2 = np.zeros(x.size)
    for fstim1, fstim2 in zip(fstims1, fstims2):
        y1 += np.sin(fstim1 * x * np.pi * 2)
        y2 += np.sin(fstim2 * x * np.pi * 2)
    y = np.hstack([y1, y2])
    fig = plt.figure()
    ax1 = fig.add_subplot(3, 1, 1)
    ax2 = fig.add_subplot(3, 1, 2)
    ax3 = fig.add_subplot(3, 1, 3)
    # Exercise the three return shapes: default, return_line=False/True.
    psd1, freqs1 = ax1.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default')
    psd2, freqs2 = ax2.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided',
                           return_line=False)
    psd3, freqs3, line3 = ax3.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                                  pad_to=pad_to, sides='twosided',
                                  return_line=True)
    # Blank out labels so the baseline image only compares the curves.
    ax1.set_xlabel('')
    ax2.set_xlabel('')
    ax3.set_xlabel('')
    ax1.set_ylabel('')
    ax2.set_ylabel('')
    ax3.set_ylabel('')
@image_comparison(baseline_images=['psd_noise'], remove_text=True,
                  extensions=['png'])
def test_psd_noise():
    '''test axes.psd with noise stimuli'''
    np.random.seed(0)
    n = 10000
    Fs = 100.
    NFFT = int(1000 * Fs / 11)
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    # Gaussian noise followed by uniform noise, concatenated.
    y1 = np.random.standard_normal(n)
    y2 = np.random.rand(n)
    y = np.hstack([y1, y2])
    fig = plt.figure()
    ax1 = fig.add_subplot(3, 1, 1)
    ax2 = fig.add_subplot(3, 1, 2)
    ax3 = fig.add_subplot(3, 1, 3)
    # Exercise the three return shapes: default, return_line=False/True.
    psd1, freqs1 = ax1.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default')
    psd2, freqs2 = ax2.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided',
                           return_line=False)
    psd3, freqs3, line3 = ax3.psd(y, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                                  pad_to=pad_to, sides='twosided',
                                  return_line=True)
    # Blank out labels so the baseline image only compares the curves.
    ax1.set_xlabel('')
    ax2.set_xlabel('')
    ax3.set_xlabel('')
    ax1.set_ylabel('')
    ax2.set_ylabel('')
    ax3.set_ylabel('')
@image_comparison(baseline_images=['csd_freqs'], remove_text=True,
                  extensions=['png'])
def test_csd_freqs():
    '''test axes.csd with sinusoidal stimuli'''
    n = 10000
    Fs = 100.
    fstims1 = [Fs/4, Fs/5, Fs/11]
    fstims2 = [Fs/4.7, Fs/5.6, Fs/11.9]
    NFFT = int(1000 * Fs / min(fstims1 + fstims2))
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    x = np.arange(0, n, 1/Fs)
    y1 = np.zeros(x.size)
    y2 = np.zeros(x.size)
    for fstim1, fstim2 in zip(fstims1, fstims2):
        y1 += np.sin(fstim1 * x * np.pi * 2)
        y2 += np.sin(fstim2 * x * np.pi * 2)
    fig = plt.figure()
    ax1 = fig.add_subplot(3, 1, 1)
    ax2 = fig.add_subplot(3, 1, 2)
    ax3 = fig.add_subplot(3, 1, 3)
    # Exercise the three return shapes: default, return_line=False/True.
    csd1, freqs1 = ax1.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default')
    csd2, freqs2 = ax2.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided',
                           return_line=False)
    csd3, freqs3, line3 = ax3.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                                  pad_to=pad_to, sides='twosided',
                                  return_line=True)
    # Blank out labels so the baseline image only compares the curves.
    ax1.set_xlabel('')
    ax2.set_xlabel('')
    ax3.set_xlabel('')
    ax1.set_ylabel('')
    ax2.set_ylabel('')
    ax3.set_ylabel('')
@image_comparison(baseline_images=['csd_noise'], remove_text=True,
                  extensions=['png'])
def test_csd_noise():
    '''test axes.csd with noise stimuli'''
    np.random.seed(0)
    n = 10000
    Fs = 100.
    NFFT = int(1000 * Fs / 11)
    noverlap = int(NFFT / 2)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    # Two independent noise signals for the cross-spectral density.
    y1 = np.random.standard_normal(n)
    y2 = np.random.rand(n)
    fig = plt.figure()
    ax1 = fig.add_subplot(3, 1, 1)
    ax2 = fig.add_subplot(3, 1, 2)
    ax3 = fig.add_subplot(3, 1, 3)
    # Exercise the three return shapes: default, return_line=False/True.
    csd1, freqs1 = ax1.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='default')
    csd2, freqs2 = ax2.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                           pad_to=pad_to, sides='onesided',
                           return_line=False)
    csd3, freqs3, line3 = ax3.csd(y1, y2, NFFT=NFFT, Fs=Fs, noverlap=noverlap,
                                  pad_to=pad_to, sides='twosided',
                                  return_line=True)
    # Blank out labels so the baseline image only compares the curves.
    ax1.set_xlabel('')
    ax2.set_xlabel('')
    ax3.set_xlabel('')
    ax1.set_ylabel('')
    ax2.set_ylabel('')
    ax3.set_ylabel('')
@image_comparison(baseline_images=['magnitude_spectrum_freqs_linear',
                                   'magnitude_spectrum_freqs_dB'],
                  remove_text=True,
                  extensions=['png'])
def test_magnitude_spectrum_freqs():
    '''test axes.magnitude_spectrum with sinusoidal stimuli'''
    n = 10000
    Fs = 100.
    fstims1 = [Fs/4, Fs/5, Fs/11]
    NFFT = int(1000 * Fs / min(fstims1))
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    x = np.arange(0, n, 1/Fs)
    y = np.zeros(x.size)
    # Each successive stimulus is 10x stronger than the previous one.
    for i, fstim1 in enumerate(fstims1):
        y += np.sin(fstim1 * x * np.pi * 2) * 10**i
    # NOTE(review): no-op self-assignment; candidate for removal.
    y = y
    # fig1: linear scale; fig2: dB scale.
    fig1 = plt.figure()
    fig2 = plt.figure()
    ax11 = fig1.add_subplot(3, 1, 1)
    ax12 = fig1.add_subplot(3, 1, 2)
    ax13 = fig1.add_subplot(3, 1, 3)
    ax21 = fig2.add_subplot(3, 1, 1)
    ax22 = fig2.add_subplot(3, 1, 2)
    ax23 = fig2.add_subplot(3, 1, 3)
    # Return values unused; the check is the rendered baseline image.
    spec11, freqs11, line11 = ax11.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='default')
    spec12, freqs12, line12 = ax12.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='onesided')
    spec13, freqs13, line13 = ax13.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='twosided')
    spec21, freqs21, line21 = ax21.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='default',
                                                      scale='dB')
    spec22, freqs22, line22 = ax22.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='onesided',
                                                      scale='dB')
    spec23, freqs23, line23 = ax23.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='twosided',
                                                      scale='dB')
    # Blank out labels so the baseline image only compares the curves.
    ax11.set_xlabel('')
    ax12.set_xlabel('')
    ax13.set_xlabel('')
    ax11.set_ylabel('')
    ax12.set_ylabel('')
    ax13.set_ylabel('')
    ax21.set_xlabel('')
    ax22.set_xlabel('')
    ax23.set_xlabel('')
    ax21.set_ylabel('')
    ax22.set_ylabel('')
    ax23.set_ylabel('')
@image_comparison(baseline_images=['magnitude_spectrum_noise_linear',
                                   'magnitude_spectrum_noise_dB'],
                  remove_text=True,
                  extensions=['png'])
def test_magnitude_spectrum_noise():
    '''test axes.magnitude_spectrum with noise stimuli'''
    np.random.seed(0)
    n = 10000
    Fs = 100.
    NFFT = int(1000 * Fs / 11)
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    # Gaussian + uniform noise, concatenated and recentered around zero.
    y1 = np.random.standard_normal(n)
    y2 = np.random.rand(n)
    y = np.hstack([y1, y2]) - .5
    # fig1: linear scale; fig2: dB scale.
    fig1 = plt.figure()
    fig2 = plt.figure()
    ax11 = fig1.add_subplot(3, 1, 1)
    ax12 = fig1.add_subplot(3, 1, 2)
    ax13 = fig1.add_subplot(3, 1, 3)
    ax21 = fig2.add_subplot(3, 1, 1)
    ax22 = fig2.add_subplot(3, 1, 2)
    ax23 = fig2.add_subplot(3, 1, 3)
    # Return values unused; the check is the rendered baseline image.
    spec11, freqs11, line11 = ax11.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='default')
    spec12, freqs12, line12 = ax12.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='onesided')
    spec13, freqs13, line13 = ax13.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='twosided')
    spec21, freqs21, line21 = ax21.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='default',
                                                      scale='dB')
    spec22, freqs22, line22 = ax22.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='onesided',
                                                      scale='dB')
    spec23, freqs23, line23 = ax23.magnitude_spectrum(y, Fs=Fs, pad_to=pad_to,
                                                      sides='twosided',
                                                      scale='dB')
    # Blank out labels so the baseline image only compares the curves.
    ax11.set_xlabel('')
    ax12.set_xlabel('')
    ax13.set_xlabel('')
    ax11.set_ylabel('')
    ax12.set_ylabel('')
    ax13.set_ylabel('')
    ax21.set_xlabel('')
    ax22.set_xlabel('')
    ax23.set_xlabel('')
    ax21.set_ylabel('')
    ax22.set_ylabel('')
    ax23.set_ylabel('')
@image_comparison(baseline_images=['angle_spectrum_freqs'],
                  remove_text=True,
                  extensions=['png'])
def test_angle_spectrum_freqs():
    '''test axes.angle_spectrum with sinusoidal stimuli'''
    n = 10000
    Fs = 100.
    fstims1 = [Fs/4, Fs/5, Fs/11]
    NFFT = int(1000 * Fs / min(fstims1))
    # Pad the FFT length up to the next power of two.
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    x = np.arange(0, n, 1/Fs)
    y = np.zeros(x.size)
    # Each successive stimulus is 10x stronger than the previous one.
    for i, fstim1 in enumerate(fstims1):
        y += np.sin(fstim1 * x * np.pi * 2) * 10**i
    # NOTE(review): no-op self-assignment; candidate for removal.
    y = y
    fig = plt.figure()
    ax1 = fig.add_subplot(3, 1, 1)
    ax2 = fig.add_subplot(3, 1, 2)
    ax3 = fig.add_subplot(3, 1, 3)
    # Return values unused; the check is the rendered baseline image.
    spec1, freqs1, line1 = ax1.angle_spectrum(y, Fs=Fs, pad_to=pad_to,
                                              sides='default')
    spec2, freqs2, line2 = ax2.angle_spectrum(y, Fs=Fs, pad_to=pad_to,
                                              sides='onesided')
    spec3, freqs3, line3 = ax3.angle_spectrum(y, Fs=Fs, pad_to=pad_to,
                                              sides='twosided')
    # Blank out labels so the baseline image only compares the curves.
    ax1.set_xlabel('')
    ax2.set_xlabel('')
    ax3.set_xlabel('')
    ax1.set_ylabel('')
    ax2.set_ylabel('')
    ax3.set_ylabel('')
@image_comparison(baseline_images=['angle_spectrum_noise'],
                  remove_text=True,
                  extensions=['png'])
def test_angle_spectrum_noise():
    '''test axes.angle_spectrum with noise stimuli'''
    np.random.seed(0)
    n = 10000
    Fs = 100.
    NFFT = int(1000 * Fs / 11)
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    # Gaussian noise followed by uniform noise, shifted to be zero-centred.
    y = np.hstack([np.random.standard_normal(n), np.random.rand(n)]) - .5
    fig = plt.figure()
    axes_list = [fig.add_subplot(3, 1, pos) for pos in range(1, 4)]
    # One subplot per supported ``sides`` mode.
    for ax, sides in zip(axes_list, ('default', 'onesided', 'twosided')):
        ax.angle_spectrum(y, Fs=Fs, pad_to=pad_to, sides=sides)
        ax.set_xlabel('')
        ax.set_ylabel('')
@image_comparison(baseline_images=['phase_spectrum_freqs'],
                  remove_text=True,
                  extensions=['png'])
def test_phase_spectrum_freqs():
    '''test axes.phase_spectrum with sinusoidal stimuli'''
    n = 10000
    Fs = 100.
    fstims1 = [Fs/4, Fs/5, Fs/11]
    # NFFT covers many periods of the slowest stimulus; pad to the next
    # power of two for the FFT.
    NFFT = int(1000 * Fs / min(fstims1))
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    x = np.arange(0, n, 1/Fs)
    # Sum of sinusoids whose amplitudes grow by a factor of 10 each.
    y = np.zeros(x.size)
    for i, fstim1 in enumerate(fstims1):
        y += np.sin(fstim1 * x * np.pi * 2) * 10**i
    # (removed dead no-op ``y = y`` that was here)
    fig = plt.figure()
    ax1 = fig.add_subplot(3, 1, 1)
    ax2 = fig.add_subplot(3, 1, 2)
    ax3 = fig.add_subplot(3, 1, 3)
    spec1, freqs1, line1 = ax1.phase_spectrum(y, Fs=Fs, pad_to=pad_to,
                                              sides='default')
    spec2, freqs2, line2 = ax2.phase_spectrum(y, Fs=Fs, pad_to=pad_to,
                                              sides='onesided')
    spec3, freqs3, line3 = ax3.phase_spectrum(y, Fs=Fs, pad_to=pad_to,
                                              sides='twosided')
    # Clear labels; the baseline image is compared with remove_text=True.
    for ax in (ax1, ax2, ax3):
        ax.set_xlabel('')
        ax.set_ylabel('')
@image_comparison(baseline_images=['phase_spectrum_noise'],
                  remove_text=True,
                  extensions=['png'])
def test_phase_spectrum_noise():
    '''test axes.phase_spectrum with noise stimuli'''
    np.random.seed(0)
    n = 10000
    Fs = 100.
    NFFT = int(1000 * Fs / 11)
    pad_to = int(2 ** np.ceil(np.log2(NFFT)))
    # Gaussian noise followed by uniform noise, shifted to be zero-centred.
    y = np.hstack([np.random.standard_normal(n), np.random.rand(n)]) - .5
    fig = plt.figure()
    axes_list = [fig.add_subplot(3, 1, pos) for pos in range(1, 4)]
    # One subplot per supported ``sides`` mode.
    for ax, sides in zip(axes_list, ('default', 'onesided', 'twosided')):
        ax.phase_spectrum(y, Fs=Fs, pad_to=pad_to, sides=sides)
        ax.set_xlabel('')
        ax.set_ylabel('')
@image_comparison(baseline_images=['twin_spines'], remove_text=True,
                  extensions=['png'])
def test_twin_spines():
    """Three y-axes on one plot: a host plus two twinx parasites, with the
    second parasite's spine offset to the right of the axes."""
    def make_patch_spines_invisible(ax):
        # Enable the frame so individual spines can later be shown, but hide
        # the patch and all spines by default.
        ax.set_frame_on(True)
        ax.patch.set_visible(False)
        for sp in six.itervalues(ax.spines):
            sp.set_visible(False)
    fig = plt.figure(figsize=(4, 3))
    fig.subplots_adjust(right=0.75)
    host = fig.add_subplot(111)
    par1 = host.twinx()
    par2 = host.twinx()
    # Offset the right spine of par2. The ticks and label have already been
    # placed on the right by twinx above.
    par2.spines["right"].set_position(("axes", 1.2))
    # Having been created by twinx, par2 has its frame off, so the line of its
    # detached spine is invisible. First, activate the frame but make the patch
    # and spines invisible.
    make_patch_spines_invisible(par2)
    # Second, show the right spine.
    par2.spines["right"].set_visible(True)
    p1, = host.plot([0, 1, 2], [0, 1, 2], "b-")
    p2, = par1.plot([0, 1, 2], [0, 3, 2], "r-")
    p3, = par2.plot([0, 1, 2], [50, 30, 15], "g-")
    host.set_xlim(0, 2)
    host.set_ylim(0, 2)
    par1.set_ylim(0, 4)
    par2.set_ylim(1, 65)
    # Color each axis' label and ticks to match its line.
    host.yaxis.label.set_color(p1.get_color())
    par1.yaxis.label.set_color(p2.get_color())
    par2.yaxis.label.set_color(p3.get_color())
    tkw = dict(size=4, width=1.5)
    host.tick_params(axis='y', colors=p1.get_color(), **tkw)
    par1.tick_params(axis='y', colors=p2.get_color(), **tkw)
    par2.tick_params(axis='y', colors=p3.get_color(), **tkw)
    host.tick_params(axis='x', **tkw)
@image_comparison(baseline_images=['twin_spines_on_top'], extensions=['png'],
                  remove_text=True)
def test_twin_spines_on_top():
    """Twinned axes drawn with very wide lines, so any spine/line stacking
    problem is visually obvious in the comparison image."""
    matplotlib.rcParams['axes.linewidth'] = 48.0
    matplotlib.rcParams['lines.linewidth'] = 48.0
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)
    data = np.array([[1000, 1100, 1200, 1250],
                     [310, 301, 360, 400]])
    ax2 = ax1.twinx()
    xs = data[0]
    ys = data[1] / 1E3
    ax1.plot(xs, ys, color='#BEAED4')
    ax1.fill_between(xs, ys, color='#BEAED4', alpha=.8)
    ax2.plot(xs, ys, color='#7FC97F')
    ax2.fill_between(xs, ys, color='#7FC97F', alpha=.5)
@cleanup
def test_rcparam_grid_minor():
    """axes.grid.which rcParam must control which gridlines are enabled."""
    orig_grid = matplotlib.rcParams['axes.grid']
    orig_locator = matplotlib.rcParams['axes.grid.which']
    matplotlib.rcParams['axes.grid'] = True
    # (which-value, (major-on, minor-on)) pairs.
    values = (
        ('both', (True, True)),
        ('major', (True, False)),
        ('minor', (False, True))
    )
    try:
        for locator, result in values:
            matplotlib.rcParams['axes.grid.which'] = locator
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            assert (ax.xaxis._gridOnMajor, ax.xaxis._gridOnMinor) == result
    finally:
        # Restore even when an assertion fails, so a failure here cannot
        # leak modified rcParams into subsequent tests.
        matplotlib.rcParams['axes.grid'] = orig_grid
        matplotlib.rcParams['axes.grid.which'] = orig_locator
@cleanup
def test_vline_limit():
    """An axvline must not participate in y-axis autoscaling."""
    fig = plt.figure()
    ax = fig.gca()
    ax.axvline(0.5)
    ax.plot([-0.1, 0, 0.2, 0.1])
    bottom, top = ax.get_ylim()
    # Limits come from the plotted data only, with the default margin on top.
    assert bottom == -0.1
    assert top == 0.25
@cleanup
def test_empty_shared_subplots():
    """Empty plots with shared axes inherit limits from populated plots."""
    fig, (ax_full, ax_empty) = plt.subplots(nrows=1, ncols=2,
                                            sharex=True, sharey=True)
    ax_full.plot([1, 2, 3], [2, 4, 6])
    x0, x1 = ax_empty.get_xlim()
    y0, y1 = ax_empty.get_ylim()
    # The empty axes must at least span the sibling's data range.
    assert x0 <= 1
    assert x1 >= 3
    assert y0 <= 2
    assert y1 >= 6
@cleanup
def test_relim_visible_only():
    """relim(visible_only=True) must ignore hidden artists."""
    small_x, small_y = (0., 10.), (0., 10.)
    big_x, big_y = (-10., 20.), (-10., 30.)
    fig = matplotlib.figure.Figure()
    ax = fig.add_subplot(111)
    ax.plot(small_x, small_y)
    assert ax.get_xlim() == small_x
    assert ax.get_ylim() == small_y
    lines = ax.plot(big_x, big_y)
    assert ax.get_xlim() == big_x
    assert ax.get_ylim() == big_y
    # Hiding the larger line by itself does not shrink the limits...
    lines[0].set_visible(False)
    assert ax.get_xlim() == big_x
    assert ax.get_ylim() == big_y
    # ...but relimiting over visible artists only brings them back.
    ax.relim(visible_only=True)
    ax.autoscale_view()
    assert ax.get_xlim() == small_x
    assert ax.get_ylim() == small_y
@cleanup
def test_text_labelsize():
    """
    tests for issue #1172
    """
    # tick_params must accept a string size and a direction without error.
    ax = plt.figure().gca()
    ax.tick_params(labelsize='large')
    ax.tick_params(direction='out')
@image_comparison(baseline_images=['pie_linewidth_0'], extensions=['png'])
def test_pie_linewidth_0():
    """Pie chart whose wedges are drawn with no edge line at all."""
    # Slices are ordered and plotted counter-clockwise.
    slice_labels = ['Frogs', 'Hogs', 'Dogs', 'Logs']
    slice_sizes = [15, 30, 45, 10]
    slice_colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
    # Pull out only the 2nd slice (i.e. 'Hogs').
    slice_explode = [0, 0.1, 0, 0]
    plt.pie(slice_sizes, explode=slice_explode, labels=slice_labels,
            colors=slice_colors, autopct='%1.1f%%', shadow=True,
            startangle=90, wedgeprops=dict(linewidth=0))
    # An equal aspect ratio draws the pie as a circle.
    plt.axis('equal')
@image_comparison(baseline_images=['pie_linewidth_2'], extensions=['png'])
def test_pie_linewidth_2():
    """Pie chart whose wedges are drawn with a 2-point edge line."""
    # Slices are ordered and plotted counter-clockwise.
    slice_labels = ['Frogs', 'Hogs', 'Dogs', 'Logs']
    slice_sizes = [15, 30, 45, 10]
    slice_colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
    # Pull out only the 2nd slice (i.e. 'Hogs').
    slice_explode = [0, 0.1, 0, 0]
    plt.pie(slice_sizes, explode=slice_explode, labels=slice_labels,
            colors=slice_colors, autopct='%1.1f%%', shadow=True,
            startangle=90, wedgeprops=dict(linewidth=2))
    # An equal aspect ratio draws the pie as a circle.
    plt.axis('equal')
@image_comparison(baseline_images=['pie_ccw_true'], extensions=['png'])
def test_pie_ccw_true():
    """Pie chart with explicit counterclock=True (the default direction)."""
    # Slices are ordered and plotted counter-clockwise.
    slice_labels = ['Frogs', 'Hogs', 'Dogs', 'Logs']
    slice_sizes = [15, 30, 45, 10]
    slice_colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
    # Pull out only the 2nd slice (i.e. 'Hogs').
    slice_explode = [0, 0.1, 0, 0]
    plt.pie(slice_sizes, explode=slice_explode, labels=slice_labels,
            colors=slice_colors, autopct='%1.1f%%', shadow=True,
            startangle=90, counterclock=True)
    # An equal aspect ratio draws the pie as a circle.
    plt.axis('equal')
@cleanup
def test_margins():
    """Exercise every calling convention that Axes.margins supports."""
    data = [1, 10]
    # (positional args, keyword args, expected (x, y) margins)
    cases = [
        ((1,), {}, (1, 1)),            # single symmetric margin
        ((1, 0.5), {}, (1, 0.5)),      # positional x, y
        ((), dict(x=1, y=0.5), (1, 0.5)),  # keyword x, y
    ]
    for args, kwargs, expected in cases:
        fig, ax = plt.subplots(1, 1)
        ax.plot(data)
        ax.margins(*args, **kwargs)
        assert_equal(ax.margins(), expected)
@cleanup
def test_pathological_hexbin():
    """issue #2863: hexbin of constant data must not emit any warnings."""
    with warnings.catch_warnings(record=True) as caught:
        constant = [10] * 100
        fig, ax = plt.subplots(1, 1)
        ax.hexbin(constant, constant)
        plt.show()
        # No warning of any category may have been raised.
        assert_equal(len(caught), 0)
@cleanup
def test_color_None():
    """issue 3855: color=None must be accepted and use the default color."""
    fig, ax = plt.subplots()
    points = [1, 2]
    ax.plot(points, points, color=None)
    plt.show()
@cleanup
def test_numerical_hist_label():
    """hist() must accept non-string (numeric) labels."""
    fig, ax = plt.subplots()
    datasets = [range(15)] * 5
    ax.hist(datasets, label=range(5))
if __name__ == '__main__':
    import nose
    import sys

    # Run this module under nose with doctests enabled and stdout capture
    # disabled, passing through any extra command-line arguments.
    extra_args = ['-s', '--with-doctest']
    nose.runmodule(argv=sys.argv[:1] + extra_args + sys.argv[1:],
                   exit=False)
| lgpl-3.0 |
quantopian/zipline | zipline/data/resample.py | 1 | 26927 | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from six import with_metaclass
from zipline.data._resample import (
_minute_to_session_open,
_minute_to_session_high,
_minute_to_session_low,
_minute_to_session_close,
_minute_to_session_volume,
)
from zipline.data.bar_reader import NoDataOnDate
from zipline.data.minute_bars import MinuteBarReader
from zipline.data.session_bars import SessionBarReader
from zipline.utils.memoize import lazyval
_MINUTE_TO_SESSION_OHCLV_HOW = OrderedDict((
('open', 'first'),
('high', 'max'),
('low', 'min'),
('close', 'last'),
('volume', 'sum'),
))
def minute_frame_to_session_frame(minute_frame, calendar):
    """
    Resample a DataFrame with minute data into the frame expected by a
    BcolzDailyBarWriter.

    Parameters
    ----------
    minute_frame : pd.DataFrame
        A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
        and `dt` (minute dts)
    calendar : trading_calendars.trading_calendar.TradingCalendar
        A TradingCalendar on which session labels to resample from minute
        to session.

    Return
    ------
    session_frame : pd.DataFrame
        A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
        and `day` (datetime-like).
    """
    # Build the aggregation spec in the same order as the input columns so
    # the output column order is preserved.
    how = OrderedDict()
    for col in minute_frame.columns:
        how[col] = _MINUTE_TO_SESSION_OHCLV_HOW[col]
    session_labels = calendar.minute_index_to_session_labels(
        minute_frame.index)
    return minute_frame.groupby(session_labels).agg(how)
def minute_to_session(column, close_locs, data, out):
    """
    Resample an array with minute data into an array with session data.

    This function assumes that the minute data is the exact length of all
    minutes in the sessions in the output.

    Parameters
    ----------
    column : str
        The `open`, `high`, `low`, `close`, or `volume` column.
    close_locs : array[intp]
        The locations in `data` which are the market close minutes.
    data : array[float64|uint32]
        The minute data to be sampled into session data.
        The first value should align with the market open of the first session,
        containing values for all minutes for all sessions. With the last value
        being the market close of the last session.
    out : array[float64|uint32]
        The output array into which to write the sampled sessions.
    """
    # Dispatch on the column name. An unrecognized column leaves ``out``
    # untouched, matching the behavior of the original if/elif chain.
    samplers = {
        'open': _minute_to_session_open,
        'high': _minute_to_session_high,
        'low': _minute_to_session_low,
        'close': _minute_to_session_close,
        'volume': _minute_to_session_volume,
    }
    sampler = samplers.get(column)
    if sampler is not None:
        sampler(close_locs, data, out)
    return out
class DailyHistoryAggregator(object):
    """
    Converts minute pricing data into a daily summary, to be used for the
    last slot in a call to history with a frequency of `1d`.

    This summary is the same as a daily bar rollup of minute data, with the
    distinction that the summary is truncated to the `dt` requested.
    i.e. the aggregation slides forward during a the course of simulation day.

    Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.
    The aggregation rules for each price type is documented in their
    respective aggregation methods (``opens``, ``highs``, ``lows``,
    ``closes``, ``volumes``).
    """

    def __init__(self, market_opens, minute_reader, trading_calendar):
        # market_opens: indexed by session label via ``.loc`` in _prelude —
        # presumably a pd.Series of session -> market-open minute; confirm
        # against callers.
        self._market_opens = market_opens
        self._minute_reader = minute_reader
        self._trading_calendar = trading_calendar

        # The caches are structured as (date, market_open, entries), where
        # entries is a dict of asset -> (last_visited_dt, value)
        #
        # Whenever an aggregation method determines the current value,
        # the entry for the respective asset should be overwritten with a new
        # entry for the current dt.value (int) and aggregation value.
        #
        # When the requested dt's date is different from date the cache is
        # flushed, so that the cache entries do not grow unbounded.
        #
        # Example cache:
        # cache = (date(2016, 3, 17),
        #          pd.Timestamp('2016-03-17 13:31', tz='UTC'),
        #          {
        #              1: (1458221460000000000, np.nan),
        #              2: (1458221460000000000, 42.0),
        #          })
        self._caches = {
            'open': None,
            'high': None,
            'low': None,
            'close': None,
            'volume': None
        }

        # The int value is used for deltas to avoid extra computation from
        # creating new Timestamps.
        self._one_min = pd.Timedelta('1 min').value

    def _prelude(self, dt, field):
        # Shared setup for every aggregation method: resolve the session,
        # rebuild the per-field cache when the session has changed, and
        # compute the previous minute (None when ``dt`` is the open itself).
        session = self._trading_calendar.minute_to_session_label(dt)
        dt_value = dt.value
        cache = self._caches[field]
        if cache is None or cache[0] != session:
            # New session (or first use): flush this field's cache.
            market_open = self._market_opens.loc[session]
            cache = self._caches[field] = (session, market_open, {})
        _, market_open, entries = cache
        market_open = market_open.tz_localize('UTC')
        if dt != market_open:
            # prev_dt is an int (nanoseconds), matching last_visited_dt.
            prev_dt = dt_value - self._one_min
        else:
            prev_dt = None
        return market_open, prev_dt, dt_value, entries

    def opens(self, assets, dt):
        """
        The open field's aggregation returns the first value that occurs
        for the day, if there has been no data on or before the `dt` the open
        is `nan`.

        Once the first non-nan open is seen, that value remains constant per
        asset for the remainder of the day.

        Returns
        -------
        np.array with dtype=float64, in order of assets parameter.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')
        opens = []
        session_label = self._trading_calendar.minute_to_session_label(dt)
        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                opens.append(np.NaN)
                continue
            if prev_dt is None:
                # First minute of the session: read the single bar directly.
                val = self._minute_reader.get_value(asset, dt, 'open')
                entries[asset] = (dt_value, val)
                opens.append(val)
                continue
            else:
                try:
                    last_visited_dt, first_open = entries[asset]
                    if last_visited_dt == dt_value:
                        # Cache hit for this exact minute.
                        opens.append(first_open)
                        continue
                    elif not pd.isnull(first_open):
                        # The day's open is already known; it never changes.
                        opens.append(first_open)
                        entries[asset] = (dt_value, first_open)
                        continue
                    else:
                        # Still nan: scan forward from the minute after the
                        # last visit for the first non-nan open.
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['open'],
                            after_last,
                            dt,
                            [asset],
                        )[0]
                        nonnan = window[~pd.isnull(window)]
                        if len(nonnan):
                            val = nonnan[0]
                        else:
                            val = np.nan
                        entries[asset] = (dt_value, val)
                        opens.append(val)
                        continue
                except KeyError:
                    # No cache entry yet: scan from the market open.
                    window = self._minute_reader.load_raw_arrays(
                        ['open'],
                        market_open,
                        dt,
                        [asset],
                    )[0]
                    nonnan = window[~pd.isnull(window)]
                    if len(nonnan):
                        val = nonnan[0]
                    else:
                        val = np.nan
                    entries[asset] = (dt_value, val)
                    opens.append(val)
                    continue
        return np.array(opens)

    def highs(self, assets, dt):
        """
        The high field's aggregation returns the largest high seen between
        the market open and the current dt.
        If there has been no data on or before the `dt` the high is `nan`.

        Returns
        -------
        np.array with dtype=float64, in order of assets parameter.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')
        highs = []
        session_label = self._trading_calendar.minute_to_session_label(dt)
        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                highs.append(np.NaN)
                continue
            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'high')
                entries[asset] = (dt_value, val)
                highs.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_max = entries[asset]
                    if last_visited_dt == dt_value:
                        # Cache hit for this exact minute.
                        highs.append(last_max)
                        continue
                    elif last_visited_dt == prev_dt:
                        # Only one new minute since the last visit: fold the
                        # current bar into the running maximum.
                        curr_val = self._minute_reader.get_value(
                            asset, dt, 'high')
                        if pd.isnull(curr_val):
                            val = last_max
                        elif pd.isnull(last_max):
                            val = curr_val
                        else:
                            val = max(last_max, curr_val)
                        entries[asset] = (dt_value, val)
                        highs.append(val)
                        continue
                    else:
                        # Several unseen minutes: load them all and take the
                        # max together with the cached running maximum.
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['high'],
                            after_last,
                            dt,
                            [asset],
                        )[0].T
                        val = np.nanmax(np.append(window, last_max))
                        entries[asset] = (dt_value, val)
                        highs.append(val)
                        continue
                except KeyError:
                    # No cache entry: aggregate from the market open.
                    window = self._minute_reader.load_raw_arrays(
                        ['high'],
                        market_open,
                        dt,
                        [asset],
                    )[0].T
                    val = np.nanmax(window)
                    entries[asset] = (dt_value, val)
                    highs.append(val)
                    continue
        return np.array(highs)

    def lows(self, assets, dt):
        """
        The low field's aggregation returns the smallest low seen between
        the market open and the current dt.
        If there has been no data on or before the `dt` the low is `nan`.

        Returns
        -------
        np.array with dtype=float64, in order of assets parameter.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')
        lows = []
        session_label = self._trading_calendar.minute_to_session_label(dt)
        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                lows.append(np.NaN)
                continue
            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'low')
                entries[asset] = (dt_value, val)
                lows.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_min = entries[asset]
                    if last_visited_dt == dt_value:
                        # Cache hit for this exact minute.
                        lows.append(last_min)
                        continue
                    elif last_visited_dt == prev_dt:
                        # One new minute: nanmin ignores a nan current bar.
                        curr_val = self._minute_reader.get_value(
                            asset, dt, 'low')
                        val = np.nanmin([last_min, curr_val])
                        entries[asset] = (dt_value, val)
                        lows.append(val)
                        continue
                    else:
                        # Several unseen minutes: load them all and take the
                        # min together with the cached running minimum.
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['low'],
                            after_last,
                            dt,
                            [asset],
                        )[0].T
                        val = np.nanmin(np.append(window, last_min))
                        entries[asset] = (dt_value, val)
                        lows.append(val)
                        continue
                except KeyError:
                    # No cache entry: aggregate from the market open.
                    window = self._minute_reader.load_raw_arrays(
                        ['low'],
                        market_open,
                        dt,
                        [asset],
                    )[0].T
                    val = np.nanmin(window)
                    entries[asset] = (dt_value, val)
                    lows.append(val)
                    continue
        return np.array(lows)

    def closes(self, assets, dt):
        """
        The close field's aggregation returns the latest close at the given
        dt.
        If the close for the given dt is `nan`, the most recent non-nan
        `close` is used.
        If there has been no data on or before the `dt` the close is `nan`.

        Returns
        -------
        np.array with dtype=float64, in order of assets parameter.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')
        closes = []
        session_label = self._trading_calendar.minute_to_session_label(dt)

        def _get_filled_close(asset):
            """
            Returns the most recent non-nan close for the asset in this
            session. If there has been no data in this session on or before the
            `dt`, returns `nan`
            """
            window = self._minute_reader.load_raw_arrays(
                ['close'],
                market_open,
                dt,
                [asset],
            )[0]
            try:
                return window[~np.isnan(window)][-1]
            except IndexError:
                return np.NaN

        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                closes.append(np.NaN)
                continue
            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'close')
                entries[asset] = (dt_value, val)
                closes.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_close = entries[asset]
                    if last_visited_dt == dt_value:
                        # Cache hit for this exact minute.
                        closes.append(last_close)
                        continue
                    elif last_visited_dt == prev_dt:
                        # One new minute: fall back to the cached close when
                        # the current bar is nan.
                        val = self._minute_reader.get_value(
                            asset, dt, 'close')
                        if pd.isnull(val):
                            val = last_close
                        entries[asset] = (dt_value, val)
                        closes.append(val)
                        continue
                    else:
                        # Stale cache: re-derive the forward-filled close.
                        val = self._minute_reader.get_value(
                            asset, dt, 'close')
                        if pd.isnull(val):
                            val = _get_filled_close(asset)
                        entries[asset] = (dt_value, val)
                        closes.append(val)
                        continue
                except KeyError:
                    # No cache entry: derive the forward-filled close.
                    val = self._minute_reader.get_value(
                        asset, dt, 'close')
                    if pd.isnull(val):
                        val = _get_filled_close(asset)
                    entries[asset] = (dt_value, val)
                    closes.append(val)
                    continue
        return np.array(closes)

    def volumes(self, assets, dt):
        """
        The volume field's aggregation returns the sum of all volumes
        between the market open and the `dt`
        If there has been no data on or before the `dt` the volume is 0.

        Returns
        -------
        np.array with dtype=int64, in order of assets parameter.
        """
        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
        volumes = []
        session_label = self._trading_calendar.minute_to_session_label(dt)
        for asset in assets:
            if not asset.is_alive_for_session(session_label):
                volumes.append(0)
                continue
            if prev_dt is None:
                val = self._minute_reader.get_value(asset, dt, 'volume')
                entries[asset] = (dt_value, val)
                volumes.append(val)
                continue
            else:
                try:
                    last_visited_dt, last_total = entries[asset]
                    if last_visited_dt == dt_value:
                        # Cache hit for this exact minute.
                        volumes.append(last_total)
                        continue
                    elif last_visited_dt == prev_dt:
                        # One new minute: add the current bar to the total.
                        val = self._minute_reader.get_value(
                            asset, dt, 'volume')
                        val += last_total
                        entries[asset] = (dt_value, val)
                        volumes.append(val)
                        continue
                    else:
                        # Several unseen minutes: sum them and add the cached
                        # running total.
                        after_last = pd.Timestamp(
                            last_visited_dt + self._one_min, tz='UTC')
                        window = self._minute_reader.load_raw_arrays(
                            ['volume'],
                            after_last,
                            dt,
                            [asset],
                        )[0]
                        val = np.nansum(window) + last_total
                        entries[asset] = (dt_value, val)
                        volumes.append(val)
                        continue
                except KeyError:
                    # No cache entry: sum from the market open.
                    window = self._minute_reader.load_raw_arrays(
                        ['volume'],
                        market_open,
                        dt,
                        [asset],
                    )[0]
                    val = np.nansum(window)
                    entries[asset] = (dt_value, val)
                    volumes.append(val)
                    continue
        return np.array(volumes)
class MinuteResampleSessionBarReader(SessionBarReader):
    """
    A SessionBarReader that, on the fly, resamples the output of a minute
    bar reader into session (daily) OHLCV bars.
    """

    def __init__(self, calendar, minute_bar_reader):
        self._calendar = calendar
        self._minute_bar_reader = minute_bar_reader

    def _get_resampled(self, columns, start_session, end_session, assets):
        # Load every minute from the open of the first session through the
        # close of the last, then collapse them into one row per session.
        range_open = self._calendar.session_open(start_session)
        range_close = self._calendar.session_close(end_session)
        minute_data = self._minute_bar_reader.load_raw_arrays(
            columns,
            range_open,
            range_close,
            assets,
        )
        # Get the index of the close minute for each session in the range.
        # If the range contains only one session, the only close in the range
        # is the last minute in the data. Otherwise, we need to get all the
        # session closes and find their indices in the range of minutes.
        if start_session == end_session:
            close_ilocs = np.array([len(minute_data[0]) - 1], dtype=np.int64)
        else:
            minutes = self._calendar.minutes_in_range(
                range_open,
                range_close,
            )
            session_closes = self._calendar.session_closes_in_range(
                start_session,
                end_session,
            )
            close_ilocs = minutes.searchsorted(session_closes.values)
        # One output array per column: nan-filled for prices, zero-filled
        # for volume.
        results = []
        shape = (len(close_ilocs), len(assets))
        for col in columns:
            if col != 'volume':
                out = np.full(shape, np.nan)
            else:
                out = np.zeros(shape, dtype=np.uint32)
            results.append(out)
        # Aggregate each (asset, column) minute series into session bars.
        for i in range(len(assets)):
            for j, column in enumerate(columns):
                data = minute_data[j][:, i]
                minute_to_session(column, close_ilocs, data, results[j][:, i])
        return results

    @property
    def trading_calendar(self):
        return self._calendar

    def load_raw_arrays(self, columns, start_dt, end_dt, sids):
        return self._get_resampled(columns, start_dt, end_dt, sids)

    def get_value(self, sid, session, colname):
        # WARNING: This will need caching or other optimization if used in a
        # tight loop.
        # This was developed to complete interface, but has not been tuned
        # for real world use.
        return self._get_resampled([colname], session, session, [sid])[0][0][0]

    @lazyval
    def sessions(self):
        # Sessions spanned by the underlying minute reader's data.
        cal = self._calendar
        first = self._minute_bar_reader.first_trading_day
        last = cal.minute_to_session_label(
            self._minute_bar_reader.last_available_dt)
        return cal.sessions_in_range(first, last)

    @lazyval
    def last_available_dt(self):
        return self.trading_calendar.minute_to_session_label(
            self._minute_bar_reader.last_available_dt
        )

    @property
    def first_trading_day(self):
        return self._minute_bar_reader.first_trading_day

    def get_last_traded_dt(self, asset, dt):
        return self.trading_calendar.minute_to_session_label(
            self._minute_bar_reader.get_last_traded_dt(asset, dt))
class ReindexBarReader(with_metaclass(ABCMeta)):
    """
    A base class for readers which reindexes results, filling in the additional
    indices with empty data.

    Used to align the reading assets which trade on different calendars.

    Currently only supports a ``trading_calendar`` which is a superset of the
    ``reader``'s calendar.

    Parameters
    ----------
    - trading_calendar : zipline.utils.trading_calendar.TradingCalendar
       The calendar to use when indexing results from the reader.
    - reader : MinuteBarReader|SessionBarReader
       The reader which has a calendar that is a subset of the desired
       ``trading_calendar``.
    - first_trading_session : pd.Timestamp
       The first trading session the reader should provide. Must be specified,
       since the ``reader``'s first session may not exactly align with the
       desired calendar. Specifically, in the case where the first session
       on the target calendar is a holiday on the ``reader``'s calendar.
    - last_trading_session : pd.Timestamp
       The last trading session the reader should provide. Must be specified,
       since the ``reader``'s last session may not exactly align with the
       desired calendar. Specifically, in the case where the last session
       on the target calendar is a holiday on the ``reader``'s calendar.
    """

    def __init__(self,
                 trading_calendar,
                 reader,
                 first_trading_session,
                 last_trading_session):
        self._trading_calendar = trading_calendar
        self._reader = reader
        self._first_trading_session = first_trading_session
        self._last_trading_session = last_trading_session

    @property
    def last_available_dt(self):
        return self._reader.last_available_dt

    def get_last_traded_dt(self, sid, dt):
        return self._reader.get_last_traded_dt(sid, dt)

    @property
    def first_trading_day(self):
        return self._reader.first_trading_day

    def get_value(self, sid, dt, field):
        # Give an empty result if no data is present.
        try:
            return self._reader.get_value(sid, dt, field)
        except NoDataOnDate:
            if field == 'volume':
                return 0
            else:
                return np.nan

    @abstractmethod
    def _outer_dts(self, start_dt, end_dt):
        # Dts on the target (superset) calendar.
        raise NotImplementedError

    @abstractmethod
    def _inner_dts(self, start_dt, end_dt):
        # Dts on the wrapped reader's (subset) calendar.
        raise NotImplementedError

    @property
    def trading_calendar(self):
        return self._trading_calendar

    @lazyval
    def sessions(self):
        return self.trading_calendar.sessions_in_range(
            self._first_trading_session,
            self._last_trading_session
        )

    def load_raw_arrays(self, fields, start_dt, end_dt, sids):
        outer_dts = self._outer_dts(start_dt, end_dt)
        inner_dts = self._inner_dts(start_dt, end_dt)
        # Positions of the inner (subset) dts within the outer index.
        indices = outer_dts.searchsorted(inner_dts)
        shape = len(outer_dts), len(sids)
        outer_results = []
        if len(inner_dts) > 0:
            inner_results = self._reader.load_raw_arrays(
                fields, inner_dts[0], inner_dts[-1], sids)
        else:
            inner_results = None
        for i, field in enumerate(fields):
            # Empty slots are nan for prices, 0 for volume.
            if field != 'volume':
                out = np.full(shape, np.nan)
            else:
                out = np.zeros(shape, dtype=np.uint32)
            if inner_results is not None:
                # Scatter the wrapped reader's rows into their positions on
                # the outer calendar; rows with no inner dt stay empty.
                out[indices] = inner_results[i]
            outer_results.append(out)
        return outer_results
class ReindexMinuteBarReader(ReindexBarReader, MinuteBarReader):
    """
    See: ``ReindexBarReader``
    """

    def _outer_dts(self, start_dt, end_dt):
        # Minutes on the target (superset) calendar.
        outer_cal = self._trading_calendar
        return outer_cal.minutes_in_range(start_dt, end_dt)

    def _inner_dts(self, start_dt, end_dt):
        # Minutes on the wrapped reader's (subset) calendar.
        inner_cal = self._reader.calendar
        return inner_cal.minutes_in_range(start_dt, end_dt)
class ReindexSessionBarReader(ReindexBarReader, SessionBarReader):
    """
    See: ``ReindexBarReader``
    """

    def _outer_dts(self, start_dt, end_dt):
        # Sessions on the target (superset) calendar.
        outer_cal = self.trading_calendar
        return outer_cal.sessions_in_range(start_dt, end_dt)

    def _inner_dts(self, start_dt, end_dt):
        # Sessions on the wrapped reader's (subset) calendar.
        inner_cal = self._reader.trading_calendar
        return inner_cal.sessions_in_range(start_dt, end_dt)
| apache-2.0 |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/utils/tests/test_svd.py | 3 | 5384 | # Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD
import numpy as np
from scipy import sparse
from scipy import linalg
from numpy.testing import assert_equal
from numpy.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_randomized_svd_low_rank():
    """Check that extmath.randomized_svd is consistent with linalg.svd"""
    n_samples, n_features = 100, 500
    rank, k = 5, 10
    # A matrix of approximate effective rank `rank` with no noise
    # component (very structured signal).
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # Exact (slow) SVD as the reference.
    U, s, V = linalg.svd(X, full_matrices=False)
    # Fast approximate factorization.
    Ua, sa, Va = randomized_svd(X, k)
    assert_equal(Ua.shape, (n_samples, k))
    assert_equal(sa.shape, (k,))
    assert_equal(Va.shape, (k, n_features))
    # Singular values of both methods agree up to the real rank of X.
    assert_almost_equal(s[:k], sa)
    # Check the singular vectors too, sign-insensitively via the product.
    assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
    # The same must hold for a sparse representation of X.
    X = sparse.csr_matrix(X)
    Ua, sa, Va = randomized_svd(X, k)
    assert_almost_equal(s[:rank], sa[:rank])
def test_randomized_svd_low_rank_with_noise():
    """Check that extmath.randomized_svd can handle noisy matrices"""
    n_samples, n_features = 100, 500
    rank, k = 5, 10
    # A matrix with structure of approximate rank `rank` and an important
    # noisy component.
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.5,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # Exact (slow) singular values as the reference.
    _, s, _ = linalg.svd(X, full_matrices=False)
    # Without power iterations the approximation cannot cope with the noise.
    _, sa, _ = randomized_svd(X, k, n_iterations=0)
    assert_greater(np.abs(s[:k] - sa).max(), 0.05)
    # With power iterations the noise is suppressed.
    _, sap, _ = randomized_svd(X, k, n_iterations=5)
    assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
    """Check that extmath.randomized_svd can handle matrices whose rank is
    effectively infinite (slowly decaying spectrum, no low-rank structure)."""
    # NOTE: the previous docstring was copy-pasted from the noisy-matrix test
    # and did not describe this test.
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10
    # No 'low_rank component': just regularly but slowly decreasing singular
    # values, so the rank of the data matrix is infinite.
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=1.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # Compute the singular values of X using the slow exact method.
    _, s, _ = linalg.svd(X, full_matrices=False)
    # Without the iterated power method the approximation does not tolerate
    # the heavy spectral tail.
    _, sa, _ = randomized_svd(X, k, n_iterations=0)
    assert_greater(np.abs(s[:k] - sa).max(), 0.1)
    # The iterated power method still manages to recover most of the
    # structure at the requested rank.
    _, sap, _ = randomized_svd(X, k, n_iterations=5)
    assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    """Check that transposing the design matrix has limit impact"""
    # Build a 100x500 matrix of approximate rank 4 and extract the top
    # k=10 singular triplets under each transposition strategy.
    n_samples = 100
    n_features = 500
    rank = 4
    k = 10
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.5, random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # Run the randomized solver with transposition forced off, forced on,
    # and left to the 'auto' heuristic.
    U1, s1, V1 = randomized_svd(X, k, n_iterations=3, transpose=False,
                                random_state=0)
    U2, s2, V2 = randomized_svd(X, k, n_iterations=3, transpose=True,
                                random_state=0)
    U3, s3, V3 = randomized_svd(X, k, n_iterations=3, transpose='auto',
                                random_state=0)
    # Exact SVD as the reference decomposition.
    U4, s4, V4 = linalg.svd(X, full_matrices=False)
    # All three approximate runs should recover the top-k spectrum.
    assert_almost_equal(s1, s4[:k], decimal=3)
    assert_almost_equal(s2, s4[:k], decimal=3)
    assert_almost_equal(s3, s4[:k], decimal=3)
    # The reconstructed rank-k approximations should match the exact one.
    assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
                        decimal=2)
    assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
                        decimal=2)
    # in this case 'auto' is equivalent to transpose
    # (n_features > n_samples, so the heuristic chooses to transpose)
    assert_almost_equal(s2, s3)
if __name__ == '__main__':
    # Allow running this test module directly through the nose test runner.
    import nose
    nose.runmodule()
| agpl-3.0 |
Bulochkin/tensorflow_pack | tensorflow/contrib/learn/python/learn/tests/dataframe/arithmetic_transform_test.py | 62 | 2343 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(test.TestCase):
  """Test class for `Sum` transform."""

  def testSum(self):
    """Adding two DataFrame columns matches the pandas element-wise sum."""
    if not HAS_PANDAS:
      return
    row_count = 100
    source_df = pd.DataFrame({
        "a": np.arange(row_count),
        "b": np.arange(row_count, 2 * row_count)
    })
    frame = df.TensorFlowDataFrame.from_pandas(
        source_df, shuffle=False, batch_size=row_count)
    frame["a+b"] = frame["a"] + frame["b"]
    reference = source_df["a"] + source_df["b"]
    computed = frame.run_one_batch()["a+b"]
    np.testing.assert_array_equal(reference, computed)
class DifferenceTestCase(test.TestCase):
  """Test class for `Difference` transform."""

  def testDifference(self):
    """Subtracting two DataFrame columns matches the pandas difference."""
    if not HAS_PANDAS:
      return
    row_count = 100
    source_df = pd.DataFrame({
        "a": np.arange(row_count),
        "b": np.arange(row_count, 2 * row_count)
    })
    frame = df.TensorFlowDataFrame.from_pandas(
        source_df, shuffle=False, batch_size=row_count)
    frame["a-b"] = frame["a"] - frame["b"]
    reference = source_df["a"] - source_df["b"]
    computed = frame.run_one_batch()["a-b"]
    np.testing.assert_array_equal(reference, computed)
if __name__ == "__main__":
  # Run the TensorFlow test runner when this module is executed directly.
  test.main()
| apache-2.0 |
rajat1994/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
    # Guard helper: skip warning-related assertions on interpreters older
    # than Python 2.6, whose `warnings` machinery lacks the features the
    # test utilities rely on.
    if version_info < (2, 6):
        raise SkipTest("Testing for warnings is not supported in versions \
        older than Python 2.6")
def test_lasso_zero():
    """Lasso fitted on all-zero data must not crash and must predict 0."""
    X = [[0], [0], [0]]
    y = [0, 0, 0]
    model = Lasso(alpha=0.1).fit(X, y)
    predictions = model.predict([[1], [2], [3]])
    # The only solution on degenerate data is the null model.
    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(predictions, [0, 0, 0])
    assert_almost_equal(model.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# against nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
                  n_targets=1):
    """
    build an ill-posed linear regression problem with many noisy features and
    comparatively few samples
    """
    rng = np.random.RandomState(0)
    # Ground-truth weights: only the first `n_informative_features` entries
    # are non-zero; the remaining features carry pure noise.
    weight_shape = (n_features, n_targets) if n_targets > 1 else (n_features,)
    w = rng.randn(*weight_shape)
    w[n_informative_features:] = 0.0
    # Noiseless targets generated from the sparse linear model, for both a
    # training and an independent test design matrix.
    X = rng.randn(n_samples, n_features)
    y = X.dot(w)
    X_test = rng.randn(n_samples, n_features)
    y_test = X_test.dot(w)
    return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have seleted an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have seleted an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
    # MultiTaskLasso must cope with read-only (memory-mapped) inputs.
    X, y, X_test, y_test = build_dataset()
    Y = np.c_[y, y]
    with TempMemmap((X, Y)) as (X, Y):
        # Bug fix: the targets used to be rebuilt here from the in-memory
        # ``y`` (``Y = np.c_[y, y]``), which replaced the read-only memmap
        # with a fresh writable array and defeated the purpose of the test.
        # Fit directly on the memmapped ``Y`` instead.
        clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
        assert_true(0 < clf.dual_gap_ < 1e-5)
        # Both tasks share identical targets, so coefficients must match.
        assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprection_precompute_enet():
    # Test that setting precompute="auto" gives a Deprecation Warning.
    # NOTE(review): the function name misspells "deprecation"; it is kept
    # unchanged so test discovery and any external references stay stable.
    X, y, _, _ = build_dataset(n_samples=20, n_features=10)
    clf = ElasticNet(precompute="auto")
    assert_warns(DeprecationWarning, clf.fit, X, y)
    clf = Lasso(precompute="auto")
    assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same input for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
    # fit(check_input=False) must work when data is already validated, and
    # must be allowed to produce wrong results when validation is bypassed.
    X, y, _, _ = build_dataset(n_samples=20, n_features=10)
    X = check_array(X, order='F', dtype='float64')
    # Bug fix: the target used to be built from X (``check_array(X, ...)``),
    # which silently turned this into a multi-output fit on a copy of the
    # design matrix instead of validating ``y`` itself.
    y = check_array(y, order='F', dtype='float64', ensure_2d=False)
    clf = ElasticNet(selection='cyclic', tol=1e-8)
    # Check that no error is raised if data is provided in the right format
    clf.fit(X, y, check_input=False)
    X = check_array(X, order='F', dtype='float32')
    clf.fit(X, y, check_input=True)
    # Check that an error is raised if data is provided in the wrong format,
    # because of check bypassing
    assert_raises(ValueError, clf.fit, X, y, check_input=False)
    # With no input checking, providing X in C order should result in false
    # computation
    X = check_array(X, order='C', dtype='float64')
    clf.fit(X, y, check_input=False)
    coef_false = clf.coef_
    clf.fit(X, y, check_input=True)
    coef_true = clf.coef_
    assert_raises(AssertionError, assert_array_almost_equal,
                  coef_true, coef_false)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
ChrisThoung/fsic | examples/godley-lavoie_2007/5_lp1.py | 1 | 8990 | # -*- coding: utf-8 -*-
"""
5_lp1
=====
FSIC implementation of Model *LP1*, a model of long-term bonds, capital gains
and liquidity preference, from Chapter 5 of Godley and Lavoie (2007). Parameter
values come from Zezza (2006).
Godley and Lavoie (2007) analyse Model *LP1* beginning from an initial
stationary state. This script first finds that stationary state, matching (more
or less) the starting values in Zezza's (2006) EViews script. The script then
analyses the impact of an increase in both short- and long-term interest rates,
as in Godley and Lavoie (2007).
This example also shows how to use conditional expressions in a model
definition to avoid generating NaNs (from divide-by-zero operations). This is
new in FSIC version 0.5.0.dev and an alternative to handling NaNs using the
`errors` keyword argument in `solve()`.
While FSIC only requires NumPy, this example also uses:
* `pandas`, to generate a DataFrame of results using `fsictools`
* `matplotlib`, to replicate, from Godley and Lavoie (2007):
* Figure 5.2: Evolution of the wealth to disposable income ratio, following
an increase in both the short-term and long-term interest
rates
* Figure 5.3: Evolution of household consumption and disposable income,
following an increase in both the short-term and long-term
interest rates
* Figure 5.4: Evolution of the bonds to wealth ratio and the bills to
wealth ratio, following an increase from 3% to 4% in the
short-term interest rate, while the long-term interest rate
moves from 5% to 6.67%
Outputs:
1. Replicates Figures 5.2, 5.3 and 5.4 of Godley and Lavoie (2007), saving the
charts to 'figures-5.2,5.3,5.4.png'
References:
Godley, W., Lavoie, M. (2007),
*Monetary economics: an integrated approach to
credit, money, income, production and wealth*,
Palgrave Macmillan
Zezza, G. (2006),
'EViews macros for building models in *Wynne Godley and Marc Lavoie*
Monetary Economics: an integrated approach to
credit, money, income, production and wealth',
http://gennaro.zezza.it/software/eviews/glch05.php
"""
import matplotlib.pyplot as plt
import pandas as pd
import fsic
import fsictools
# Inline comments give the corresponding equation numbers from Godley and
# Lavoie (2007) - for reference only; FSIC ignores comments, just as Python
# does.
# 'A' suffix indicates a slight amendment to be compatible with the FSIC
# parser.
script = '''
Y = C + G # 5.1
YDr = Y - T + r_b[-1] * Bh[-1] + BLh[-1] # 5.2
T = {theta} * (Y + r_b[-1] * Bh[-1] + BLh[-1]) # 5.3
V = V[-1] + (YDr - C) + CG # 5.4
CG = (p_bL - p_bL[-1]) * BLh[-1] # 5.5A
C = {alpha_1} * YDr_e + {alpha_2} * V[-1] # 5.6
V_e = V[-1] + (YDr_e - C) + CG # 5.7
Hh = V - Bh - (p_bL * BLh) # 5.8
Hd = V_e - Bd - (p_bL * BLd) # 5.9
Bd = V_e * ({lambda_20} + # 5.10A
{lambda_22} * r_b +
{lambda_23} * ERr_bL +
# Note the conditional expression to guard against NaNs
{lambda_24} * (YDr_e / V_e if V_e > 0 else 0))
BLd = V_e / p_bL * ({lambda_30} + # 5.11
{lambda_32} * r_b +
{lambda_33} * ERr_bL +
# Note the conditional expression to guard against NaNs
{lambda_34} * (YDr_e / V_e if V_e > 0 else 0))
Bh = Bd # 5.12
BLh = BLd # 5.13
Bs = (Bs[-1] + # 5.14A
(G + r_b[-1] * Bs[-1] + BLs[-1]) -
(T + r_b[-1] * Bcb[-1]) -
((BLs - BLs[-1]) * p_bL))
Hs = Hs[-1] + (Bcb - Bcb[-1]) # 5.15A
Bcb = Bs - Bh # 5.16
BLs = BLh # 5.17
ERr_bL = r_bL + {chi} * (p_bL_e - p_bL) / p_bL # 5.18
r_bL = 1 / p_bL # 5.19
p_bL_e = p_bL # 5.20
CG_e = {chi} * (p_bL_e - p_bL) * BLh # 5.21
YDr_e = YDr[-1] # 5.22
r_b = r_b_bar # 5.23
p_bL = p_bL_bar # 5.24
'''
# Parse the model specification above and generate the LP1 model class.
symbols = fsic.parse_model(script)
LP1 = fsic.build_model(symbols)
if __name__ == '__main__':
# 1. Find the stationary state of the model from an initial set of
# parameter values (from Zezza, 2006)
# Note that Zezza (2006) uses entirely positive values for the lambda
# parameters, setting the signs in the equation specifications. Below,
# the equations (and signs) follow Godley and Lavoie (2007), reversing
# the signs on the parameter values below as needed
starting_from_zero = LP1(range(100), # Enough periods to reach the stationary state
alpha_1=0.8, alpha_2=0.2, chi=0.1,
lambda_20=0.44196, lambda_22=1.1, lambda_23=-1, lambda_24=-0.03,
lambda_30=0.3997, lambda_32=-1, lambda_33=1.1, lambda_34=-0.03)
# Fiscal policy
starting_from_zero.G = 20
starting_from_zero.theta = 0.1938
# Monetary policy
starting_from_zero.r_b_bar = 0.03
starting_from_zero.p_bL_bar = 20
# Copy values for first period (not solved)
starting_from_zero.r_b[0] = starting_from_zero.r_b_bar[0]
starting_from_zero.p_bL[0] = starting_from_zero.p_bL_bar[0]
# Solve and take the results from the last period as the stationary state
starting_from_zero.solve()
stationary_state = dict(zip(starting_from_zero.names,
starting_from_zero.values[:, -1]))
# 2. Starting from that stationary state, simulate an increase in the
# short- and long-term interest rates
interest_rate_scenario = LP1(range(1945, 2010 + 1), **stationary_state)
interest_rate_scenario['r_b_bar', 1960:] = 0.04
interest_rate_scenario['p_bL_bar', 1960:] = 15
interest_rate_scenario.solve()
# 3. Reproduce Figures 5.2, 5.3 and 5.4 of Godley and Lavoie (2007)
results = fsictools.model_to_dataframe(interest_rate_scenario)
# Construct ratios for graphing
results['V:YD'] = results.eval('V / YDr')
results['Bh:V'] = results.eval('Bh / V')
results['BLh:V'] = results.eval('p_bL * BLh / V')
# Select same timespan as the original charts
results_to_plot = results.loc[1950:2000, :]
# Set up plot areas
_, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
plt.suptitle('Effects of an increase in the short- and long-term interest rates')
# Recreate Figure 5.2 (wealth-to-disposable-income ratio)
ax1.set_title('Evolution of the wealth-to-disposable-income ratio')
ax1.plot(results_to_plot.index, results_to_plot['V:YD'].values,
label='Wealth-to-disposable-income ratio',
color='#33C3F0')
ax1.set_xlim(min(results_to_plot.index), max(results_to_plot.index))
ax1.legend()
# Recreate Figure 5.3 (household consumption and disposable income)
ax2.set_title('Evolution of household consumption and disposable income')
ax2.plot(results_to_plot.index, results_to_plot['YDr'].values,
label='Disposable income',
color='#33C3F0')
ax2.plot(results_to_plot.index, results_to_plot['C'].values,
label='Consumption',
color='#FF4F2E', linestyle='--')
ax2.set_xlim(min(results_to_plot.index), max(results_to_plot.index))
ax2.legend()
# Recreate Figure 5.4 (bonds-to-wealth and bills-to-wealth ratios)
ax3.set_title('Evolution of the bonds-to-wealth and bills-to-wealth ratios')
ax3.plot(results_to_plot.index, results_to_plot['Bh:V'] * 100,
label='Bills-to-wealth ratio',
color='#33C3F0')
ax3.plot(results_to_plot.index, results_to_plot['BLh:V'] * 100,
label='Bonds-to-wealth ratio',
color='#FF4F2E', linestyle='--')
ax3.set_xlim(min(results_to_plot.index), max(results_to_plot.index))
ax3.set_ylabel('%')
ax3.legend()
plt.savefig('figures-5.2,5.3,5.4.png')
| mit |
rimio/wifi-rc | BehavioralCloning/model.py | 1 | 5118 | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.layers import Lambda, Conv2D, MaxPooling2D, Dropout, Dense, Flatten
from utils import INPUT_SHAPE, batch_generator
import argparse
import os
np.random.seed(0)
def read_DB(args):
    """Read the driving-log databases and build training samples.

    Each line of a data_db.txt file holds three whitespace-separated tokens:
    <image path> <steer> <throttle>.

    :param args: parsed CLI namespace; only ``args.data_dir`` is used.
    :returns: pair (X, Y) where X is the list of image paths and Y the list
        of "steer throttle" label strings, both values formatted to 3 decimals.
    """
    all_paths = [
        args.data_dir + "/data2/data_db.txt",
        args.data_dir + "/data3/data_db.txt",
        args.data_dir + "/data4/data_db.txt",
        args.data_dir + "/data5/data_db.txt",
        args.data_dir + "/dataRetrig/data_db.txt",
    ]
    # allPath.append(args.data_dir+"/dataNew/data_db.txt")
    # allPath.append(args.data_dir+"/data6/data_db.txt")
    X = []
    Y = []
    for path in all_paths:
        with open(path) as f:
            for line in f:
                tokens = line.split()
                X.append(tokens[0])
                steer = float(tokens[1])
                throttle = float(tokens[2])
                # Binarize throttle: any positive reading becomes full forward
                # (1.0); everything else becomes reverse (-1.0). A value set
                # to 1.0 above can no longer match the < 0.2 test.
                if throttle > 0.0:
                    throttle = 1.0
                if throttle < 0.2:
                    throttle = -1.0
                # if (steer > 0.6): steer = 1.0
                # if (steer < -0.6): steer = -1.0
                # BUG FIX: the original format string used field {0} twice, so
                # the throttle column silently repeated the steer value.
                Y.append("{0:.3f} {1:.3f}".format(steer, throttle))
    return X, Y
def load_data(args):
    """Load the full data set and split it into training and validation parts.

    Returns (X_train, X_valid, y_train, y_valid), matching the order
    produced by sklearn's train_test_split.
    """
    features, labels = read_DB(args)
    return train_test_split(
        features, labels, test_size=args.test_size, random_state=0)
def build_model(args):
    """
    Modified NVIDIA model

    Builds the end-to-end driving CNN (after Bojarski et al.'s PilotNet):
    a normalization Lambda, five convolutions, then fully-connected layers
    down to a 2-unit linear output (steer, throttle).
    Only ``args.keep_prob`` is read (dropout probability for both Dropout
    layers).
    """
    model = Sequential()
    # Scale pixel values from [0, 255] to [-1, 1].
    model.add(Lambda(lambda x: x/127.5-1.0, input_shape=INPUT_SHAPE))
    # Feature extractor: strided 5x5 convs followed by two 3x3 convs.
    model.add(Conv2D(24, 5, 5, activation='elu', subsample=(2, 2)))
    model.add(Conv2D(36, 5, 5, activation='elu', subsample=(2, 2)))
    model.add(Conv2D(48, 5, 5, activation='elu', subsample=(2, 2)))
    model.add(Conv2D(64, 3, 3, activation='elu'))
    model.add(Conv2D(64, 3, 3, activation='elu'))
    model.add(Dropout(args.keep_prob))
    model.add(Flatten())
    # Regressor head; final Dense(2) is linear: (steer, throttle).
    model.add(Dense(256, activation='elu'))
    model.add(Dense(128, activation='elu'))
    model.add(Dense(64, activation='elu'))
    model.add(Dropout(args.keep_prob))
    model.add(Dense(32, activation='elu'))
    model.add(Dense(2))
    model.summary()
    return model
def train_model(model, args, X_train, X_valid, y_train, y_valid):
    """
    Train the model

    Compiles with MSE/Adam and fits from the batch generator, checkpointing
    weights after each epoch.

    NOTE(review): save_best_only is commented out below, so a checkpoint file
    is written for EVERY epoch; also ``args.samples_per_epoch`` is never used
    here (len(X_train) is passed instead) -- confirm both are intentional.
    """
    # Checkpoint filename embeds the validation loss of the epoch.
    checkpoint = ModelCheckpoint('modelThrottle1-{val_loss:03f}.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 mode='auto')
    #save_best_only=args.save_best_only,
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=args.learning_rate))
    # max_q_size=1 keeps at most one pre-generated batch queued.
    model.fit_generator(batch_generator(args.data_dir, X_train, y_train, args.batch_size, True),
                        len(X_train),
                        nb_epoch=args.nb_epoch,
                        max_q_size=1,
                        validation_data=batch_generator(args.data_dir, X_valid, y_valid, args.batch_size, False),
                        nb_val_samples=len(X_valid),
                        callbacks=[checkpoint],
                        verbose=1)
def s2b(s):
    """Convert a string to a boolean: 'true'/'yes'/'y'/'1' (any case) is True."""
    return s.lower() in ("true", "yes", "y", "1")
def main():
    """
    Load train/validation data set and train the model

    Parses CLI flags, echoes the configuration, then wires load_data ->
    build_model -> train_model.
    """
    parser = argparse.ArgumentParser(description='Behavioral Cloning Training Program')
    parser.add_argument('-d', help='data directory', dest='data_dir', type=str, default='/home/andi/Desktop/Wi-FiRCCar/wifi-rc/dataSets')
    parser.add_argument('-t', help='test size fraction', dest='test_size', type=float, default=0.1)
    parser.add_argument('-k', help='drop out probability', dest='keep_prob', type=float, default=0.5)
    parser.add_argument('-n', help='number of epochs', dest='nb_epoch', type=int, default=20)
    parser.add_argument('-s', help='samples per epoch', dest='samples_per_epoch', type=int, default=20000)
    parser.add_argument('-b', help='batch size', dest='batch_size', type=int, default=200)
    parser.add_argument('-o', help='save best models only', dest='save_best_only', type=s2b, default='true')
    parser.add_argument('-l', help='learning rate', dest='learning_rate', type=float, default=1.0e-3)
    args = parser.parse_args()
    # Print the effective configuration before training starts.
    print('-' * 30)
    print('Parameters')
    print('-' * 30)
    for key, value in vars(args).items():
        print('{:<20} := {}'.format(key, value))
    print('-' * 30)
    # load_data returns (X_train, X_valid, y_train, y_valid), matching
    # train_model's positional parameters.
    data = load_data(args)
    model = build_model(args)
    train_model(model, args, *data)
if __name__ == '__main__':
main()
| gpl-3.0 |
nhejazi/scikit-learn | examples/linear_model/plot_ols_3d.py | 53 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
# Use only features 0 and 1 of the diabetes data; hold out the last 20
# samples as a test set.
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
# Fit an ordinary least squares model on the two selected features.
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
# #############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf, y=None):
    """Render one 3-D scatter of the training data plus the fitted plane.

    Parameters: fig_num (matplotlib figure id), elev/azim (camera angles),
    X_train (n_samples x 2 feature matrix), clf (linear model fitted on two
    features), y (target values).

    ``y`` defaults to the module-level ``y_train`` so existing 5-argument
    call sites keep working -- previously this function read the global
    implicitly, which made it unusable with any other target vector.
    """
    if y is None:
        y = y_train  # backward-compatible fallback to the module-level target
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, elev=elev, azim=azim)
    ax.scatter(X_train[:, 0], X_train[:, 1], y, c='k', marker='+')
    # Evaluate the fitted plane on the four corners of the plotting domain.
    ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
                    np.array([[-.1, .15], [-.1, .15]]),
                    clf.predict(np.array([[-.1, -.1, .15, .15],
                                          [-.1, .15, -.1, .15]]).T
                                ).reshape((2, 2)),
                    alpha=.5)
    ax.set_xlabel('X_1')
    ax.set_ylabel('X_2')
    ax.set_zlabel('Y')
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
# Default oblique view.
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
# Edge-on view along feature 2: feature 1 clearly explains y.
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
# Edge-on view along feature 1: feature 2 adds little information.
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
jreback/pandas | pandas/tests/io/excel/test_openpyxl.py | 2 | 3933 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.excel import ExcelWriter, _OpenpyxlWriter
openpyxl = pytest.importorskip("openpyxl")
pytestmark = pytest.mark.parametrize("ext", [".xlsx"])
def test_to_excel_styleconverter(ext):
    """A pandas style dict must map onto the equivalent openpyxl style objects."""
    from openpyxl import styles
    hstyle = {
        "font": {"color": "00FF0000", "bold": True},
        "borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"},
        "alignment": {"horizontal": "center", "vertical": "top"},
        "fill": {"patternType": "solid", "fgColor": {"rgb": "006666FF", "tint": 0.3}},
        "number_format": {"format_code": "0.00"},
        "protection": {"locked": True, "hidden": False},
    }
    # Hand-built openpyxl objects the dict above should translate into.
    font_color = styles.Color("00FF0000")
    font = styles.Font(bold=True, color=font_color)
    side = styles.Side(style=styles.borders.BORDER_THIN)
    border = styles.Border(top=side, right=side, bottom=side, left=side)
    alignment = styles.Alignment(horizontal="center", vertical="top")
    fill_color = styles.Color(rgb="006666FF", tint=0.3)
    fill = styles.PatternFill(patternType="solid", fgColor=fill_color)
    number_format = "0.00"
    protection = styles.Protection(locked=True, hidden=False)
    kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)
    assert kw["font"] == font
    assert kw["border"] == border
    assert kw["alignment"] == alignment
    assert kw["fill"] == fill
    assert kw["number_format"] == number_format
    assert kw["protection"] == protection
def test_write_cells_merge_styled(ext):
    """Writing a styled merged range must restyle the cells it covers."""
    from pandas.io.formats.excel import ExcelCell
    sheet_name = "merge_styled"
    sty_b1 = {"font": {"color": "00FF0000"}}
    sty_a2 = {"font": {"color": "0000FF00"}}
    # B1 and A2 are first written with their own (different) font colors.
    initial_cells = [
        ExcelCell(col=1, row=0, val=42, style=sty_b1),
        ExcelCell(col=0, row=1, val=99, style=sty_a2),
    ]
    sty_merged = {"font": {"color": "000000FF", "bold": True}}
    sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)
    openpyxl_sty_merged = sty_kwargs["font"]
    # A1:B2-style merge (mergestart/mergeend) carrying the merged style.
    merge_cells = [
        ExcelCell(
            col=0, row=0, val="pandas", mergestart=1, mergeend=1, style=sty_merged
        )
    ]
    with tm.ensure_clean(ext) as path:
        with _OpenpyxlWriter(path) as writer:
            writer.write_cells(initial_cells, sheet_name=sheet_name)
            writer.write_cells(merge_cells, sheet_name=sheet_name)
            wks = writer.sheets[sheet_name]
        xcell_b1 = wks["B1"]
        xcell_a2 = wks["A2"]
        # Both covered cells now carry the merged range's font.
        assert xcell_b1.font == openpyxl_sty_merged
        assert xcell_a2.font == openpyxl_sty_merged
@pytest.mark.parametrize(
    "mode,expected", [("w", ["baz"]), ("a", ["foo", "bar", "baz"])]
)
def test_write_append_mode(ext, mode, expected):
    """mode="w" replaces the workbook; mode="a" appends to existing sheets."""
    df = DataFrame([1], columns=["baz"])
    with tm.ensure_clean(ext) as f:
        # Pre-create a workbook with sheets "foo" and "bar".
        wb = openpyxl.Workbook()
        wb.worksheets[0].title = "foo"
        wb.worksheets[0]["A1"].value = "foo"
        wb.create_sheet("bar")
        wb.worksheets[1]["A1"].value = "bar"
        wb.save(f)
        with ExcelWriter(f, engine="openpyxl", mode=mode) as writer:
            df.to_excel(writer, sheet_name="baz", index=False)
        # Re-open and check surviving sheet names and A1 contents.
        wb2 = openpyxl.load_workbook(f)
        result = [sheet.title for sheet in wb2.worksheets]
        assert result == expected
        for index, cell_value in enumerate(expected):
            assert wb2.worksheets[index]["A1"].value == cell_value
def test_to_excel_with_openpyxl_engine(ext):
    """Smoke test: a Styler exports through the openpyxl engine without error."""
    # GH 29854
    with tm.ensure_clean(ext) as filename:
        df1 = DataFrame({"A": np.linspace(1, 10, 10)})
        df2 = DataFrame({"B": np.linspace(1, 20, 10)})
        df = pd.concat([df1, df2], axis=1)
        styled = df.style.applymap(
            lambda val: "color: %s" % ("red" if val < 0 else "black")
        ).highlight_max()
        styled.to_excel(filename, engine="openpyxl")
| bsd-3-clause |
KellenSunderland/sockeye | setup.py | 1 | 3763 | import sys
import os
import re
import logging
import argparse
import subprocess
from setuptools import setup, find_packages
from contextlib import contextmanager
ROOT = os.path.dirname(__file__)
def get_long_description():
    """Return the README as reStructuredText for PyPI, falling back to raw
    markdown when pypandoc is unavailable or conversion fails."""
    with open(os.path.join(ROOT, 'README.md'), encoding='utf-8') as f:
        markdown_txt = f.read()
    try:
        import pypandoc
        long_description = pypandoc.convert(markdown_txt, 'rst', format='md')
    except(IOError, ImportError):
        # pypandoc raises IOError when the pandoc binary itself is missing.
        logging.warning("Could not import package 'pypandoc'. Will not convert markdown readme to rst for PyPI.")
        long_description = markdown_txt
    return long_description
def get_version():
    """Extract the ``__version__`` string from sockeye/__init__.py."""
    VERSION_RE = re.compile(r'''__version__ = ['"]([0-9.]+)['"]''')
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked it via open(...).read()).
    with open(os.path.join(ROOT, 'sockeye', '__init__.py')) as init_file:
        init = init_file.read()
    return VERSION_RE.search(init).group(1)
def get_git_hash():
    """Return the current git commit hash, or "unknown" when git is missing
    or cannot be invoked."""
    try:
        sp = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out_str = sp.communicate()[0].decode("utf-8").strip()
        return out_str
    except (OSError, ValueError, subprocess.SubprocessError):
        # Typo fix: the original returned "unkown". Also narrowed from a bare
        # except, which would have swallowed KeyboardInterrupt/SystemExit too.
        return "unknown"
@contextmanager
def temporarily_write_git_hash(git_hash, filename=os.path.join('sockeye', 'git_version.py')):
    """Temporarily create a module git_version in sockeye so that it will be
    included when installing and packaging.

    :param git_hash: hash string embedded in the generated module.
    :param filename: target path; removed again when the context exits.
    :raises RuntimeError: if the target file already exists.
    """
    content = """
# This file is automatically generated in setup.py
git_hash = "%s"
""" % git_hash
    if os.path.exists(filename):
        raise RuntimeError("%s already exists, will not overwrite" % filename)
    with open(filename, "w") as out:
        out.write(content)
    # try/finally alone guarantees cleanup; the original's `except: raise`
    # clause was a no-op (and a bare except is an anti-pattern anyway).
    try:
        yield
    finally:
        os.remove(filename)
def get_requirements(filename):
    """Return the lines of the given requirements file (relative to ROOT),
    stripped of trailing whitespace."""
    with open(os.path.join(ROOT, filename)) as req_file:
        return [entry.rstrip() for entry in req_file]
# Building the docs is optional: fall back gracefully when sphinx is absent.
try:
    from sphinx.setup_command import BuildDoc
    cmdclass = {'build_sphinx': BuildDoc}
except:
    # NOTE(review): bare except also hides unrelated import-time errors;
    # `except ImportError:` would be safer -- confirm before narrowing.
    logging.warning("Package 'sphinx' not found. You will not be able to build docs.")
    cmdclass = {}
# Peel off the custom -r/--requirement flag before setuptools parses argv.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-r', '--requirement', help='Optionally specify a different requirements.txt file.', required=False)
args, unparsed_args = parser.parse_known_args()
sys.argv[1:] = unparsed_args
if args.requirement is None:
    install_requires = get_requirements('requirements.txt')
else:
    install_requires = get_requirements(args.requirement)
# Keyword arguments for setup(); assembled first so the git-hash module can
# be written temporarily around the actual setup() call below.
args = dict(
    name='sockeye',
    version=get_version(),
    description='Sequence-to-Sequence framework for Neural Machine Translation',
    long_description=get_long_description(),
    url='https://github.com/awslabs/sockeye',
    author='Amazon',
    author_email='sockeye-dev@amazon.com',
    maintainer_email='sockeye-dev@amazon.com',
    license='Apache License 2.0',
    python_requires='>=3',
    packages=find_packages(exclude=("test",)),
    setup_requires=['pytest-runner'],
    tests_require=['pytest', 'pytest-cov'],
    extras_require={
        'optional': ['tensorboard', 'matplotlib'],
        'dev': get_requirements('requirements.dev.txt')
    },
    install_requires=install_requires,
    entry_points={
        'console_scripts': [
            'sockeye-train = sockeye.train:main',
            'sockeye-translate = sockeye.translate:main',
            'sockeye-average = sockeye.average:main',
            'sockeye-embeddings = sockeye.embeddings:main',
            'sockeye-evaluate = sockeye.evaluate:main'
        ],
    },
    classifiers = [
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3 :: Only',
    ],
    cmdclass=cmdclass,
)
# Write sockeye/git_version.py for the duration of setup() so the installed
# package records the commit it was built from; the file is removed after.
with temporarily_write_git_hash(get_git_hash()):
    setup(**args)
| apache-2.0 |
mxjl620/scikit-learn | sklearn/utils/extmath.py | 70 | 21951 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
    """Euclidean norm of a vector / Frobenius norm of a matrix.

    Delegates to the BLAS ``nrm2`` routine, which is more precise than
    ``sqrt(squared_norm(x))``.
    """
    values = np.asarray(x)
    blas_nrm2, = linalg.get_blas_funcs(['nrm2'], [values])
    return blas_nrm2(values)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
    _ravel = np.ravel
else:
    # order='K' reads elements in memory-layout order, avoiding a copy for
    # non-C-contiguous inputs.
    _ravel = partial(np.ravel, order='K')
def squared_norm(x):
    """Squared Euclidean (vector) or Frobenius (matrix) norm of x.

    Faster than ``norm(x) ** 2``.
    """
    flat = _ravel(x)
    return np.dot(flat, flat)
def row_norms(X, squared=False):
    """Row-wise (squared) Euclidean norm of X.

    Equivalent to np.sqrt((X * X).sum(axis=1)); also supports CSR sparse
    matrices and does not create an X.shape-sized temporary.
    Performs no input validation.
    """
    if issparse(X):
        sq_norms = csr_row_norms(X)
    else:
        # einsum computes the per-row dot product without a temporary.
        sq_norms = np.einsum('ij,ij->i', X, X)
    return sq_norms if squared else np.sqrt(sq_norms)
def fast_logdet(A):
    """log(det(A)) for a symmetric matrix A.

    More robust than np.log(det(A)); returns -inf when det(A) is
    non-positive or undefined.
    """
    sign, logdet = np.linalg.slogdet(A)
    return logdet if sign > 0 else -np.inf
def _impose_f_order(X):
    """Return (Fortran-ordered array for X, flag: was X transposed).

    Transposing a C-contiguous array yields an F-contiguous view for free,
    so the BLAS call can then swap trans flags instead of copying.
    """
    # important to access flags instead of calling np.isfortran,
    # this catches corner cases.
    if X.flags.c_contiguous:
        return check_array(X.T, copy=False, order='F'), True
    else:
        return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
    """2-D float matrix product via BLAS gemm.

    Raises ValueError whenever the caller should fall back to np.dot
    (inner-dimension mismatch, unsupported dtypes, 1-d or non-2-d inputs).
    """
    if B.shape[0] != A.shape[A.ndim - 1]:  # check adopted from '_dotblas.c'
        raise ValueError

    if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
                                 for x in [A, B]):
        warnings.warn('Data must be of same type. Supported types '
                      'are 32 and 64 bit float. '
                      'Falling back to np.dot.', NonBLASDotWarning)
        raise ValueError

    if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
        raise ValueError

    # scipy 0.9 compliant API
    dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
    # Impose Fortran order so gemm avoids copies; the returned flags become
    # the trans_a/trans_b arguments.
    A, trans_a = _impose_f_order(A)
    B, trans_b = _impose_f_order(B)
    return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
    def fast_dot(A, B):
        """Compute fast dot products directly calling BLAS.

        This function calls BLAS directly while warranting Fortran contiguity.
        This helps avoiding extra copies `np.dot` would have created.
        For details see section `Linear Algebra on large Arrays`:
        http://wiki.scipy.org/PerformanceTips

        Parameters
        ----------
        A, B: instance of np.ndarray
            Input arrays. Arrays are supposed to be of the same dtype and to
            have exactly 2 dimensions. Currently only floats are supported.
            In case these requirements aren't met np.dot(A, B) is returned
            instead. To activate the related warning issued in this case
            execute the following lines of code:

            >> import warnings
            >> from sklearn.utils.validation import NonBLASDotWarning
            >> warnings.simplefilter('always', NonBLASDotWarning)
        """
        try:
            return _fast_dot(A, B)
        except ValueError:
            # Maltyped or malformed data.
            return np.dot(A, B)
else:
    # Recent NumPy's np.dot is already fast; use it unchanged.
    fast_dot = np.dot
def density(w, **kwargs):
    """Fraction of nonzero entries in a (possibly sparse) vector/matrix.

    Returns a value between 0 and 1; None maps to 0.
    """
    if hasattr(w, "toarray"):
        # Sparse input: stored-entry count over the full matrix size.
        return float(w.nnz) / (w.shape[0] * w.shape[1])
    if w is None:
        return 0
    return float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, dense_output=False):
    """Dot product that handles the sparse matrix case correctly.

    Dense inputs go through fast_dot (BLAS GEMM where possible, avoiding
    unnecessary copies); sparse operands use the sparse `*` matrix product,
    optionally densified when dense_output is True.
    """
    if not (issparse(a) or issparse(b)):
        return fast_dot(a, b)
    product = a * b
    if dense_output and hasattr(product, "toarray"):
        product = product.toarray()
    return product
def randomized_range_finder(A, size, n_iter, random_state=None):
    """Compute an orthonormal matrix whose range approximates the range of A.

    :param A: 2D input data matrix.
    :param size: number of columns of the returned basis.
    :param n_iter: number of power iterations used to stabilize the result.
    :param random_state: RandomState instance or int seed (0 by default).
    :returns: Q, a 2D array whose range approximates the range of A well.

    Follows Algorithm 4.3 of Halko, et al., 2009, "Finding structure with
    randomness: Stochastic algorithms for constructing approximate matrix
    decompositions" (http://arxiv.org/pdf/0909.4061).
    """
    rng = check_random_state(random_state)

    # Sample the range of A by projecting random Gaussian vectors
    # of shape (A.shape[1], size) through it.
    projections = safe_sparse_dot(A, rng.normal(size=(A.shape[1], size)))

    # Power iterations further 'imprint' the top singular vectors of A.
    for _ in xrange(n_iter):
        projections = safe_sparse_dot(A, safe_sparse_dot(A.T, projections))

    # Extract an orthonormal basis of the sampled range.
    Q, _ = linalg.qr(projections, mode='economic')
    return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
                   transpose='auto', flip_sign=True, random_state=0):
    """Compute a truncated randomized SVD of M.

    :param M: ndarray or sparse matrix to decompose.
    :param n_components: number of singular values and vectors to extract.
    :param n_oversamples: additional random vectors used to sample the range
        of M for proper conditioning (n_components + n_oversamples in total).
    :param n_iter: number of power iterations (helps with very noisy
        problems).
    :param transpose: True, False or 'auto'; 'auto' transposes M when
        M.shape[1] > M.shape[0], which tends to be a little faster here.
    :param flip_sign: resolve the sign ambiguity of the SVD by making the
        largest loadings of each left singular vector positive.
    :param random_state: RandomState instance or int seed (0 by default).

    This is usually a very good approximate truncated SVD, particularly fast
    on large matrices when only a few components are needed.

    References: Halko et al., 2009 (http://arxiv.org/abs/arXiv:0909.4061);
    Martinsson, Rokhlin & Tygert, "A randomized algorithm for the
    decomposition of matrices".
    """
    rng = check_random_state(random_state)
    n_random = n_components + n_oversamples
    n_samples, n_features = M.shape

    if transpose == 'auto' and n_samples > n_features:
        transpose = True
    if transpose:
        # This implementation is a bit faster with smaller shape[1].
        M = M.T

    Q = randomized_range_finder(M, n_random, n_iter, rng)

    # Project M onto the (k + p)-dimensional subspace spanned by Q.
    B = safe_sparse_dot(Q.T, M)

    # Exact SVD of the thin matrix B, lifted back through Q.
    Uhat, s, V = linalg.svd(B, full_matrices=False)
    del B
    U = np.dot(Q, Uhat)

    if flip_sign:
        U, V = svd_flip(U, V)

    if transpose:
        # Transpose back the results according to the input convention.
        return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
    return U[:, :n_components], s[:n_components], V[:n_components, :]
def logsumexp(arr, axis=0):
    """Compute log(sum(exp(arr))) along `axis` assuming arr is in the log
    domain, while minimizing the possibility of over/underflow."""
    arr = np.rollaxis(arr, axis)
    # Normalize by the max: with the log this is what accumulates the least
    # error, and it keeps the exponentials in a safe range.
    vmax = arr.max(axis=0)
    result = np.log(np.sum(np.exp(arr - vmax), axis=0))
    result += vmax
    return result
def weighted_mode(a, w, axis=0):
    """Return the weighted modal (most common) value along `axis` and its
    weighted count.

    Extension of the algorithm in scipy.stats.mode where each entry of `a`
    carries the weight given in `w`. If several values tie, only the first
    is returned.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    w : array_like
        n-dimensional array of weights for each value (broadcast up to
        a.shape if necessary).
    axis : int, optional
        Axis along which to operate; None flattens the inputs first.

    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.

    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        a = np.ravel(a)
        w = np.ravel(w)
        axis = 0
    else:
        a = np.asarray(a)
        w = np.asarray(w)
    if a.shape != w.shape:
        # Broadcast the weights up to the shape of the values.
        w = np.zeros(a.shape, dtype=w.dtype) + w

    out_shape = list(a.shape)
    out_shape[axis] = 1
    # BUG FIX: initialize the result before the loop. The original returned a
    # loop-carried variable, raising NameError when `a` was empty.
    best_values = np.zeros(out_shape)
    best_counts = np.zeros(out_shape)
    for candidate in np.unique(np.ravel(a)):  # iterate over ALL unique values
        # Sum of the weights wherever `a` equals this candidate value.
        weights_here = np.zeros(a.shape)
        mask = (a == candidate)
        weights_here[mask] = w[mask]
        counts = np.expand_dims(np.sum(weights_here, axis), axis)
        # Keep the candidate wherever it strictly beats the current best, so
        # ties resolve to the first (smallest) value seen.
        best_values = np.where(counts > best_counts, candidate, best_values)
        best_counts = np.maximum(counts, best_counts)
    return best_values, best_counts
def pinvh(a, cond=None, rcond=None, lower=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.

    Calculate a generalized inverse of a symmetric matrix using its
    eigenvalue decomposition, including all 'large' eigenvalues.

    :param a: (N, N) real symmetric or complex hermetian matrix.
    :param cond: eigenvalues smaller in magnitude than cond * largest are
        treated as zero; None or -1 selects a machine-precision default.
    :param rcond: deprecated alias for cond; takes precedence when given.
    :param lower: whether the pertinent array data is taken from the lower
        or upper triangle of a (default: lower).
    :returns: (N, N) pseudo-inverse array.
    :raises LinAlgError: if the eigendecomposition does not converge.
    """
    a = np.asarray_chkfinite(a)
    eigvals, eigvecs = linalg.eigh(a, lower=lower)

    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        type_char = eigvecs.dtype.char.lower()
        factor = {'f': 1E3, 'd': 1E6}
        cond = factor[type_char] * np.finfo(type_char).eps

    # Unlike the SVD case, eigh can yield negative eigenvalues, hence abs().
    large = abs(eigvals) > cond * np.max(abs(eigvals))
    inv_eigvals = np.zeros_like(eigvals)
    inv_eigvals[large] = 1.0 / eigvals[large]

    return np.dot(eigvecs * inv_eigvals, np.conjugate(eigvecs).T)
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.

    :param arrays: list of 1-D array-likes to form the cartesian product of.
    :param out: optional preallocated ndarray of shape (M, len(arrays)) to
        place the product in.
    :returns: 2-D array of shape (M, len(arrays)); each row is one element
        of the product, with the last input varying fastest, e.g.
        cartesian(([1, 2], [3])) -> [[1, 3], [2, 3]].
    """
    arrays = [np.asarray(values) for values in arrays]
    # Enumerate every index combination: one row per product element.
    grid = np.indices(tuple(len(values) for values in arrays))
    grid = grid.reshape(len(arrays), -1).T
    if out is None:
        out = np.empty_like(grid, dtype=arrays[0].dtype)
    for col, values in enumerate(arrays):
        out[:, col] = values[grid[:, col]]
    return out
def svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.

    Adjusts the columns of u and the rows of v such that the loadings in the
    columns in u that are largest in absolute value are always positive.

    Parameters
    ----------
    u, v : ndarray
        u and v are the output of `linalg.svd` or
        `sklearn.utils.extmath.randomized_svd`, with matching inner
        dimensions so one can compute `np.dot(u * s, v)`. Modified in place.
    u_based_decision : boolean, (default=True)
        If True, use the columns of u as the basis for sign flipping.
        Otherwise, use the rows of v. The choice of which variable to base
        the decision on is generally algorithm dependent.

    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    if u_based_decision:
        # Sign of the largest-|.| entry of each column of u.
        max_abs_cols = np.argmax(np.abs(u), axis=0)
        # np.arange instead of six's xrange: idiomatic NumPy fancy indexing
        # with no Python-2 compatibility shim needed.
        signs = np.sign(u[max_abs_cols, np.arange(u.shape[1])])
    else:
        # Sign of the largest-|.| entry of each row of v.
        max_abs_rows = np.argmax(np.abs(v), axis=1)
        signs = np.sign(v[np.arange(v.shape[0]), max_abs_rows])
    # Apply once; the original duplicated these two lines in both branches.
    u *= signs
    v *= signs[:, np.newaxis]
    return u, v
def log_logistic(X, out=None):
    """Compute log(1 / (1 + e ** -x)) element-wise, numerically stably.

    The Cython helper splits positive and negative inputs::

        -log(1 + exp(-x_i))      if x_i > 0
        x_i - log(1 + exp(x_i))  if x_i <= 0

    For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.

    :param X: array-like, shape (M, N) or (M,); argument to the function.
    :param out: optional preallocated output array of matching shape.
    :returns: array of the same shape with the log-logistic of every entry.

    See the blog post describing this implementation:
    http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
    """
    squeeze_result = (X.ndim == 1)
    X = check_array(np.atleast_2d(X), dtype=np.float64)
    n_samples, n_features = X.shape
    if out is None:
        out = np.empty_like(X)
    # The heavy lifting happens in the compiled helper, writing into `out`.
    _log_logistic_sigmoid(n_samples, n_features, X, out)
    return np.squeeze(out) if squeeze_result else out
def softmax(X, copy=True):
    """Row-wise softmax: np.exp(X) / np.sum(np.exp(X), axis=1).

    The row maximum is subtracted from every entry first, so exponentiating
    large values cannot overflow.

    :param X: array-like, shape (M, N).
    :param copy: operate on a copy of X; with copy=False, X is mutated.
    :returns: array of shape (M, N) whose rows sum to 1.
    """
    if copy:
        X = np.copy(X)
    X -= np.max(X, axis=1).reshape((-1, 1))
    np.exp(X, X)
    X /= np.sum(X, axis=1).reshape((-1, 1))
    return X
def safe_min(X):
    """Return the minimum value of a dense or a CSR/CSC matrix.

    Adapted from http://stackoverflow.com/q/13426580.
    NOTE(review): for scipy sparse matrices ``X.size`` equals ``getnnz()``,
    which would make the ``min(m, 0)`` fallback for implicit zeros
    unreachable -- confirm against the scipy version in use.
    """
    if not issparse(X):
        return X.min()
    if len(X.data) == 0:
        # No stored entries at all: the matrix is entirely implicit zeros.
        return 0
    stored_min = X.data.min()
    return stored_min if X.getnnz() == X.size else min(stored_min, 0)
def make_nonnegative(X, min_value=0):
    """Ensure `X.min()` >= `min_value` by shifting X when needed.

    Raises ValueError for sparse input that would need shifting, since
    adding a constant to every entry would densify it.
    """
    shift = min_value - safe_min(X)
    if shift > 0:
        if issparse(X):
            raise ValueError("Cannot make the data matrix"
                             " nonnegative because it is sparse."
                             " Adding a value to every entry would"
                             " make it no longer sparse.")
        X = X + shift
    return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
pybrain2/pybrain2 | examples/rl/environments/shipsteer/shipbench_sde.py | 26 | 3454 | from __future__ import print_function
#!/usr/bin/env python
#########################################################################
# Reinforcement Learning with ENAC (Episodic Natural Actor-Critic) on the
# ShipSteering environment.
#
# Requirements:
#   pybrain (tested on rev. 1195, ship env rev. 1202)
# Synopsis:
#    shipbenchm.py [<True|False> [logfile]]
# (first argument is graphics flag, second is an optional history log file)
#########################################################################
__author__ = "Martin Felder, Thomas Rueckstiess"
__version__ = '$Id$'

#---
# default backend GtkAgg does not plot properly on Ubuntu 8.04
import matplotlib
matplotlib.use('TkAgg')
#---

from pybrain.rl.environments.shipsteer import ShipSteeringEnvironment
from pybrain.rl.environments.shipsteer import GoNorthwardTask
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners.directsearch.enac import ENAC
from pybrain.rl.experiments.episodic import EpisodicExperiment
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.plotting import MultilinePlotter

from pylab import figure, ion
from scipy import mean
import sys

# First CLI argument toggles live plotting of the reward curve.
# NOTE(review): eval() on a command-line argument executes arbitrary code;
# it is only acceptable here because the synopsis restricts input to
# "True"/"False" -- a strict string comparison would be safer.
if len(sys.argv) > 1:
    useGraphics = eval(sys.argv[1])
else:
    useGraphics = False

# create task: steer the ship northwards, episodes capped at `maxsteps` steps
env = ShipSteeringEnvironment()
maxsteps = 500
task = GoNorthwardTask(env=env, maxsteps=maxsteps)

# create controller network: a single linear mapping from task observations
# to actions, without bias
net = buildNetwork(task.outdim, task.indim, bias=False)

# create agent with an ENAC policy-gradient learner
learner = ENAC()
learner.gd.rprop = True
# only relevant for RP (resilient propagation)
learner.gd.deltamin = 0.0001
# only relevant for BP (plain gradient descent)
learner.gd.alpha = 0.01
learner.gd.momentum = 0.9

agent = LearningAgent(net, learner)
agent.actaspg = False

# create experiment
experiment = EpisodicExperiment(task, agent)

# print weights at beginning
print(agent.module.params)

rewards = []
if useGraphics:
    figure()
    ion()
    pl = MultilinePlotter(autoscale=1.2, xlim=[0, 50], ylim=[0, 1])
    pl.setLineStyle(linewidth=2)

# main training loop: gather `batch` episodes per gradient estimate, then learn
x = 0
batch = 30  # number of samples per gradient estimate (was: 20; more here due to stochastic setting)
while x < 5000:
    experiment.doEpisodes(batch)
    x += batch
    reward = mean(agent.history.getSumOverSequences('reward')) * task.rewardscale
    if useGraphics:
        pl.addData(0, x, reward)
    print(agent.module.params)
    print(reward)
    agent.learn()
    agent.reset()
    if useGraphics:
        pl.update()

# BUG FIX: the optional log file is the *second* CLI argument (see synopsis);
# the original code saved to sys.argv[1], i.e. the graphics flag.
if len(sys.argv) > 2:
    agent.history.saveToFile(sys.argv[2], protocol=-1, arraysonly=True)

if useGraphics:
    pl.show(popup=True)

# To view what the simulation is doing at the moment, set the environment with
# True, go to pybrain/rl/environments/ode/ and start viewer.py (python-openGL
# must be installed, see PyBrain documentation)
| bsd-3-clause |
robintw/scikit-image | doc/examples/plot_threshold_adaptive.py | 22 | 1307 | """
=====================
Adaptive Thresholding
=====================
Thresholding is the simplest way to segment objects from a background. If that
background is relatively uniform, then you can use a global threshold value to
binarize the image by pixel-intensity. If there's large variation in the
background intensity, however, adaptive thresholding (a.k.a. local or dynamic
thresholding) may produce better results.
Here, we binarize an image using the `threshold_adaptive` function, which
calculates thresholds in regions of size `block_size` surrounding each pixel
(i.e. local neighborhoods). Each threshold value is the weighted mean of the
local neighborhood minus an offset value.
"""
# Compare Otsu's single global threshold with a locally adaptive threshold
# on scikit-image's sample "page" image (scanned text, uneven illumination).
import matplotlib.pyplot as plt

from skimage import data
from skimage.filters import threshold_otsu, threshold_adaptive

image = data.page()

# Global binarisation: one Otsu threshold applied to the whole image.
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh

# Adaptive binarisation: a per-pixel threshold computed from the weighted
# mean of each pixel's block_size x block_size neighbourhood, minus `offset`.
block_size = 40
binary_adaptive = threshold_adaptive(image, block_size, offset=10)

# Show the original and both binarisations stacked vertically, greyscale,
# with the axis ticks hidden.
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()

ax0.imshow(image)
ax0.set_title('Image')

ax1.imshow(binary_global)
ax1.set_title('Global thresholding')

ax2.imshow(binary_adaptive)
ax2.set_title('Adaptive thresholding')

for ax in axes:
    ax.axis('off')

plt.show()
| bsd-3-clause |
rosswhitfield/mantid | qt/applications/workbench/workbench/plotting/test/test_figureinteraction.py | 3 | 31775 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
# system imports
import unittest
# third-party library imports
import matplotlib
matplotlib.use('AGG') # noqa
import matplotlib.pyplot as plt
import numpy as np
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QMenu
from testhelpers import assert_almost_equal
# local package imports
from mantid.plots import MantidAxes
from unittest.mock import MagicMock, PropertyMock, call, patch
from mantid.simpleapi import CreateWorkspace
from mantidqt.plotting.figuretype import FigureType
from mantidqt.plotting.functions import plot, pcolormesh_from_names, plot_contour, pcolormesh
from mantidqt.utils.qt.testing import start_qapplication
from workbench.plotting.figureinteraction import FigureInteraction, LogNorm
@start_qapplication
class FigureInteractionTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Two small single-spectrum histogram workspaces shared by all tests:
        # bin edges 10 apart, so bin centres are [15, 25] and [16, 26].
        cls.ws = CreateWorkspace(
            DataX=np.array([10, 20, 30], dtype=np.float64),
            DataY=np.array([2, 3], dtype=np.float64),
            DataE=np.array([0.02, 0.02], dtype=np.float64),
            Distribution=False,
            UnitX='Wavelength',
            YUnitLabel='Counts',
            OutputWorkspace='ws')
        cls.ws1 = CreateWorkspace(
            DataX=np.array([11, 21, 31], dtype=np.float64),
            DataY=np.array([3, 4], dtype=np.float64),
            DataE=np.array([0.03, 0.03], dtype=np.float64),
            Distribution=False,
            UnitX='Wavelength',
            YUnitLabel='Counts',
            OutputWorkspace='ws1')
        # initialises the QApplication
        # NOTE(review): arguments look swapped vs the conventional
        # super(FigureInteractionTest, cls); this form still resolves
        # correctly because the second argument is a subclass of the first,
        # but the conventional order would be clearer.
        super(cls, FigureInteractionTest).setUpClass()
    @classmethod
    def tearDownClass(cls):
        # Remove the shared workspaces from Mantid's analysis data service
        # so they do not leak into other test suites.
        cls.ws.delete()
        cls.ws1.delete()
    def setUp(self):
        # Fresh interactor wired to a mocked figure manager that reports
        # right-clicks, plus an empty figure/axes pair for each test.
        fig_manager = self._create_mock_fig_manager_to_accept_right_click()
        fig_manager.fit_browser.tool = None  # no fit tool active by default
        self.interactor = FigureInteraction(fig_manager)
        self.fig, self.ax = plt.subplots()  # type: matplotlib.figure.Figure, MantidAxes
    def tearDown(self):
        # Close every matplotlib figure and drop per-test references so no
        # figure state carries over between tests.
        plt.close('all')
        del self.fig
        del self.ax
        del self.interactor
# Success tests
def test_construction_registers_handler_for_button_press_event(self):
fig_manager = MagicMock()
fig_manager.canvas = MagicMock()
interactor = FigureInteraction(fig_manager)
expected_call = [
call('button_press_event', interactor.on_mouse_button_press),
call('button_release_event', interactor.on_mouse_button_release),
call('draw_event', interactor.draw_callback),
call('motion_notify_event', interactor.motion_event),
call('resize_event', interactor.mpl_redraw_annotations),
call('figure_leave_event', interactor.on_leave),
call('axis_leave_event', interactor.on_leave),
call('scroll_event', interactor.on_scroll)
]
fig_manager.canvas.mpl_connect.assert_has_calls(expected_call)
self.assertEqual(len(expected_call), fig_manager.canvas.mpl_connect.call_count)
def test_disconnect_called_for_each_registered_handler(self):
fig_manager = MagicMock()
canvas = MagicMock()
fig_manager.canvas = canvas
interactor = FigureInteraction(fig_manager)
interactor.disconnect()
self.assertEqual(interactor.nevents, canvas.mpl_disconnect.call_count)
@patch('workbench.plotting.figureinteraction.QMenu',
autospec=True)
@patch('workbench.plotting.figureinteraction.figure_type',
autospec=True)
def test_right_click_gives_no_context_menu_for_empty_figure(self, mocked_figure_type,
mocked_qmenu):
fig_manager = self._create_mock_fig_manager_to_accept_right_click()
interactor = FigureInteraction(fig_manager)
mouse_event = self._create_mock_right_click()
mocked_figure_type.return_value = FigureType.Empty
with patch.object(interactor.toolbar_manager, 'is_tool_active',
lambda: False):
interactor.on_mouse_button_press(mouse_event)
self.assertEqual(0, mocked_qmenu.call_count)
@patch('workbench.plotting.figureinteraction.QMenu',
autospec=True)
@patch('workbench.plotting.figureinteraction.figure_type',
autospec=True)
def test_right_click_gives_context_menu_for_color_plot(self, mocked_figure_type,
mocked_qmenu):
fig_manager = self._create_mock_fig_manager_to_accept_right_click()
interactor = FigureInteraction(fig_manager)
mouse_event = self._create_mock_right_click()
mocked_figure_type.return_value = FigureType.Image
# Expect a call to QMenu() for the outer menu followed by three more calls
# for the Axes, Normalization and Colorbar menus
qmenu_call1 = MagicMock()
qmenu_call2 = MagicMock()
qmenu_call3 = MagicMock()
qmenu_call4 = MagicMock()
mocked_qmenu.side_effect = [qmenu_call1, qmenu_call2, qmenu_call3, qmenu_call4]
with patch('workbench.plotting.figureinteraction.QActionGroup',
autospec=True):
with patch.object(interactor.toolbar_manager, 'is_tool_active',
lambda: False):
interactor.on_mouse_button_press(mouse_event)
self.assertEqual(0, qmenu_call1.addAction.call_count)
expected_qmenu_calls = [call(),
call("Axes", qmenu_call1),
call("Normalization", qmenu_call1),
call("Color bar", qmenu_call1)]
self.assertEqual(expected_qmenu_calls, mocked_qmenu.call_args_list)
# 4 actions in Axes submenu
self.assertEqual(4, qmenu_call2.addAction.call_count)
# 2 actions in Normalization submenu
self.assertEqual(2, qmenu_call3.addAction.call_count)
# 2 actions in Colorbar submenu
self.assertEqual(2, qmenu_call4.addAction.call_count)
@patch('workbench.plotting.figureinteraction.QMenu',
autospec=True)
@patch('workbench.plotting.figureinteraction.figure_type',
autospec=True)
def test_right_click_gives_context_menu_for_plot_without_fit_enabled(self, mocked_figure_type,
mocked_qmenu_cls):
fig_manager = self._create_mock_fig_manager_to_accept_right_click()
fig_manager.fit_browser.tool = None
interactor = FigureInteraction(fig_manager)
mouse_event = self._create_mock_right_click()
mouse_event.inaxes.get_xlim.return_value = (1, 2)
mouse_event.inaxes.get_ylim.return_value = (1, 2)
mouse_event.inaxes.lines = []
mocked_figure_type.return_value = FigureType.Line
# Expect a call to QMenu() for the outer menu followed by two more calls
# for the Axes and Normalization menus
qmenu_call1 = MagicMock()
qmenu_call2 = MagicMock()
qmenu_call3 = MagicMock()
qmenu_call4 = MagicMock()
mocked_qmenu_cls.side_effect = [qmenu_call1, qmenu_call2, qmenu_call3, qmenu_call4]
with patch('workbench.plotting.figureinteraction.QActionGroup',
autospec=True):
with patch.object(interactor.toolbar_manager, 'is_tool_active',
lambda: False):
with patch.object(interactor, 'add_error_bars_menu', MagicMock()):
interactor.on_mouse_button_press(mouse_event)
self.assertEqual(0, qmenu_call1.addSeparator.call_count)
self.assertEqual(0, qmenu_call1.addAction.call_count)
expected_qmenu_calls = [call(),
call("Axes", qmenu_call1),
call("Normalization", qmenu_call1),
call("Markers", qmenu_call1)]
self.assertEqual(expected_qmenu_calls, mocked_qmenu_cls.call_args_list)
# 4 actions in Axes submenu
self.assertEqual(4, qmenu_call2.addAction.call_count)
# 2 actions in Normalization submenu
self.assertEqual(2, qmenu_call3.addAction.call_count)
# 3 actions in Markers submenu
self.assertEqual(3, qmenu_call4.addAction.call_count)
def test_toggle_normalization_no_errorbars(self):
self._test_toggle_normalization(errorbars_on=False, plot_kwargs={'distribution': True})
def test_toggle_normalization_with_errorbars(self):
self._test_toggle_normalization(errorbars_on=True, plot_kwargs={'distribution': True})
def test_correct_yunit_label_when_overplotting_after_normalization_toggle(self):
# The earlier version of Matplotlib on RHEL throws an error when performing the second
# plot in this test, if the lines have errorbars. The error occurred when it attempted
# to draw an interactive legend. Plotting without errors still fulfills the purpose of this
# test, so turn them off for old Matplotlib versions.
errors = True
if int(matplotlib.__version__[0]) < 2:
errors = False
fig = plot([self.ws], spectrum_nums=[1], errors=errors,
plot_kwargs={'distribution': True})
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
ax = fig.axes[0]
fig_interactor._toggle_normalization(ax)
self.assertEqual(r"Counts ($\AA$)$^{-1}$", ax.get_ylabel())
plot([self.ws1], spectrum_nums=[1], errors=errors, overplot=True, fig=fig)
self.assertEqual(r"Counts ($\AA$)$^{-1}$", ax.get_ylabel())
def test_normalization_toggle_with_no_autoscale_on_update_no_errors(self):
self._test_toggle_normalization(errorbars_on=False,
plot_kwargs={'distribution': True, 'autoscale_on_update': False})
def test_normalization_toggle_with_no_autoscale_on_update_with_errors(self):
self._test_toggle_normalization(errorbars_on=True,
plot_kwargs={'distribution': True, 'autoscale_on_update': False})
def test_add_error_bars_menu(self):
self.ax.errorbar([0, 15000], [0, 14000], yerr=[10, 10000], label='MyLabel 2')
self.ax.containers[0][2][0].axes.creation_args = [{'errorevery': 1}]
main_menu = QMenu()
self.interactor.add_error_bars_menu(main_menu, self.ax)
# Check the expected sub-menu with buttons is added
added_menu = main_menu.children()[1]
self.assertTrue(
any(FigureInteraction.SHOW_ERROR_BARS_BUTTON_TEXT == child.text() for child in added_menu.children()))
self.assertTrue(
any(FigureInteraction.HIDE_ERROR_BARS_BUTTON_TEXT == child.text() for child in added_menu.children()))
def test_context_menu_not_added_for_scripted_plot_without_errors(self):
self.ax.plot([0, 15000], [0, 15000], label='MyLabel')
self.ax.plot([0, 15000], [0, 14000], label='MyLabel 2')
main_menu = QMenu()
# QMenu always seems to have 1 child when empty,
# but just making sure the count as expected at this point in the test
self.assertEqual(1, len(main_menu.children()))
# plot above doesn't have errors, nor is a MantidAxes
# so no context menu will be added
self.interactor.add_error_bars_menu(main_menu, self.ax)
# number of children should remain unchanged
self.assertEqual(1, len(main_menu.children()))
def test_scripted_plot_line_without_label_handled_properly(self):
# having the special nolabel is usually present on lines with errors,
# but sometimes can be present on lines without errors, this test covers that case
self.ax.plot([0, 15000], [0, 15000], label='_nolegend_')
self.ax.plot([0, 15000], [0, 15000], label='_nolegend_')
main_menu = QMenu()
# QMenu always seems to have 1 child when empty,
# but just making sure the count as expected at this point in the test
self.assertEqual(1, len(main_menu.children()))
# plot above doesn't have errors, nor is a MantidAxes
# so no context menu will be added for error bars
self.interactor.add_error_bars_menu(main_menu, self.ax)
# number of children should remain unchanged
self.assertEqual(1, len(main_menu.children()))
def test_context_menu_added_for_scripted_plot_with_errors(self):
self.ax.plot([0, 15000], [0, 15000], label='MyLabel')
self.ax.errorbar([0, 15000], [0, 14000], yerr=[10, 10000], label='MyLabel 2')
self.ax.containers[0][2][0].axes.creation_args = [{'errorevery': 1}]
main_menu = QMenu()
# QMenu always seems to have 1 child when empty,
# but just making sure the count as expected at this point in the test
self.assertEqual(1, len(main_menu.children()))
# plot above doesn't have errors, nor is a MantidAxes
# so no context menu will be added
self.interactor.add_error_bars_menu(main_menu, self.ax)
added_menu = main_menu.children()[1]
# actions should have been added now, which for this case are only `Show all` and `Hide all`
self.assertTrue(
any(FigureInteraction.SHOW_ERROR_BARS_BUTTON_TEXT == child.text() for child in added_menu.children()))
self.assertTrue(
any(FigureInteraction.HIDE_ERROR_BARS_BUTTON_TEXT == child.text() for child in added_menu.children()))
def test_context_menu_includes_plot_type_if_plot_has_multiple_lines(self):
fig, self.ax = plt.subplots(subplot_kw={'projection': 'mantid'})
self.ax.plot([0, 1], [0, 1])
self.ax.plot([0, 1], [0, 1])
main_menu = QMenu()
# QMenu always seems to have 1 child when empty,
# but just making sure the count as expected at this point in the test
self.assertEqual(1, len(main_menu.children()))
self.interactor._add_plot_type_option_menu(main_menu, self.ax)
added_menu = main_menu.children()[1]
self.assertEqual(added_menu.children()[0].text(), "Plot Type")
def test_context_menu_does_not_include_plot_type_if_plot_has_one_line(self):
fig, self.ax = plt.subplots(subplot_kw={'projection': 'mantid'})
self.ax.errorbar([0, 1], [0, 1], capsize=1)
main_menu = QMenu()
# QMenu always seems to have 1 child when empty,
# but just making sure the count as expected at this point in the test
self.assertEqual(1, len(main_menu.children()))
self.interactor._add_plot_type_option_menu(main_menu, self.ax)
# Number of children should remain unchanged
self.assertEqual(1, len(main_menu.children()))
def test_scripted_plot_show_and_hide_all(self):
self.ax.plot([0, 15000], [0, 15000], label='MyLabel')
self.ax.errorbar([0, 15000], [0, 14000], yerr=[10, 10000], label='MyLabel 2')
self.ax.containers[0][2][0].axes.creation_args = [{'errorevery': 1}]
anonymous_menu = QMenu()
# this initialises some of the class internals
self.interactor.add_error_bars_menu(anonymous_menu, self.ax)
self.assertTrue(self.ax.containers[0][2][0].get_visible())
self.interactor.errors_manager.toggle_all_errors(self.ax, make_visible=False)
self.assertFalse(self.ax.containers[0][2][0].get_visible())
# make the menu again, this updates the internal state of the errors manager
# and is what actually happens when the user opens the menu again
self.interactor.add_error_bars_menu(anonymous_menu, self.ax)
self.interactor.errors_manager.toggle_all_errors(self.ax, make_visible=True)
self.assertTrue(self.ax.containers[0][2][0].get_visible())
def test_no_normalisation_options_on_non_workspace_plot(self):
fig, self.ax = plt.subplots(subplot_kw={'projection': 'mantid'})
self.ax.plot([1, 2], [1, 2], label="myLabel")
anonymous_menu = QMenu()
self.assertEqual(None, self.interactor._add_normalization_option_menu(anonymous_menu, self.ax))
# Failure tests
def test_construction_with_non_qt_canvas_raises_exception(self):
class NotQtCanvas(object):
pass
class FigureManager(object):
def __init__(self):
self.canvas = NotQtCanvas()
self.assertRaises(RuntimeError, FigureInteraction, FigureManager())
def test_context_menu_change_axis_scale_is_axis_aware(self):
fig = plot([self.ws, self.ws1], spectrum_nums=[1, 1], tiled=True)
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
scale_types = ("log", "log")
ax = fig.axes[0]
ax1 = fig.axes[1]
current_scale_types = (ax.get_xscale(), ax.get_yscale())
current_scale_types1 = (ax1.get_xscale(), ax1.get_yscale())
self.assertEqual(current_scale_types, current_scale_types1)
fig_interactor._quick_change_axes(scale_types, ax)
current_scale_types2 = (ax.get_xscale(), ax.get_yscale())
self.assertNotEqual(current_scale_types2, current_scale_types1)
def test_scale_on_ragged_workspaces_maintained_when_toggling_normalisation(self):
ws = CreateWorkspace(DataX=[1, 2, 3, 4, 2, 4, 6, 8], DataY=[2] * 8, NSpec=2, OutputWorkspace="ragged_ws")
fig = pcolormesh_from_names([ws])
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
fig_interactor._toggle_normalization(fig.axes[0])
clim = fig.axes[0].images[0].get_clim()
fig_interactor._toggle_normalization(fig.axes[0])
self.assertEqual(clim, fig.axes[0].images[0].get_clim())
self.assertNotEqual((-0.1, 0.1), fig.axes[0].images[0].get_clim())
def test_log_maintained_when_normalisation_toggled(self):
ws = CreateWorkspace(DataX=[1, 2, 3, 4, 2, 4, 6, 8], DataY=[2] * 8, NSpec=2, OutputWorkspace="ragged_ws")
fig = pcolormesh_from_names([ws])
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
fig_interactor._change_colorbar_axes(LogNorm)
fig_interactor._toggle_normalization(fig.axes[0])
self.assertTrue(isinstance(fig.axes[0].images[-1].norm, LogNorm))
@patch('workbench.plotting.figureinteraction.QMenu', autospec=True)
@patch('workbench.plotting.figureinteraction.figure_type', autospec=True)
def test_right_click_gives_marker_menu_when_hovering_over_one(self, mocked_figure_type, mocked_qmenu_cls):
mouse_event = self._create_mock_right_click()
mouse_event.inaxes.get_xlim.return_value = (1, 2)
mouse_event.inaxes.get_ylim.return_value = (1, 2)
mocked_figure_type.return_value = FigureType.Line
marker1 = MagicMock()
marker2 = MagicMock()
marker3 = MagicMock()
self.interactor.markers = [marker1, marker2, marker3]
for marker in self.interactor.markers:
marker.is_above.return_value = True
# Expect a call to QMenu() for the outer menu followed by two more calls
# for the Axes and Normalization menus
qmenu_call1 = MagicMock()
qmenu_call2 = MagicMock()
qmenu_call3 = MagicMock()
qmenu_call4 = MagicMock()
mocked_qmenu_cls.side_effect = [qmenu_call1, qmenu_call2, qmenu_call3, qmenu_call4]
with patch('workbench.plotting.figureinteraction.QActionGroup', autospec=True):
with patch.object(self.interactor.toolbar_manager, 'is_tool_active', lambda: False):
with patch.object(self.interactor, 'add_error_bars_menu', MagicMock()):
self.interactor.on_mouse_button_press(mouse_event)
self.assertEqual(0, qmenu_call1.addSeparator.call_count)
self.assertEqual(0, qmenu_call1.addAction.call_count)
expected_qmenu_calls = [call(),
call(marker1.name, qmenu_call1),
call(marker2.name, qmenu_call1),
call(marker3.name, qmenu_call1)]
self.assertEqual(expected_qmenu_calls, mocked_qmenu_cls.call_args_list)
# 2 Actions in marker menu
self.assertEqual(2, qmenu_call2.addAction.call_count)
self.assertEqual(2, qmenu_call3.addAction.call_count)
self.assertEqual(2, qmenu_call4.addAction.call_count)
@patch('workbench.plotting.figureinteraction.SingleMarker')
def test_adding_horizontal_marker_adds_correct_marker(self, mock_marker):
y0, y1 = 0, 1
data = MagicMock()
axis = MagicMock()
self.interactor._add_horizontal_marker(data, y0, y1, axis)
expected_call = call(self.interactor.canvas, '#2ca02c', data, y0, y1,
name='marker 0',
marker_type='YSingle',
line_style='dashed',
axis=axis)
self.assertEqual(1, mock_marker.call_count)
mock_marker.assert_has_calls([expected_call])
@patch('workbench.plotting.figureinteraction.SingleMarker')
def test_adding_vertical_marker_adds_correct_marker(self, mock_marker):
x0, x1 = 0, 1
data = MagicMock()
axis = MagicMock()
self.interactor._add_vertical_marker(data, x0, x1, axis)
expected_call = call(self.interactor.canvas, '#2ca02c', data, x0, x1,
name='marker 0',
marker_type='XSingle',
line_style='dashed',
axis=axis)
self.assertEqual(1, mock_marker.call_count)
mock_marker.assert_has_calls([expected_call])
def test_delete_marker_does_not_delete_markers_if_not_present(self):
marker = MagicMock()
self.interactor.markers = []
self.interactor._delete_marker(marker)
self.assertEqual(0, self.interactor.canvas.draw.call_count)
self.assertEqual(0, marker.marker.remove.call_count)
self.assertEqual(0, marker.remove_all_annotations.call_count)
def test_delete_marker_preforms_correct_cleanup(self):
marker = MagicMock()
self.interactor.markers = [marker]
self.interactor._delete_marker(marker)
self.assertEqual(1, marker.marker.remove.call_count)
self.assertEqual(1, marker.remove_all_annotations.call_count)
self.assertEqual(1, self.interactor.canvas.draw.call_count)
self.assertNotIn(marker, self.interactor.markers)
@patch('workbench.plotting.figureinteraction.SingleMarkerEditor')
@patch('workbench.plotting.figureinteraction.QApplication')
def test_edit_marker_opens_correct_editor(self, mock_qapp, mock_editor):
marker = MagicMock()
expected_call = [call(self.interactor.canvas,
marker,
self.interactor.valid_lines,
self.interactor.valid_colors,
[])]
self.interactor._edit_marker(marker)
self.assertEqual(1, mock_qapp.restoreOverrideCursor.call_count)
mock_editor.assert_has_calls(expected_call)
@patch('workbench.plotting.figureinteraction.GlobalMarkerEditor')
def test_global_edit_marker_opens_correct_editor(self, mock_editor):
marker = MagicMock()
self.interactor.markers = [marker]
expected_call = [call(self.interactor.canvas, [marker],
self.interactor.valid_lines,
self.interactor.valid_colors)]
self.interactor._global_edit_markers()
mock_editor.assert_has_calls(expected_call)
def test_motion_event_returns_if_toolbar_has_active_tools(self):
self.interactor.toolbar_manager.is_tool_active = MagicMock(return_value=True)
self.interactor._set_hover_cursor = MagicMock()
self.interactor.motion_event(MagicMock())
self.assertEqual(0, self.interactor._set_hover_cursor.call_count)
def test_motion_event_returns_if_fit_active(self):
self.interactor.toolbar_manager.is_fit_active = MagicMock(return_value=True)
self.interactor._set_hover_cursor = MagicMock()
self.interactor.motion_event(MagicMock())
self.assertEqual(0, self.interactor._set_hover_cursor.call_count)
def test_motion_event_changes_cursor_and_draws_canvas_if_any_marker_is_moving(self):
markers = [MagicMock(), MagicMock(), MagicMock()]
for marker in markers:
marker.mouse_move.return_value = True
event = MagicMock()
event.xdata = 1
event.ydata = 2
self.interactor.markers = markers
self.interactor.toolbar_manager.is_tool_active = MagicMock(return_value=False)
self.interactor.toolbar_manager.is_fit_active = MagicMock(return_value=False)
self.interactor._set_hover_cursor = MagicMock()
self.interactor.motion_event(event)
self.interactor._set_hover_cursor.assert_has_calls([call(1, 2)])
self.assertEqual(1, self.interactor.canvas.draw.call_count)
def test_motion_event_changes_cursor_and_does_not_draw_canvas_if_no_marker_is_moving(self):
markers = [MagicMock(), MagicMock(), MagicMock()]
for marker in markers:
marker.mouse_move.return_value = False
event = MagicMock()
event.xdata = 1
event.ydata = 2
self.interactor.markers = markers
self.interactor.toolbar_manager.is_tool_active = MagicMock(return_value=False)
self.interactor.toolbar_manager.is_fit_active = MagicMock(return_value=False)
self.interactor._set_hover_cursor = MagicMock()
self.interactor.motion_event(event)
self.interactor._set_hover_cursor.assert_has_calls([call(1, 2)])
self.assertEqual(0, self.interactor.canvas.draw.call_count)
def test_redraw_annotations_removes_and_adds_all_annotations_for_all_markers(self):
markers = [MagicMock(), MagicMock(), MagicMock()]
call_list = [call.remove_all_annotations(), call.add_all_annotations()]
self.interactor.markers = markers
self.interactor.redraw_annotations()
for marker in markers:
marker.assert_has_calls(call_list)
def test_mpl_redraw_annotations_does_not_redraw_if_event_does_not_have_a_button_attribute(self):
self.interactor.redraw_annotations = MagicMock()
event = MagicMock(spec='no_button')
event.no_button = MagicMock(spec='no_button')
self.interactor.mpl_redraw_annotations(event.no_button)
self.assertEqual(0, self.interactor.redraw_annotations.call_count)
def test_mpl_redraw_annotations_does_not_redraw_if_event_button_not_pressed(self):
self.interactor.redraw_annotations = MagicMock()
event = MagicMock()
event.button = None
self.interactor.mpl_redraw_annotations(event)
self.assertEqual(0, self.interactor.redraw_annotations.call_count)
def test_mpl_redraw_annotations_redraws_if_button_pressed(self):
self.interactor.redraw_annotations = MagicMock()
event = MagicMock()
self.interactor.mpl_redraw_annotations(event)
self.assertEqual(1, self.interactor.redraw_annotations.call_count)
def test_toggle_normalisation_on_contour_plot_maintains_contour_line_colour(self):
from mantid.plots.legend import convert_color_to_hex
ws = CreateWorkspace(DataX=[1, 2, 3, 4, 2, 4, 6, 8], DataY=[2] * 8, NSpec=2, OutputWorkspace="test_ws")
fig = plot_contour([ws])
for col in fig.get_axes()[0].collections:
col.set_color("#ff9900")
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
fig_interactor._toggle_normalization(fig.axes[0])
self.assertTrue(all(convert_color_to_hex(col.get_color()[0]) == "#ff9900"
for col in fig.get_axes()[0].collections))
def test_toggle_normalisation_applies_to_all_images_if_one_colorbar(self):
fig = pcolormesh([self.ws, self.ws])
mock_canvas = MagicMock(figure=fig)
fig_manager_mock = MagicMock(canvas=mock_canvas)
fig_interactor = FigureInteraction(fig_manager_mock)
# there should be 3 axes, 2 colorplots and 1 colorbar
self.assertEqual(3, len(fig.axes))
fig.axes[0].tracked_workspaces.values()
self.assertTrue(fig.axes[0].tracked_workspaces['ws'][0].is_normalized)
self.assertTrue(fig.axes[1].tracked_workspaces['ws'][0].is_normalized)
fig_interactor._toggle_normalization(fig.axes[0])
self.assertFalse(fig.axes[0].tracked_workspaces['ws'][0].is_normalized)
self.assertFalse(fig.axes[1].tracked_workspaces['ws'][0].is_normalized)
# Private methods
def _create_mock_fig_manager_to_accept_right_click(self):
fig_manager = MagicMock()
canvas = MagicMock()
type(canvas).buttond = PropertyMock(return_value={Qt.RightButton: 3})
fig_manager.canvas = canvas
return fig_manager
def _create_mock_right_click(self):
mouse_event = MagicMock(inaxes=MagicMock(spec=MantidAxes, collections = [], creation_args = [{}]))
type(mouse_event).button = PropertyMock(return_value=3)
return mouse_event
    def _test_toggle_normalization(self, errorbars_on, plot_kwargs):
        """Toggle normalization twice and check the data and y-label each time.

        The first toggle divides counts by the bin width (10), the second
        restores the raw counts; the x-data (bin centres) must be unchanged.
        """
        fig = plot([self.ws], spectrum_nums=[1], errors=errorbars_on,
                   plot_kwargs=plot_kwargs)
        mock_canvas = MagicMock(figure=fig)
        fig_manager_mock = MagicMock(canvas=mock_canvas)
        fig_interactor = FigureInteraction(fig_manager_mock)

        # Earlier versions of matplotlib do not store the data associated with
        # a line with high precision and hence we need to set a lower tolerance
        # when making comparisons of this data.
        # NOTE(review): this is a lexicographic string comparison, which
        # misorders versions from "10" upwards -- confirm whether a numeric
        # comparison (e.g. on the parsed major version) is needed.
        if matplotlib.__version__ < "2":
            decimal_tol = 1
        else:
            decimal_tol = 7

        ax = fig.axes[0]
        # First toggle: counts -> counts per Angstrom (divide by bin width 10).
        fig_interactor._toggle_normalization(ax)
        assert_almost_equal(ax.lines[0].get_xdata(), [15, 25])
        assert_almost_equal(ax.lines[0].get_ydata(), [0.2, 0.3], decimal=decimal_tol)
        self.assertEqual("Counts ($\\AA$)$^{-1}$", ax.get_ylabel())
        # Second toggle: back to raw counts.
        fig_interactor._toggle_normalization(ax)
        assert_almost_equal(ax.lines[0].get_xdata(), [15, 25])
        assert_almost_equal(ax.lines[0].get_ydata(), [2, 3], decimal=decimal_tol)
        self.assertEqual("Counts", ax.get_ylabel())
# Allow the suite to be run directly: python test_figureinteraction.py
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
jseabold/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 104 | 3778 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)

# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause

import numpy as np

from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm

from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

# Training data: 20 points of a sine wave with additive Gaussian noise.
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])

# First run: initialize with a very large length scale. With this start the
# optimizer may converge to the high-noise local maximum of the LML.
plt.figure(0)
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
    + WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
                              alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
                 y_mean + np.sqrt(np.diag(y_cov)),
                 alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
          % (kernel, gp.kernel_,
             gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()

# Second run: initialize with a short length scale and a tiny noise level;
# this initialization reaches the second (higher-likelihood) optimum.
plt.figure(1)
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
    + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
                              alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
                 y_mean + np.sqrt(np.diag(y_cov)),
                 alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
          % (kernel, gp.kernel_,
             gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()

# Plot the log-marginal-likelihood (LML) landscape over (length scale,
# noise level) to visualize the two local maxima discussed in the docstring.
plt.figure(2)
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
# The kernel's theta is stored log-transformed, hence the np.log wrapper.
# NOTE(review): the leading 0.36 pins the first hyperparameter (presumably
# the constant-kernel amplitude) while the other two vary — confirm against
# the fitted gp.kernel_.
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
        for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T

vmin, vmax = (-LML).min(), (-LML).max()
vmax = 50  # clip the color range so structure near the optima stays visible
plt.contour(Theta0, Theta1, -LML,
            levels=np.logspace(np.log10(vmin), np.log10(vmax), 50),
            norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
| bsd-3-clause |
JessicaGarson/MovieSentiment | bagginglargerrange.py | 1 | 1537 | import pandas as pd
import numpy as np
from ggplot import *
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import auc_score
# Kaggle "Sentiment Analysis on Movie Reviews" data: phrase-level text
# snippets with sentiment labels. Paths are machine-specific.
train = pd.read_csv('/Users/jessicagarson/Downloads/Movie Reviews/train.csv')
test = pd.read_csv('/Users/jessicagarson/Downloads/Movie Reviews/test.csv')
def bagmodel(s):
    """Fit one member model of a bootstrap-aggregated ("bagging") ensemble.

    The CountVectorizer vocabulary is learned on the *full* phrase column so
    that every bagged model shares the same feature space; the logistic
    regression itself is trained on a bootstrap resample (sampling rows with
    replacement) of the data.

    Parameters
    ----------
    s : pandas.DataFrame
        Must contain a 'Phrase' (text) column and a 'Sentiment' (label)
        column.

    Returns
    -------
    model : LogisticRegression
        The classifier fitted on this bootstrap sample.
    """
    vectorizer = CountVectorizer()
    # Only the fitted vocabulary is needed here; the transformed matrix of
    # the full data was previously computed and discarded.
    vectorizer.fit(s.Phrase)
    # Bootstrap: draw len(s) row positions with replacement.
    choices = np.random.choice(range(len(s)), len(s), replace=True)
    # .iloc replaces the removed DataFrame.ix indexer; the draws are
    # positional, so positional indexing is the correct equivalent
    # (identical to the old .ix behavior on the default RangeIndex).
    sample = s.iloc[choices, :]
    X_train = vectorizer.transform(sample.Phrase)
    model = LogisticRegression().fit(X_train, list(sample.Sentiment))
    return model
# Train 100 bagged logistic-regression models, each on its own bootstrap
# resample of the training data.
models = []
for i in range(100):
    print i  # progress indicator (Python 2 print statement)
    models.append(bagmodel(train))

# Shared feature space for prediction: vocabulary fitted on the training
# phrases, then applied to the held-out test phrases.
vectorizer = CountVectorizer()
X_train = vectorizer.fit_transform(train.Phrase)
X_test = vectorizer.transform(test.Phrase)
# results = [x.predict(X_test) for x in models
# One prediction vector per bagged model, over all test rows.
result = [x.predict(X_test) for x in models]
from collections import Counter
def combination(s):
    """Majority vote: return the most common element of ``s`` and its count.

    Parameters
    ----------
    s : iterable
        Predicted labels from each bagged model for a single sample.

    Returns
    -------
    tuple
        ``(value, count)`` for the modal value. Ties are broken by first
        insertion order (Counter.most_common semantics). Raises IndexError
        when ``s`` is empty.
    """
    return Counter(s).most_common(1)[0]
combination([3,3,2,3,3,])  # NOTE(review): leftover smoke-test call; result discarded
# Majority vote across the 100 bagged models for every test phrase.
result_final = []
for i in range(len(test)):
    a, b = combination([x[i] for x in result])
    result_final.append(a)
result_final[0]  # NOTE(review): no-op expression, likely a REPL leftover
# Kaggle submission file: one predicted sentiment per PhraseId.
solution = pd.DataFrame({'PhraseId': test.PhraseId, 'Sentiment': result_final})
solution.to_csv('submissionbaggedagain.csv', index=False)
# Quick distribution check of the predicted sentiments (ggplot port).
plotout = ggplot(aes(x = 'Sentiment'), data=solution)
plotout + geom_histogram() | unlicense |
loli/semisupervisedforests | sklearn/ensemble/weight_boosting.py | 26 | 40570 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <noel@dawe.me>
# Gilles Louppe <g.louppe@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# Arnaud Joly <arnaud.v.joly@gmail.com>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import (
has_fit_parameter,
check_is_fitted)
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
    """Base class for AdaBoost estimators.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 estimator_params=tuple(),
                 learning_rate=1.,
                 random_state=None):
        # Estimator bookkeeping (base_estimator, n_estimators,
        # estimator_params) is handled by BaseEnsemble.
        super(BaseWeightBoosting, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)

        self.learning_rate = learning_rate
        self.random_state = random_state

    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier/regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
            forced to DTYPE from tree._tree if the base classifier of this
            ensemble weighted boosting classifier is a tree or forest.

        y : array-like of shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.

        Returns
        -------
        self : object
            Returns self.
        """
        # Check parameters
        if self.learning_rate <= 0:
            raise ValueError("learning_rate must be greater than zero")

        # Trees/forests consume CSC input and the tree DTYPE directly; other
        # estimators get CSR/CSC with their native dtype.
        if (self.base_estimator is None or
                isinstance(self.base_estimator, (BaseDecisionTree,
                                                 BaseForest))):
            dtype = DTYPE
            accept_sparse = 'csc'
        else:
            dtype = None
            accept_sparse = ['csr', 'csc']

        X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)

        if sample_weight is None:
            # Initialize weights to 1 / n_samples
            # NOTE(review): np.float is deprecated in NumPy >= 1.20 and
            # removed in 1.24; use float or np.float64 when upgrading.
            sample_weight = np.empty(X.shape[0], dtype=np.float)
            sample_weight[:] = 1. / X.shape[0]
        else:
            # Normalize existing weights
            sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)

            # Check that the sample weights sum is positive
            if sample_weight.sum() <= 0:
                raise ValueError(
                    "Attempting to fit with a non-positive "
                    "weighted number of samples.")

        # Check parameters
        self._validate_estimator()

        # Clear any previous fit results
        self.estimators_ = []
        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
        self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)

        for iboost in range(self.n_estimators):
            # Boosting step: subclasses return the reweighted samples plus
            # this round's estimator weight and error (None-triple on early
            # termination).
            sample_weight, estimator_weight, estimator_error = self._boost(
                iboost,
                X, y,
                sample_weight)

            # Early termination
            if sample_weight is None:
                break

            self.estimator_weights_[iboost] = estimator_weight
            self.estimator_errors_[iboost] = estimator_error

            # Stop if error is zero
            if estimator_error == 0:
                break

            sample_weight_sum = np.sum(sample_weight)

            # Stop if the sum of sample weights has become non-positive
            if sample_weight_sum <= 0:
                break

            if iboost < self.n_estimators - 1:
                # Normalize so the weights remain a distribution for the
                # next boosting round.
                sample_weight /= sample_weight_sum

        return self

    @abstractmethod
    def _boost(self, iboost, X, y, sample_weight):
        """Implement a single boost.

        Warning: This method needs to be overriden by subclasses.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels).

        sample_weight : array-like of shape = [n_samples]
            The current sample weights.

        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        pass

    def staged_score(self, X, y, sample_weight=None):
        """Return staged scores for X, y.

        This generator method yields the ensemble score after each iteration of
        boosting and therefore allows monitoring, such as to determine the
        score on a test set after each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like, shape = [n_samples]
            Labels for X.

        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        Returns
        -------
        z : float
        """
        for y_pred in self.staged_predict(X):
            # Accuracy for classifiers, R^2 for regressors.
            if isinstance(self, ClassifierMixin):
                yield accuracy_score(y, y_pred, sample_weight=sample_weight)
            else:
                yield r2_score(y, y_pred, sample_weight=sample_weight)

    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
        feature).

        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError("Estimator not fitted, "
                             "call `fit` before `feature_importances_`.")

        try:
            # Weighted average of per-estimator importances.
            norm = self.estimator_weights_.sum()
            return (sum(weight * clf.feature_importances_ for weight, clf
                    in zip(self.estimator_weights_, self.estimators_))
                    / norm)

        except AttributeError:
            raise AttributeError(
                "Unable to compute feature importances "
                "since base_estimator does not have a "
                "feature_importances_ attribute")

    def _check_sample_weight(self):
        # Boosting reweights the training samples each round, so the weak
        # learner's fit method must accept a sample_weight argument.
        if not has_fit_parameter(self.base_estimator_, "sample_weight"):
            raise ValueError("%s doesn't support sample_weight."
                             % self.base_estimator_.__class__.__name__)

    def _validate_X_predict(self, X):
        """Ensure that X is in the proper format"""
        # Mirror the sparse-format/dtype choices made in fit().
        if (self.base_estimator is None or
                isinstance(self.base_estimator,
                           (BaseDecisionTree, BaseForest))):
            X = check_array(X, accept_sparse='csr', dtype=DTYPE)
        else:
            X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])

        return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba <= 0] = 1e-5
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
    """An AdaBoost classifier.

    An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
    classifier on the original dataset and then fits additional copies of the
    classifier on the same dataset but where the weights of incorrectly
    classified instances are adjusted such that subsequent classifiers focus
    more on difficult cases.

    This class implements the algorithm known as AdaBoost-SAMME [2].

    Parameters
    ----------
    base_estimator : object, optional (default=DecisionTreeClassifier)
        The base estimator from which the boosted ensemble is built.
        Support for sample weighting is required, as well as proper `classes_`
        and `n_classes_` attributes.

    n_estimators : integer, optional (default=50)
        The maximum number of estimators at which boosting is terminated.
        In case of perfect fit, the learning procedure is stopped early.

    learning_rate : float, optional (default=1.)
        Learning rate shrinks the contribution of each classifier by
        ``learning_rate``. There is a trade-off between ``learning_rate`` and
        ``n_estimators``.

    algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
        If 'SAMME.R' then use the SAMME.R real boosting algorithm.
        ``base_estimator`` must support calculation of class probabilities.
        If 'SAMME' then use the SAMME discrete boosting algorithm.
        The SAMME.R algorithm typically converges faster than SAMME,
        achieving a lower test error with fewer boosting iterations.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    estimators_ : list of classifiers
        The collection of fitted sub-estimators.

    classes_ : array of shape = [n_classes]
        The classes labels.

    n_classes_ : int
        The number of classes.

    estimator_weights_ : array of floats
        Weights for each estimator in the boosted ensemble.

    estimator_errors_ : array of floats
        Classification error for each estimator in the boosted
        ensemble.

    feature_importances_ : array of shape = [n_features]
        The feature importances if supported by the ``base_estimator``.

    See also
    --------
    AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier

    References
    ----------
    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           on-Line Learning and an Application to Boosting", 1995.

    .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    def __init__(self,
                 base_estimator=None,
                 n_estimators=50,
                 learning_rate=1.,
                 algorithm='SAMME.R',
                 random_state=None):

        super(AdaBoostClassifier, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state)

        self.algorithm = algorithm

    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels).

        sample_weight : array-like of shape = [n_samples], optional
            Sample weights. If None, the sample weights are initialized to
            ``1 / n_samples``.

        Returns
        -------
        self : object
            Returns self.
        """
        # Check that algorithm is supported
        if self.algorithm not in ('SAMME', 'SAMME.R'):
            raise ValueError("algorithm %s is not supported" % self.algorithm)

        # Fit (the generic boosting loop lives in BaseWeightBoosting.fit)
        return super(AdaBoostClassifier, self).fit(X, y, sample_weight)

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        # Default weak learner: a decision stump (depth-1 tree).
        super(AdaBoostClassifier, self)._validate_estimator(
            default=DecisionTreeClassifier(max_depth=1))

        #  SAMME-R requires predict_proba-enabled base estimators
        if self.algorithm == 'SAMME.R':
            if not hasattr(self.base_estimator_, 'predict_proba'):
                raise TypeError(
                    "AdaBoostClassifier with algorithm='SAMME.R' requires "
                    "that the weak learner supports the calculation of class "
                    "probabilities with a predict_proba method.\n"
                    "Please change the base estimator or set "
                    "algorithm='SAMME' instead.")
        self._check_sample_weight()

    def _boost(self, iboost, X, y, sample_weight):
        """Implement a single boost.

        Perform a single boost according to the real multi-class SAMME.R
        algorithm or to the discrete SAMME algorithm and return the updated
        sample weights.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        y : array-like of shape = [n_samples]
            The target values (class labels).

        sample_weight : array-like of shape = [n_samples]
            The current sample weights.

        Returns
        -------
        sample_weight : array-like of shape = [n_samples] or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        estimator_error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        if self.algorithm == 'SAMME.R':
            return self._boost_real(iboost, X, y, sample_weight)

        else:  # elif self.algorithm == "SAMME":
            return self._boost_discrete(iboost, X, y, sample_weight)

    def _boost_real(self, iboost, X, y, sample_weight):
        """Implement a single boost using the SAMME.R real algorithm."""
        estimator = self._make_estimator()

        # Not every weak learner exposes a random_state parameter.
        try:
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass

        estimator.fit(X, y, sample_weight=sample_weight)

        y_predict_proba = estimator.predict_proba(X)

        # Class metadata is taken from the first fitted weak learner.
        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)

        y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
                                       axis=0)

        # Instances incorrectly classified
        incorrect = y_predict != y

        # Error fraction
        estimator_error = np.mean(
            np.average(incorrect, weights=sample_weight, axis=0))

        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1., 0.

        # Construct y coding as described in Zhu et al [2]:
        #
        #    y_k = 1 if c == k else -1 / (K - 1)
        #
        # where K == n_classes_ and c, k in [0, K) are indices along the second
        # axis of the y coding with c being the index corresponding to the true
        # class label.
        n_classes = self.n_classes_
        classes = self.classes_
        y_codes = np.array([-1. / (n_classes - 1), 1.])
        y_coding = y_codes.take(classes == y[:, np.newaxis])

        # Displace zero probabilities so the log is defined.
        # Also fix negative elements which may occur with
        # negative sample weights.
        y_predict_proba[y_predict_proba <= 0] = 1e-5

        # Boost weight using multi-class AdaBoost SAMME.R alg
        # NOTE(review): inner1d comes from the private numpy.core.umath_tests
        # module, removed in newer NumPy; (a * b).sum(axis=1) is the public
        # equivalent when upgrading.
        estimator_weight = (-1. * self.learning_rate
                            * (((n_classes - 1.) / n_classes) *
                               inner1d(y_coding, np.log(y_predict_proba))))

        # Only boost the weights if it will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))

        # Per-estimator weight is always 1. for SAMME.R.
        return sample_weight, 1., estimator_error

    def _boost_discrete(self, iboost, X, y, sample_weight):
        """Implement a single boost using the SAMME discrete algorithm."""
        estimator = self._make_estimator()

        # Not every weak learner exposes a random_state parameter.
        try:
            estimator.set_params(random_state=self.random_state)
        except ValueError:
            pass

        estimator.fit(X, y, sample_weight=sample_weight)

        y_predict = estimator.predict(X)

        # Class metadata is taken from the first fitted weak learner.
        if iboost == 0:
            self.classes_ = getattr(estimator, 'classes_', None)
            self.n_classes_ = len(self.classes_)

        # Instances incorrectly classified
        incorrect = y_predict != y

        # Error fraction
        estimator_error = np.mean(
            np.average(incorrect, weights=sample_weight, axis=0))

        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1., 0.

        n_classes = self.n_classes_

        # Stop if the error is at least as bad as random guessing
        if estimator_error >= 1. - (1. / n_classes):
            self.estimators_.pop(-1)
            if len(self.estimators_) == 0:
                raise ValueError('BaseClassifier in AdaBoostClassifier '
                                 'ensemble is worse than random, ensemble '
                                 'can not be fit.')
            return None, None, None

        # Boost weight using multi-class AdaBoost SAMME alg
        estimator_weight = self.learning_rate * (
            np.log((1. - estimator_error) / estimator_error) +
            np.log(n_classes - 1.))

        # Only boost the weights if I will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight * incorrect *
                                    ((sample_weight > 0) |
                                     (estimator_weight < 0)))

        return sample_weight, estimator_weight, estimator_error

    def predict(self, X):
        """Predict classes for X.

        The predicted class of an input sample is computed as the weighted mean
        prediction of the classifiers in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        pred = self.decision_function(X)

        # In the binary case the decision function is 1-D: its sign picks
        # the class.
        if self.n_classes_ == 2:
            return self.classes_.take(pred > 0, axis=0)

        return self.classes_.take(np.argmax(pred, axis=1), axis=0)

    def staged_predict(self, X):
        """Return staged predictions for X.

        The predicted class of an input sample is computed as the weighted mean
        prediction of the classifiers in the ensemble.

        This generator method yields the ensemble prediction after each
        iteration of boosting and therefore allows monitoring, such as to
        determine the prediction on a test set after each boost.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array, shape = [n_samples]
            The predicted classes.
        """
        n_classes = self.n_classes_
        classes = self.classes_

        if n_classes == 2:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(pred > 0, axis=0))

        else:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(
                    np.argmax(pred, axis=1), axis=0))

    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None

        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            pred = sum(_samme_proba(estimator, n_classes, X)
                       for estimator in self.estimators_)
        else:  # self.algorithm == "SAMME"
            # Weighted vote: each estimator contributes its weight to the
            # column of the class it predicts.
            pred = sum((estimator.predict(X) == classes).T * w
                       for estimator, w in zip(self.estimators_,
                                               self.estimator_weights_))

        pred /= self.estimator_weights_.sum()
        if n_classes == 2:
            # Collapse the two symmetric columns into a single signed score.
            pred[:, 0] *= -1
            return pred.sum(axis=1)
        return pred

    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each boosting iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each boosting iteration.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self, "n_classes_")
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None
        norm = 0.

        for weight, estimator in zip(self.estimator_weights_,
                                     self.estimators_):
            # Running normalizer over the weights seen so far.
            norm += weight

            if self.algorithm == 'SAMME.R':
                # The weights are all 1. for SAMME.R
                current_pred = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_pred = estimator.predict(X)
                current_pred = (current_pred == classes).T * weight

            if pred is None:
                pred = current_pred
            else:
                pred += current_pred

            if n_classes == 2:
                # Copy so the accumulated `pred` is not mutated by the
                # sign flip below.
                tmp_pred = np.copy(pred)
                tmp_pred[:, 0] *= -1
                yield (tmp_pred / norm).sum(axis=1)
            else:
                yield pred / norm

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        check_is_fitted(self, "n_classes_")

        n_classes = self.n_classes_
        X = self._validate_X_predict(X)

        if self.algorithm == 'SAMME.R':
            # The weights are all 1. for SAMME.R
            proba = sum(_samme_proba(estimator, n_classes, X)
                        for estimator in self.estimators_)
        else:  # self.algorithm == "SAMME"
            proba = sum(estimator.predict_proba(X) * w
                        for estimator, w in zip(self.estimators_,
                                                self.estimator_weights_))

        proba /= self.estimator_weights_.sum()
        # Map the additive scores back to a probability simplex via the
        # inverse of the SAMME.R coding, then renormalize per sample.
        proba = np.exp((1. / (n_classes - 1)) * proba)
        normalizer = proba.sum(axis=1)[:, np.newaxis]
        normalizer[normalizer == 0.0] = 1.0
        proba /= normalizer

        return proba

    def staged_predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.

        This generator method yields the ensemble predicted class probabilities
        after each iteration of boosting and therefore allows monitoring, such
        as to determine the predicted class probabilities on a test set after
        each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : generator of array, shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        X = self._validate_X_predict(X)

        n_classes = self.n_classes_
        proba = None
        norm = 0.

        for weight, estimator in zip(self.estimator_weights_,
                                     self.estimators_):
            # Running normalizer over the weights seen so far.
            norm += weight

            if self.algorithm == 'SAMME.R':
                # The weights are all 1. for SAMME.R
                current_proba = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_proba = estimator.predict_proba(X) * weight

            if proba is None:
                proba = current_proba
            else:
                proba += current_proba

            # Same exp/renormalize transform as predict_proba, applied to
            # the partial (normalized) sum.
            real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
            normalizer = real_proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            real_proba /= normalizer

            yield real_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the weighted mean predicted class log-probabilities of the classifiers
        in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of
            outputs is the same of that of the `classes_` attribute.
        """
        return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        # Default weak learner: a depth-3 regression tree.
        super(AdaBoostRegressor, self)._validate_estimator(
            default=DecisionTreeRegressor(max_depth=3))

        # AdaBoost.R2 reweights samples each round, so the weak learner must
        # accept sample_weight in fit().
        self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
    """Implement a single boost for regression

    Perform a single boost according to the AdaBoost.R2 algorithm and
    return the updated sample weights.

    Parameters
    ----------
    iboost : int
        The index of the current boost iteration.
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.
    y : array-like of shape = [n_samples]
        The target values (class labels in classification, real numbers in
        regression).
    sample_weight : array-like of shape = [n_samples]
        The current sample weights.

    Returns
    -------
    sample_weight : array-like of shape = [n_samples] or None
        The reweighted sample weights.
        If None then boosting has terminated early.
    estimator_weight : float
        The weight for the current boost.
        If None then boosting has terminated early.
    estimator_error : float
        The regression error for the current boost.
        If None then boosting has terminated early.
    """
    estimator = self._make_estimator()
    # Not every base estimator exposes a random_state parameter; ignore
    # the ValueError raised by set_params in that case.
    try:
        estimator.set_params(random_state=self.random_state)
    except ValueError:
        pass
    generator = check_random_state(self.random_state)
    # Weighted sampling of the training set with replacement
    # For NumPy >= 1.7.0 use np.random.choice
    cdf = sample_weight.cumsum()
    cdf /= cdf[-1]
    uniform_samples = generator.random_sample(X.shape[0])
    bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
    # searchsorted returns a scalar
    bootstrap_idx = np.array(bootstrap_idx, copy=False)
    # Fit on the bootstrapped sample and obtain a prediction
    # for all samples in the training set
    estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
    y_predict = estimator.predict(X)
    # Per-sample absolute error, normalized to [0, 1] by the largest
    # error, then transformed according to the configured loss.
    error_vect = np.abs(y_predict - y)
    error_max = error_vect.max()
    if error_max != 0.:
        error_vect /= error_max
    if self.loss == 'square':
        error_vect **= 2
    elif self.loss == 'exponential':
        error_vect = 1. - np.exp(- error_vect)
    # Calculate the average loss
    estimator_error = (sample_weight * error_vect).sum()
    if estimator_error <= 0:
        # Stop if fit is perfect
        return sample_weight, 1., 0.
    elif estimator_error >= 0.5:
        # Discard current estimator only if it isn't the only one
        if len(self.estimators_) > 1:
            self.estimators_.pop(-1)
        return None, None, None
    beta = estimator_error / (1. - estimator_error)
    # Boost weight using AdaBoost.R2 alg
    estimator_weight = self.learning_rate * np.log(1. / beta)
    # Weights are only reused by the next iteration, so skip the update
    # on the final boost.
    if not iboost == self.n_estimators - 1:
        sample_weight *= np.power(
            beta,
            (1. - error_vect) * self.learning_rate)
    return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
    """Predict regression value for X.

    The predicted regression value of an input sample is computed
    as the weighted median prediction of the classifiers in the ensemble.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.

    Returns
    -------
    y : array of shape = [n_samples]
        The predicted regression values.
    """
    check_is_fitted(self, "estimator_weights_")
    X = self._validate_X_predict(X)
    # Use every fitted estimator for the final prediction.
    n_fitted = len(self.estimators_)
    return self._get_median_predict(X, n_fitted)
def staged_predict(self, X):
    """Return staged predictions for X.

    The predicted regression value of an input sample is computed
    as the weighted median prediction of the classifiers in the ensemble.
    This generator method yields the ensemble prediction after each
    iteration of boosting and therefore allows monitoring, such as to
    determine the prediction on a test set after each boost.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrix can be CSC, CSR, COO,
        DOK, or LIL. DOK and LIL are converted to CSR.

    Returns
    -------
    y : generator of array, shape = [n_samples]
        The predicted regression values.
    """
    check_is_fitted(self, "estimator_weights_")
    X = self._validate_X_predict(X)
    # Yield the ensemble prediction using 1, 2, ... estimators in turn.
    stage = 0
    for _ in self.estimators_:
        stage += 1
        yield self._get_median_predict(X, limit=stage)
| bsd-3-clause |
expfactory/expfactory-docker | expdj/apps/experiments/views.py | 2 | 46466 | import csv
import datetime
import hashlib
import json
import os
import re
import shutil
import uuid
import numpy
import pandas
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied, ValidationError
from django.forms.models import model_to_dict
from django.http import HttpResponse, JsonResponse
from django.http.response import (Http404, HttpResponseForbidden,
HttpResponseRedirect)
from django.shortcuts import (get_object_or_404, redirect, render,
render_to_response)
from django.utils import timezone
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from expfactory.battery import get_experiment_run, get_load_static
from expfactory.experiment import load_experiment
from expfactory.survey import generate_survey
from expfactory.views import embed_experiment
import expdj.settings as settings
from expdj.apps.experiments.forms import (BatteryForm, BlacklistForm,
ExperimentForm,
ExperimentTemplateForm)
from expdj.apps.experiments.models import (Battery, CreditCondition,
Experiment, ExperimentTemplate,
ExperimentVariable)
from expdj.apps.experiments.utils import (complete_survey_result,
get_battery_results,
get_experiment_selection,
get_experiment_type,
install_experiments, make_results_df,
remove_keys, select_experiments,
update_credits)
from expdj.apps.main.views import google_auth_view
from expdj.apps.turk.models import (HIT, Assignment, Blacklist, Bonus, Result,
get_worker)
from expdj.apps.turk.tasks import (assign_experiment_credit,
check_battery_dependencies, check_blacklist,
experiment_reward, update_assignments)
from expdj.apps.turk.utils import get_worker_experiments
from expdj.apps.users.models import User
from expdj.settings import BASE_DIR, DOMAIN_NAME, MEDIA_ROOT, STATIC_ROOT
# Absolute path where installed experiment/survey/game static files live.
media_dir = os.path.join(BASE_DIR, MEDIA_ROOT)
### AUTHENTICATION ####################################################
def check_experiment_edit_permission(request):
    """Only superusers are allowed to edit experiment templates."""
    return bool(request.user.is_superuser)
def check_mturk_access(request):
    """MTurk features are available to superusers and users with the MTURK role."""
    if request.user.is_superuser:
        return True
    roles = User.objects.filter(user=request.user)
    if len(roles) == 0:
        return False
    return roles[0].role == "MTURK"
def check_battery_create_permission(request):
    """Authenticated superusers and users holding the MTURK or LOCAL role
    may create batteries; anonymous users never can."""
    if request.user.is_anonymous():
        return False
    if request.user.is_superuser:
        return True
    roles = User.objects.filter(user=request.user)
    if len(roles) == 0:
        return False
    return roles[0].role in ("MTURK", "LOCAL")
def check_battery_delete_permission(request, battery):
    """The battery owner and superusers may delete a battery; anonymous
    visitors may not."""
    if request.user.is_anonymous():
        return False
    if request.user == battery.owner or request.user.is_superuser:
        return True
    return False
def check_battery_edit_permission(request, battery):
    """The owner, any listed contributor, and superusers may edit a battery;
    anonymous visitors may not."""
    if request.user.is_anonymous():
        return False
    if request.user == battery.owner:
        return True
    if request.user in battery.contributors.all():
        return True
    return bool(request.user.is_superuser)
#### GETS #############################################################
# get experiment template
def get_experiment_template(eid, request, mode=None):
    """Fetch an ExperimentTemplate by primary key, raising Http404 if absent."""
    try:
        return ExperimentTemplate.objects.get(pk=eid)
    except ExperimentTemplate.DoesNotExist:
        raise Http404
# get experiment
def get_experiment(eid, request, mode=None):
    """Fetch a battery Experiment instance by id, raising Http404 if absent."""
    try:
        return Experiment.objects.get(id=eid)
    except Experiment.DoesNotExist:
        raise Http404
# get battery with experiments
def get_battery(bid, request):
    """Fetch a Battery (with its experiments) by primary key, raising Http404
    if absent."""
    try:
        return Battery.objects.get(pk=bid)
    except Battery.DoesNotExist:
        raise Http404
#### VIEWS #############################################################
# View a single experiment
@login_required
def update_experiment_template(request, eid):
    '''This will update static files, along with the config.json parameters'''
    # Default context in case the user lacks permission (non-superusers get
    # the unmodified experiment list with no message).
    context = {"experiments": ExperimentTemplate.objects.all()}
    if request.user.is_superuser:
        experiment = get_experiment_template(eid=eid, request=request)
        experiment_type = get_experiment_type(experiment)
        # Re-install the single template; returns tags that failed.
        errored_experiments = install_experiments(
            experiment_tags=[
                experiment.exp_id],
            repo_type=experiment_type)
        if len(errored_experiments) > 0:
            message = "The experiments %s did not update successfully." % (
                ",".join(errored_experiments))
        else:
            message = "Experiments updated successfully."
        # Re-query so the list reflects the re-install, and add the message.
        experiments = ExperimentTemplate.objects.all()
        context = {"experiments": experiments,
                   "message": message}
    return render(request, "experiments/all_experiments.html", context)
# View a single experiment
def view_experiment(request, eid, bid=None):
    """Detail view for either a battery experiment (bid given) or a template.

    With ``bid`` the battery's edit permission governs both edit and delete;
    without it, template-level (superuser) permission applies.
    """
    # Determine permissions for edit and deletion
    context = dict()
    context["edit_permission"] = check_experiment_edit_permission(request)
    context["delete_permission"] = context["edit_permission"]
    # View an experiment associated with a battery
    if bid:
        experiment = get_experiment(eid, request)
        battery = get_battery(bid, request)
        context["edit_permission"] = check_battery_edit_permission(
            request, battery)
        context["delete_permission"] = context["edit_permission"]  # same for now
        template = 'experiments/experiment_details.html'
    # An experiment template
    else:
        experiment = get_experiment_template(eid, request)
        template = 'experiments/experiment_template_details.html'
        context["experiment_type"] = get_experiment_type(experiment)
        battery = None
    context["battery"] = battery
    context["experiment"] = experiment
    return render_to_response(template, context)
# View a battery
@login_required
def view_battery(request, bid):
    """Battery detail page: refreshes HIT assignments, builds share links,
    computes viewer permissions, and buckets assignments by status."""
    battery = get_battery(bid, request)
    # Get associated HITS, update all
    hits = HIT.objects.filter(battery=battery)
    # Use task to update assignments
    for hit in hits:
        update_assignments.apply_async([hit.id])
    # Generate anonymous link
    # NOTE(review): hashlib.md5 over a plain str only works on Python 2;
    # Python 3 would require battery.name.encode() -- confirm target runtime.
    anon_link = "%s/batteries/%s/%s/anon" % (
        DOMAIN_NAME, bid, hashlib.md5(battery.name).hexdigest())
    # Generate gmail auth link
    gmail_link = "%s/batteries/%s/%s/auth" % (
        DOMAIN_NAME, bid, hashlib.md5(battery.name).hexdigest())
    # Determine permissions for edit and deletion
    edit_permission = check_battery_edit_permission(request, battery)
    delete_permission = check_battery_edit_permission(request, battery)
    mturk_permission = check_mturk_access(request)
    # Render assignment details, bucketed by Assignment.status code
    # (A=accepted, None=no status, S=submitted, R=rejected).
    assignments = dict()
    assignments["accepted"] = [
        a for a in Assignment.objects.filter(
            hit__battery=battery) if a.status == "A"]
    assignments["none"] = [
        a for a in Assignment.objects.filter(
            hit__battery=battery) if a.status is None]
    assignments["submit"] = [
        a for a in Assignment.objects.filter(
            hit__battery=battery) if a.status == "S"]
    assignments["rejected"] = [
        a for a in Assignment.objects.filter(
            hit__battery=battery) if a.status == "R"]
    context = {'battery': battery,
               'edit_permission': edit_permission,
               'delete_permission': delete_permission,
               'mturk_permission': mturk_permission,
               'hits': hits,
               'anon_link': anon_link,
               'gmail_link': gmail_link,
               'assignments': assignments}
    return render(request, 'experiments/battery_details.html', context)
# All experiments
def experiments_view(request):
    """Render the listing of all installed experiment templates, exposing the
    delete permission (superuser only) to the template."""
    context = {
        'experiments': ExperimentTemplate.objects.all(),
        'delete_permission': check_experiment_edit_permission(request),
    }
    return render(request, 'experiments/all_experiments.html', context)
# All batteries
@login_required
def batteries_view(request, uid=None):
    """List batteries: all of them, or only those owned by user ``uid``.

    Parameters
    ----------
    request : HttpRequest
    uid : optional owner id used to filter the battery list.
    """
    if not uid:
        batteries = Battery.objects.all()
    else:
        batteries = Battery.objects.filter(owner_id=uid)
    # Removed the unused local `generate_battery_permission` (dead code:
    # it was assigned but never read or passed to the template).
    context = {'batteries': batteries}
    return render(request, 'experiments/all_batteries.html', context)
# Errors and Messages ----------------------------------------------------
def enable_cookie_view(request):
    '''enable_cookie_view alerts user cookies not enabled
    '''
    return render_to_response("experiments/cookie_sorry.html")
# Preview and Serving ----------------------------------------------------
# Preview experiments - right now just for templates
def preview_experiment(request, eid):
    """Render a standalone preview of an experiment template from its
    installed static folder (no battery, no result recording)."""
    experiment = get_experiment_template(eid, request)
    experiment_type = get_experiment_type(experiment)
    experiment_folder = os.path.join(
        media_dir, experiment_type, experiment.exp_id)
    # e.g. "experiments/experiment_preview.html" (type name minus trailing 's').
    template = '%s/%s_preview.html' % (experiment_type, experiment_type[:-1])
    experiment_html = embed_experiment(experiment_folder, url_prefix="/")
    context = {"preview_html": experiment_html}
    return render_to_response(template, context)
@login_required
def generate_battery_user(request, bid):
    '''add a new user login url to take a battery'''
    battery = get_battery(bid, request)
    context = {"battery": battery,
               "domain": settings.DOMAIN_NAME}
    if check_battery_edit_permission(request, battery):
        # Create a fresh anonymous worker keyed by a random UUID; the page
        # shows the resulting login URL for that user.
        userid = uuid.uuid4()
        worker = get_worker(userid)
        context["new_user"] = userid
        worker.save()
        return render_to_response(
            'experiments/generate_battery_user.html', context)
    else:
        # No edit permission: bounce back to the battery page.
        return HttpResponseRedirect(battery.get_absolute_url())
def get_battery_intro(battery, show_advertisement=True):
    """Assemble the ordered intro pages (advertisement, consent, instructions)
    shown before a battery starts.

    Each entry is a dict with "title" and "html" keys. The consent page's
    title must stay exactly "Consent" -- instructions_modal.html keys off
    that string.
    """
    pages = []
    if show_advertisement and battery.advertisement is not None:
        pages.append({"title": "Advertisement",
                      "html": battery.advertisement})
    if battery.consent is not None:
        pages.append({"title": "Consent", "html": battery.consent})
    if battery.instructions is not None:
        pages.append({"title": "Instructions",
                      "html": battery.instructions})
    return pages
def serve_battery_anon(request, bid, keyid):
    '''serve an anonymous local battery, userid is generated upon going to link'''
    # Check if the keyid is correct: the shared link embeds the md5 of the
    # battery name as a lightweight access token.
    # NOTE(review): md5 over a plain str is Python-2-only; confirm runtime.
    battery = get_battery(bid, request)
    uid = hashlib.md5(battery.name).hexdigest()
    if uid == keyid:
        # Token matches: mint a new anonymous worker and start the intro flow.
        userid = uuid.uuid4()
        worker = get_worker(userid, create=True)
        return redirect("intro_battery", bid=bid, userid=userid)
    else:
        return render_to_response("turk/robot_sorry.html")
@csrf_protect
def serve_battery_gmail(request, bid):
    '''serves a battery, creating user with gmail'''
    # Check if the keyid is correct: the form posts back the md5-of-name token
    # plus the visitor's gmail address.
    battery = get_battery(bid, request)
    uid = hashlib.md5(battery.name).hexdigest()
    if "keyid" in request.POST and "gmail" in request.POST:
        keyid = request.POST["keyid"]
        address = request.POST["gmail"]
        if uid == keyid:
            # Derive a stable worker id from the email so returning users
            # resume the same battery progress.
            userid = hashlib.md5(address).hexdigest()
            worker = get_worker(userid, create=True)
            return redirect("intro_battery", bid=bid, userid=userid)
        else:
            return render_to_response("turk/robot_sorry.html")
    else:
        return render_to_response("turk/robot_sorry.html")
def preview_battery(request, bid):
    """Show the battery intro pages with a dummy start URL (no worker)."""
    # No robots allowed!
    if request.user_agent.is_bot:
        return render_to_response("turk/robot_sorry.html")
    if request.user_agent.is_pc:
        battery = get_battery(bid, request)
        context = {"instruction_forms": get_battery_intro(battery),
                   "start_url": "/batteries/%s/dummy" % (bid),
                   "assignment_id": "assenav tahcos"}
        return render(request, "turk/serve_battery_intro.html", context)
    # NOTE(review): user agents that are neither bot nor PC (e.g. mobile)
    # fall through and return None -- presumably unintended; confirm.
def intro_battery(request, bid, userid=None):
    """Show the battery intro pages with a real accept URL for ``userid``."""
    # No robots allowed!
    if request.user_agent.is_bot:
        return render_to_response("turk/robot_sorry.html")
    if request.user_agent.is_pc:
        battery = get_battery(bid, request)
        context = {"instruction_forms": get_battery_intro(battery),
                   "start_url": "/batteries/%s/%s/accept" % (bid, userid),
                   "assignment_id": "assenav tahcos"}
        return render(request, "turk/serve_battery_intro.html", context)
    # NOTE(review): non-bot, non-PC user agents fall through and return
    # None -- presumably unintended; confirm.
@login_required
def dummy_battery(request, bid):
    '''dummy_battery lets the user run a faux battery (preview)'''
    battery = get_battery(bid, request)
    deployment = "docker-local"
    # Does the worker have experiments remaining?
    # Preview treats every experiment in the battery as uncompleted.
    task_list = select_experiments(
        battery, uncompleted_experiments=battery.experiments.all())
    experimentTemplate = ExperimentTemplate.objects.filter(
        exp_id=task_list[0].template.exp_id)[0]
    experiment_type = get_experiment_type(experimentTemplate)
    # Narrow to the battery's instances of the chosen template.
    task_list = battery.experiments.filter(template=experimentTemplate)
    result = None
    context = {"worker_id": "Dummy Worker"}
    # Games and surveys have preview-specific templates; experiments reuse
    # the normal serve template.
    if experiment_type in ["games", "surveys"]:
        template = "%s/serve_battery_preview.html" % (experiment_type)
    else:
        template = "%s/serve_battery.html" % (experiment_type)
    return deploy_battery(deployment="docker-preview",
                          battery=battery,
                          experiment_type=experiment_type,
                          context=context,
                          task_list=task_list,
                          template=template,
                          result=result)
@ensure_csrf_cookie
def serve_battery(request, bid, userid=None):
    '''prepare for local serve of battery'''
    next_page = None
    battery = get_battery(bid, request)
    # No robots allowed!
    if request.user_agent.is_bot:
        return render_to_response("turk/robot_sorry.html")
    # Is userid not defined, redirect them to preview
    if userid is None:
        return preview_battery(request, bid)
    worker = get_worker(userid, create=False)
    if isinstance(worker, list):  # no id means returning []
        return render_to_response("turk/invalid_id_sorry.html")
    # Batteries can require or forbid other batteries; refuse service if
    # those dependencies are unmet.
    missing_batteries, blocking_batteries = check_battery_dependencies(
        battery, userid)
    if missing_batteries or blocking_batteries:
        return render_to_response(
            "turk/battery_requirements_not_met.html",
            context={'missing_batteries': missing_batteries,
                     'blocking_batteries': blocking_batteries}
        )
    # Try to get some info about browser, language, etc.
    browser = "%s,%s" % (request.user_agent.browser.family,
                         request.user_agent.browser.version_string)
    platform = "%s,%s" % (request.user_agent.os.family,
                          request.user_agent.os.version_string)
    deployment = "docker-local"
    # Does the worker have experiments remaining?
    uncompleted_experiments = get_worker_experiments(worker, battery)
    experiments_left = len(uncompleted_experiments)
    if experiments_left == 0:
        # Thank you for your participation - no more experiments!
        return render_to_response("turk/worker_sorry.html")
    task_list = select_experiments(battery, uncompleted_experiments)
    experimentTemplate = ExperimentTemplate.objects.filter(
        exp_id=task_list[0].template.exp_id)[0]
    experiment_type = get_experiment_type(experimentTemplate)
    task_list = battery.experiments.filter(template=experimentTemplate)
    # Generate a new results object for the worker, assignment, experiment
    result, _ = Result.objects.update_or_create(
        worker=worker, experiment=experimentTemplate, battery=battery, defaults={
            "browser": browser, "platform": platform})
    result.save()
    context = {"worker_id": worker.id,
               "uniqueId": result.id}
    # If this is the last experiment, the finish button will link to a thank
    # you page.
    if experiments_left == 1:
        next_page = "/finished"
    # Determine template name based on template_type
    template = "%s/serve_battery.html" % (experiment_type)
    return deploy_battery(
        deployment="docker-local",
        battery=battery,
        experiment_type=experiment_type,
        context=context,
        task_list=task_list,
        template=template,
        next_page=next_page,
        result=result,
        experiments_left=experiments_left - 1
    )
def deploy_battery(deployment, battery, experiment_type, context, task_list,
                   template, result, next_page=None, last_experiment=False,
                   experiments_left=None):
    '''deploy_battery is a general function for returning the final view to deploy a battery, either local or MTurk
    :param deployment: either "docker-mturk" or "docker-local"
    :param battery: models.Battery object
    :param experiment_type: experiments,games,or surveys
    :param context: context, which should already include next_page,
    :param next_page: the next page to navigate to [optional] default is to reload the page to go to the next experiment
    :param task_list: list of models.Experiment instances
    :param template: html template to render
    :param result: the result object, turk.models.Result
    :param last_experiment: boolean if true will redirect the user to a page to submit the result (for surveys)
    :param experiments_left: integer indicating how many experiments are left in battery.
    '''
    if next_page is None:
        # Default: reload the page so the next experiment in the battery
        # is served by the same view.
        next_page = "javascript:window.location.reload();"
    context["next_page"] = next_page
    # Check the user blacklist status
    try:
        blacklist = Blacklist.objects.get(
            worker=result.worker, battery=battery)
        if blacklist.active:
            return render_to_response("experiments/blacklist.html")
    except BaseException:
        # NOTE(review): broad catch doubles as "no blacklist entry" AND
        # "result is None" (preview) -- consider narrowing.
        pass
    # Get experiment folders
    experiment_folders = [
        os.path.join(
            media_dir,
            experiment_type,
            x.template.exp_id) for x in task_list]
    context["experiment_load"] = get_load_static(
        experiment_folders, url_prefix="/")
    # Get code to run the experiment (not in external file)
    runcode = ""
    # Experiments templates
    if experiment_type in ["experiments"]:
        runcode = get_experiment_run(
            experiment_folders, deployment=deployment)[
            task_list[0].template.exp_id]
        if result is not None:
            runcode = runcode.replace("{{result.id}}", str(result.id))
        runcode = runcode.replace("{{next_page}}", next_page)
        if experiments_left is not None:
            # Inject progress text, and reword the completion screen when
            # this was the final experiment of the battery.
            total_experiments = battery.experiments.count()
            expleft_msg = "</p><p>Experiments left in battery {0:d} out of {1:d}</p>"
            expleft_msg = expleft_msg.format(
                experiments_left, total_experiments)
            runcode = runcode.replace("</p>", expleft_msg)
            if experiments_left == 0:
                runcode = runcode.replace(
                    "<h1>Experiment Complete</h1>",
                    "<h1>All Experiments Complete</h1>")
                runcode = runcode.replace(
                    "You have completed the experiment",
                    "You have completed all experiments")
                # Bug fix: the replacement text previously read "Finised".
                runcode = runcode.replace(
                    "Click \"Next Experiment\" to keep your result, and progress to the next task",
                    "Click \"Finished\" to keep your result.")
                runcode = runcode.replace(
                    ">Next Experiment</button>",
                    ">Finished</button>")
    elif experiment_type in ["games"]:
        runcode = load_experiment(
            experiment_folders[0])[0]["deployment_variables"]["run"]
    elif experiment_type in ["surveys"]:
        experiment = load_experiment(experiment_folders[0])
        resultid = ""
        if result is not None:
            resultid = result.id
        runcode, validation = generate_survey(
            experiment, experiment_folders[0], form_action="/local/%s/" %
            resultid, csrf_token=True)
        # Field will be filled in by browser cookie, and hidden fields are
        # added for data
        csrf_field = '<input type="hidden" name="csrfmiddlewaretoken" value="hello">'
        csrf_field = '%s\n<input type="hidden" name="djstatus" value="FINISHED">' % (
            csrf_field)
        csrf_field = '%s\n<input type="hidden" name="url" value="chickenfingers">' % (
            csrf_field)
        runcode = runcode.replace("{% csrf_token %}", csrf_field)
        context["validation"] = validation
    if last_experiment:
        context["last_experiment"] = last_experiment
    context["run"] = runcode
    response = render_to_response(template, context)
    # without this header, the iFrame will not render in Amazon
    response['x-frame-options'] = 'this_can_be_anything'
    return response
# These views are to work with backbone.js
@ensure_csrf_cookie
def sync(request, rid=None):
    '''localsync
    view/method for running experiments to get data from the server
    :param rid: the result object ID, obtained before user sees page
    '''
    if request.method == "POST":
        if rid is not None:
            # Update the result, already has worker and assignment ID stored
            result, _ = Result.objects.get_or_create(id=rid)
            battery = result.battery
            experiment_template = get_experiment_type(result.experiment)
            # Payload shape depends on the experiment type: experiments and
            # games POST JSON bodies, surveys POST a regular form.
            if experiment_template == "experiments":
                data = json.loads(request.body)
                result.taskdata = data["taskdata"]["data"]
                result.current_trial = data["taskdata"]["currenttrial"]
                djstatus = data["djstatus"]
            elif experiment_template == "games":
                data = json.loads(request.body)
                redirect_url = data["redirect_url"]
                result.taskdata = data["taskdata"]
                djstatus = data["djstatus"]
            elif experiment_template == "surveys":
                data = request.POST
                redirect_url = data["url"]
                djstatus = data["djstatus"]
                # Remove keys we don't want
                data = remove_keys(
                    data, [
                        "process", "csrfmiddlewaretoken", "url", "djstatus"])
                result.taskdata = complete_survey_result(
                    result.experiment.exp_id, data)
            result.save()
            # if the worker finished the current experiment
            if djstatus == "FINISHED":
                # Mark experiment as completed
                result.completed = True
                result.finishtime = timezone.now()
                result.version = result.experiment.version
                result.save()
                # Fire a task to check blacklist status, add bonus
                check_blacklist.apply_async([result.id])
                experiment_reward.apply_async([result.id])
                data = dict()
                data["finished_battery"] = "NOTFINISHED"
                data["djstatus"] = djstatus
                # Battery is finished once the worker has completed one
                # result per distinct experiment template in the battery.
                completed_experiments = get_worker_experiments(
                    result.worker, battery, completed=True)
                completed_experiments = numpy.unique(
                    [x.template.exp_id for x in completed_experiments]).tolist()
                if len(completed_experiments) == battery.experiments.count():
                    assign_experiment_credit.apply_async(
                        [result.worker.id], countdown=60)
                    data["finished_battery"] = "FINISHED"
            # Refresh the page if we've completed a survey or game
            if experiment_template in ["surveys"]:
                return redirect(redirect_url)
            data = json.dumps(data)
    else:
        data = json.dumps({"message": "received!"})
    return HttpResponse(data, content_type='application/json')
#### EDIT/ADD/DELETE ###################################################
# General install functions ----------------------------------------------
@login_required
def add_new_template(request, template_type):
    """Render the install page for a template type (surveys, games, or
    experiments), listing only templates not yet installed."""
    available = get_experiment_selection(template_type)
    # e.g. "surveys/add_survey_template.html" (type name minus trailing 's').
    template = "%s/add_%s_template.html" % (template_type, template_type[:-1])
    installed = ExperimentTemplate.objects.all()
    installed_tags = [e.exp_id for e in installed]
    not_installed = [e for e in available
                     if e["exp_id"] not in installed_tags]
    context = {"newtemplates": not_installed,
               "experiments": installed}
    return render(request, template, context)
@login_required
def save_new_template(request, template_type):
    '''save_new_template
    view for actually adding new surveys, experiments, or games (files, etc) to application and database
    '''
    # POST keys are the exp_ids the user ticked on the install page.
    newtemplates = request.POST.keys()
    new_selection = get_experiment_selection(template_type)
    selected_experiments = [e["exp_id"]
                            for e in new_selection if e["exp_id"] in newtemplates]
    # install_experiments returns the tags that failed to install.
    errored = install_experiments(
        experiment_tags=selected_experiments,
        repo_type=template_type)
    if len(errored) > 0:
        message = "The %s %s did not install successfully." % (
            template_type, ",".join(errored))
    else:
        message = "%s installed successfully." % (template_type)
    experiments = ExperimentTemplate.objects.all()
    context = {"experiments": experiments,
               "message": message}
    return render(request, "experiments/all_experiments.html", context)
# Install Templates ----------------------------------------------------------
@login_required
def add_experiment_template(request):
    """Show installable experiment templates (wrapper over add_new_template)."""
    return add_new_template(request, "experiments")
@login_required
def add_survey_template(request):
    """Show installable survey templates (wrapper over add_new_template)."""
    return add_new_template(request, "surveys")
@login_required
def add_game_template(request):
    """Show installable game templates (wrapper over add_new_template)."""
    return add_new_template(request, "games")
@login_required
def save_experiment_template(request):
    """Install selected experiment templates (wrapper over save_new_template)."""
    return save_new_template(request, "experiments")
@login_required
def save_survey_template(request):
    """Install selected survey templates (wrapper over save_new_template)."""
    return save_new_template(request, "surveys")
@login_required
def save_game_template(request):
    """Install selected game templates (wrapper over save_new_template)."""
    return save_new_template(request, "games")
@login_required
def edit_experiment_template(request, eid=None):
    '''edit_experiment_template
    view for editing a single experiment. Likely only will be useful to change publication status
    '''
    # Editing an existing experiment
    if eid:
        experiment = get_experiment_template(eid, request)
    else:
        return HttpResponseRedirect("add_experiment_template")
    if request.method == "POST":
        form = ExperimentTemplateForm(request.POST, instance=experiment)
        if form.is_valid():
            experiment = form.save(commit=False)
            experiment.save()
            # Removed a dead `context = {...}` assignment that was built
            # here and immediately discarded by the redirect below.
            return HttpResponseRedirect(experiment.get_absolute_url())
    else:
        form = ExperimentTemplateForm(instance=experiment)
    context = {"form": form,
               "experiment": experiment}
    return render(
        request,
        "experiments/edit_experiment_template.html",
        context)
# Delete an experiment
@login_required
def delete_experiment_template(request, eid, do_redirect=True):
    """Delete an experiment template along with its battery instances,
    installed static files, results, and (when orphaned) its Cognitive
    Atlas task. Only superusers may delete; others fall through to the
    redirect.
    """
    experiment = get_experiment_template(eid, request)
    experiment_instances = Experiment.objects.filter(template=experiment)
    experiment_type = get_experiment_type(experiment)
    if check_experiment_edit_permission(request):
        # Idiom fix: plain loops instead of side-effect list comprehensions.
        for instance in experiment_instances:
            instance.delete()
        # Static Files
        static_files_dir = os.path.join(
            media_dir, experiment_type, experiment.exp_id)
        if os.path.exists(static_files_dir):
            shutil.rmtree(static_files_dir)
        # delete associated results
        for result in Result.objects.filter(experiment=experiment):
            result.delete()
        # Cognitive Atlas Task: only delete it when this template is its
        # last remaining user.
        task = experiment.cognitive_atlas_task
        try:
            if experiment.cognitive_atlas_task.experiment_set.count() == 1:
                # We might want to delete concepts too? Ok for now.
                task.delete()
        except BaseException:
            # Task may be None or already deleted; best-effort cleanup.
            pass
        experiment.delete()
    if do_redirect:
        return redirect('experiments')
# Experiments ----------------------------------------------------------
@login_required
def edit_experiment(request, bid, eid):
    '''edit_experiment
    view to edit experiment already added to battery
    '''
    battery = get_battery(bid, request)
    experiment = get_experiment(eid, request)
    if request.method == "POST":
        form = ExperimentForm(request.POST, instance=experiment)
        if form.is_valid():
            experiment = form.save(commit=False)
            experiment.save()
            # Recompute credit amounts for every condition after edits.
            for cc in experiment.credit_conditions.all():
                update_credits(experiment, cc.id)
            return HttpResponseRedirect(battery.get_absolute_url())
    else:
        form = ExperimentForm(instance=experiment)
    context = {"form": form,
               "experiment": experiment,
               "battery": battery}
    return render(request, "experiments/edit_experiment.html", context)
@login_required
def save_experiment(request, bid):
    '''save_experiment
    save experiment and custom details for battery
    '''
    if request.method == "POST":
        post_vars = request.POST.keys()
        battery = get_battery(bid, request)
        template = get_experiment_template(request.POST["experiment"], request)
        # Form field names embed the ExperimentVariable id (val<id>, oper<id>,
        # amt<id>); extract the distinct variable ids from the POST keys.
        expression = re.compile("[0-9]+")
        experiment_vids = numpy.unique([expression.findall(
            x)[0] for x in post_vars if expression.search(x)]).tolist()
        # Create a credit condition for each experiment variable
        credit_conditions = []
        include_bonus = False
        include_catch = False
        for vid in experiment_vids:
            # Assume that adding the credit condition means the user wants them
            # turned on
            if ((template.performance_variable is not None) and (
                    int(vid) == template.performance_variable.id)):
                include_bonus = True
            if ((template.rejection_variable is not None) and (
                    int(vid) == template.rejection_variable.id)):
                include_catch = True
            experiment_variable = ExperimentVariable.objects.filter(id=vid)[0]
            variable_value = request.POST["val%s" % (
                vid)] if "val%s" % (vid) in post_vars else None
            variable_operator = request.POST["oper%s" % (
                vid)] if "oper%s" % (vid) in post_vars else None
            variable_amount = request.POST["amt%s" % (
                vid)] if "amt%s" % (vid) in post_vars else None
            credit_condition, _ = CreditCondition.objects.update_or_create(variable=experiment_variable,
                                                                           value=variable_value,
                                                                           operator=variable_operator,
                                                                           amount=variable_amount)
            credit_condition.save()
            credit_conditions.append(credit_condition)
        # Create the experiment to add to the battery
        experiment = Experiment.objects.create(template=template,
                                               include_bonus=include_bonus,
                                               include_catch=include_catch)
        experiment.save()
        experiment.credit_conditions = credit_conditions
        experiment.save()
        # Add to battery, will replace old version if it exists
        current_experiments = [e for e in battery.experiments.all(
        ) if e.template.exp_id not in template.exp_id]
        current_experiments.append(experiment)
        battery.experiments = current_experiments
        battery.save()
    return HttpResponseRedirect(battery.get_absolute_url())
@login_required
def prepare_change_experiment(
        request,
        battery,
        experiments,
        change_type="Edit"):
    '''Render the add/edit-experiment page for a battery.

    Depending on the caller this shows either experiments not yet in the
    battery or experiments already installed.

    :param battery: expdj.apps.experiments.models.Battery
    :param experiments: expdj.apps.experiments.models.ExperimentTemplate
    :param change_type: label shown to the user for the operation,
        e.g. "Edit" or "Add New" [Experiment]
    '''
    # Serialize each template to a dict, expanding the performance and
    # rejection variables so the client sees them inline.
    experimentsbytag = {}
    for template in experiments:
        serialized = model_to_dict(template)
        if template.performance_variable:
            serialized["performance_variable"] = model_to_dict(
                template.performance_variable)
        if template.rejection_variable:
            serialized["rejection_variable"] = model_to_dict(
                template.rejection_variable)
        experimentsbytag[serialized["exp_id"]] = serialized
    # Present in abc order, color by new/old experiments
    ordered_tags = sorted(template.exp_id for template in experiments)
    experiments_sorted = [experimentsbytag[tag] for tag in ordered_tags]
    context = {"allexperiments": experiments_sorted,
               "allexperimentsjson": json.dumps(experimentsbytag),
               "bid": battery.id,
               "change_type": change_type}
    return render(request, "experiments/add_experiment.html", context)
@login_required
def modify_experiment(request, bid):
    '''Show the experiments already installed in a battery for editing.

    :param bid: the battery id
    '''
    battery = get_battery(bid, request)
    # Collect the exp_ids currently installed in this battery
    installed_ids = set()
    for member in battery.experiments.all():
        installed_ids.add(member.template.exp_id)
    installed_templates = [template
                           for template in ExperimentTemplate.objects.all()
                           if template.exp_id in installed_ids]
    return prepare_change_experiment(request, battery, installed_templates)
@login_required
def add_experiment(request, bid):
    '''Show the experiments NOT yet installed so the user can add them.

    :param bid: the battery id
    '''
    battery = get_battery(bid, request)
    # exp_ids already present in the battery
    installed_ids = set()
    for member in battery.experiments.all():
        installed_ids.add(member.template.exp_id)
    candidates = [template
                  for template in ExperimentTemplate.objects.all()
                  if template.exp_id not in installed_ids]
    return prepare_change_experiment(
        request, battery, candidates, "Add New")
@login_required
def change_experiment_order(request, bid, eid):
    '''change_experiment_order changes the ordering of experiment presentation.
    Any integer value is allowed, and duplicate values means that experiments will
    the equivalent number will be selected from randomly.
    :param bid: the battery id
    :param eid: the experiment id
    '''
    experiment = get_experiment(eid, request)
    battery = get_battery(bid, request)
    can_edit = check_experiment_edit_permission(request)
    if request.method == "POST":
        # Silently ignore POSTs from users without edit permission
        if can_edit:
            if "order" in request.POST:
                new_order = request.POST["order"]
                # Skip empty submissions; int("") would raise
                # NOTE(review): int() still raises ValueError on non-numeric
                # input — confirm the form validates this upstream.
                if new_order != "":
                    experiment.order = int(new_order)
                    experiment.save()
    # Land back on the battery page whether or not the order changed
    return HttpResponseRedirect(battery.get_absolute_url())
@login_required
def remove_experiment(request, bid, eid):
    '''remove_experiment
    removes an experiment from a battery

    :param bid: the battery id
    :param eid: the experiment id
    '''
    battery = get_battery(bid, request)
    experiment = get_experiment(eid, request)
    if check_battery_edit_permission(request, battery):
        battery.experiments = [
            x for x in battery.experiments.all() if x.id != experiment.id]
        battery.save()
        # If experiment is not linked to other batteries, delete it.
        # exists() asks the database directly instead of materializing the
        # whole queryset just to take its len().
        if not Battery.objects.filter(experiments__id=experiment.id).exists():
            experiment.delete()
    return HttpResponseRedirect(battery.get_absolute_url())
# Conditions -----------------------------------------------------------
@login_required
def remove_condition(request, bid, eid, cid):
    '''remove_condition: removes a condition from being associated with a battery
    :param bid: the battery id
    :param eid: the experiment id
    :param cid: the credit condition id
    '''
    # NOTE(review): unlike remove_experiment, there is no battery edit
    # permission check here — confirm that is intentional.
    battery = get_battery(bid, request)
    experiment = get_experiment(eid, request)
    credit_condition = CreditCondition.objects.filter(id=cid)[0]
    # Detach the condition from this experiment
    experiment.credit_conditions = [
        c for c in experiment.credit_conditions.all() if c != credit_condition]
    # Delete credit condition if not attached to experiments
    if len(Experiment.objects.filter(credit_conditions__id=cid)) == 0:
        credit_condition.delete()
    # Deletes condition from experiments, if not used from database, turns
    # bonus/rejection on/off
    update_credits(experiment, cid)
    form = ExperimentForm(instance=experiment)
    context = {"form": form,
               "experiment": experiment,
               "battery": battery}
    return render(request, "experiments/edit_experiment.html", context)
# Battery --------------------------------------------------------------
@login_required
def add_battery(request):
    '''Entry point for adding a battery; creation itself happens through
    edit_battery, so this simply routes back to the battery listing.
    '''
    return redirect('batteries')
@login_required
def edit_battery(request, bid=None):
    '''Create (no bid) or update (bid given) a battery.

    Requires battery-create permission; only the owner may submit changes.
    Contributors are saved through the form's m2m relation.

    :param bid: battery id to edit, or None to create a new battery
    '''
    # Does the user have mturk permission?
    mturk_permission = check_mturk_access(request)
    battery_permission = check_battery_create_permission(request)
    if battery_permission:
        header_text = "Add new battery"
        if bid:
            battery = get_battery(bid, request)
            is_owner = battery.owner == request.user
            header_text = battery.name
            battery_edit_permission = check_battery_edit_permission(
                request, battery)
            if not battery_edit_permission:
                return HttpResponseForbidden()
        else:
            # New battery: the requesting user becomes the owner
            is_owner = True
            battery = Battery(owner=request.user)
            battery_edit_permission = True
        if request.method == "POST":
            if is_owner:
                form = BatteryForm(request.POST, instance=battery)
            else:
                return HttpResponse('Unauthorized', status=401)
            if form.is_valid():
                battery = form.save(commit=False)
                battery.save()
                # We know is_owner is True here (non-owners returned above).
                form.save_m2m()  # save contributors
                # CLEANUP: the previous/current contributor diff computed
                # here was never used (dead locals removed).
                return HttpResponseRedirect(battery.get_absolute_url())
            # Invalid form falls through and re-renders with errors bound.
        else:
            # CLEANUP: both branches of the original if/else built the same
            # unbound form, so the conditional was redundant.
            form = BatteryForm(instance=battery)
        context = {"form": form,
                   "is_owner": is_owner,
                   "header_text": header_text,
                   "mturk_permission": mturk_permission,
                   "battery_edit_permission": battery_edit_permission}
        return render(request, "experiments/edit_battery.html", context)
    else:
        return redirect("batteries")
# Delete a battery
@login_required
def delete_battery(request, bid):
    '''Delete a battery together with its HITs and Results, if the user
    has delete permission.

    :param bid: the battery id
    '''
    battery = get_battery(bid, request)
    delete_permission = check_battery_delete_permission(request, battery)
    if delete_permission:
        # Delete dependents one-by-one (plain loops instead of the original
        # side-effect list comprehensions) so any per-model delete() logic
        # and signals still run for each object.
        for hit in HIT.objects.filter(battery=battery):
            hit.delete()
        for result in Result.objects.filter(battery=battery):
            result.delete()
        battery.delete()
    return redirect('batteries')
@login_required
def subject_management(request, bid):
    '''subject_management includes blacklist criteria, etc.
    :param bid: the battery id
    '''
    battery = get_battery(bid, request)
    blacklists = Blacklist.objects.filter(battery=battery)
    bonuses = Bonus.objects.filter(battery=battery)
    if request.method != "POST":
        form = BlacklistForm(instance=battery)
    else:
        form = BlacklistForm(request.POST, instance=battery)
        if form.is_valid():
            battery = form.save()
            return HttpResponseRedirect(battery.get_absolute_url())
        # An invalid POST falls through and re-renders with errors bound.
    context = {"form": form,
               "battery": battery,
               "blacklists": blacklists,
               "bonuses": bonuses}
    return render(request, "experiments/subject_management.html", context)
#### EXPORT #############################################################
# Export specific experiment data
@login_required
def export_battery(request, bid):
    '''Download every result for a battery as a TSV attachment.

    :param bid: the battery id
    '''
    battery = get_battery(bid, request)
    filename = "expfactory_battery_%s.tsv" % battery.id
    return export_experiments(battery, filename)
# Export specific experiment data
@login_required
def export_experiment(request, eid):
    '''Download the results for one experiment, scoped to the first battery
    that contains it.

    :param eid: the experiment id
    '''
    battery = Battery.objects.filter(experiments__id=eid)[0]
    experiment = get_experiment(eid, request)
    filename = "expfactory_experiment_%s.tsv" % experiment.template.exp_id
    wanted_tags = [experiment.template.exp_id]
    return export_experiments(battery, filename, wanted_tags)
# General function to export some number of experiments
# General function to export some number of experiments
def export_experiments(battery, output_name, experiment_tags=None):
    '''Stream battery results to the client as a tab-separated attachment.

    :param battery: the battery whose results to export
    :param output_name: filename used in the Content-Disposition header
    :param experiment_tags: optional exp_id string or list of exp_ids;
        when given, rows from other experiments (between-trial stuff)
        are excluded
    '''
    # Get all results associated with Battery
    results = Result.objects.filter(battery=battery)
    # NOTE(review): the payload is tab-separated but the declared media type
    # is text/csv — left as-is since existing clients may depend on it.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="%s"' % (
        output_name)
    writer = csv.writer(response, delimiter='\t')
    # Make a results pandas dataframe
    df = make_results_df(battery, results)
    # Specifying individual experiments removes between trial stuff
    if experiment_tags is not None:
        if isinstance(experiment_tags, str):
            experiment_tags = [experiment_tags]
        df = df[df.experiment_exp_id.isin(experiment_tags)]
    # The program reading in values should fill in appropriate nan value
    df[df.isnull()] = ""
    # Write header
    writer.writerow(df.columns.tolist())
    for row in df.iterrows():
        try:
            values = row[1].tolist()
            writer.writerow(values)
        except Exception:
            # BUGFIX: was `except BaseException: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit. Still best-effort per row: skip
            # unwritable rows rather than abort the whole download.
            pass
    return response
#### RESULTS VISUALIZATION ###############################################
@login_required
def battery_results_dashboard(request, bid):
    '''Dashboard from which the user picks an experiment to view results
    for.

    :param bid: the battery id
    '''
    context = battery_results_context(request, bid)
    template_name = "experiments/results_dashboard_battery.html"
    return render(request, template_name, context)
@login_required
def battery_results_context(request, bid):
    '''Build the context (battery, completed experiment templates, bid)
    shared by the battery and experiment results dashboards.

    :param bid: the battery id
    '''
    battery = get_battery(bid, request)
    # Check if battery has results
    finished = Result.objects.filter(battery=battery, completed=True)
    completed_ids = numpy.unique(
        [r.experiment.exp_id for r in finished]).tolist()
    templates = ExperimentTemplate.objects.filter(
        exp_id__in=completed_ids)
    return {'battery': battery,
            'experiments': templates,
            'bid': battery.id}
@login_required
def experiment_results_dashboard(request, bid):
    '''experiment_results_dashboard will show the user a result for a particular experiment
    :param bid: the battery id
    '''
    if request.method == "POST":
        battery = get_battery(bid, request)
        template = get_experiment_template(request.POST["experiment"], request)
        results = get_battery_results(
            battery, exp_id=template.exp_id, clean=True)
        if len(results) == 0:
            # Nothing to show: fall back to the dashboard with a message
            context = battery_results_context(request, bid)
            context["message"] = "%s does not have any completed results." % template.name
            return render(
                request,
                "experiments/results_dashboard_battery.html",
                context)
        # If we have results, save updated file for shiny server
        # NOTE(review): the path is resolved relative to the process CWD —
        # confirm expfactory-explorer/data is reachable from where the app
        # runs.
        shiny_input = os.path.abspath(
            "expfactory-explorer/data/%s_data.tsv" %
            template.exp_id)
        results.to_csv(shiny_input, sep="\t", encoding="utf-8")
        # Hand off to the shiny server (port 3838) for visualization
        return HttpResponseRedirect('%s:3838' % settings.DOMAIN_NAME_HTTP)
    else:
        context = battery_results_context(request, bid)
        return render(
            request,
            "experiments/results_dashboard_battery.html",
            context)
| mit |
chetan51/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colorbar.py | 69 | 27260 | '''
Colorbar toolkit with two classes and a function:
:class:`ColorbarBase`
the base class with full colorbar drawing functionality.
It can be used as-is to make a colorbar for a given colormap;
a mappable object (e.g., image) is not needed.
:class:`Colorbar`
the derived class for use with images or contour plots.
:func:`make_axes`
a function for resizing an axes and adding a second axes
suitable for a colorbar
The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes`
and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function
is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`.
'''
import numpy as np
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import matplotlib.cbook as cbook
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.collections as collections
import matplotlib.contour as contour
make_axes_kw_doc = '''
========== ====================================================
Property Description
========== ====================================================
*fraction* 0.15; fraction of original axes to use for colorbar
*pad* 0.05 if vertical, 0.15 if horizontal; fraction
of original axes between colorbar and new image axes
*shrink* 1.0; fraction by which to shrink the colorbar
*aspect* 20; ratio of long to short dimensions
========== ====================================================
'''
colormap_kw_doc = '''
=========== ====================================================
Property Description
=========== ====================================================
*extend* [ 'neither' | 'both' | 'min' | 'max' ]
If not 'neither', make pointed end(s) for out-of-
range values. These are set for a given colormap
using the colormap set_under and set_over methods.
*spacing* [ 'uniform' | 'proportional' ]
Uniform spacing gives each discrete color the same
space; proportional makes the space proportional to
the data interval.
*ticks* [ None | list of ticks | Locator object ]
If None, ticks are determined automatically from the
input.
*format* [ None | format string | Formatter object ]
If None, the
:class:`~matplotlib.ticker.ScalarFormatter` is used.
If a format string is given, e.g. '%.3f', that is
used. An alternative
:class:`~matplotlib.ticker.Formatter` object may be
given instead.
*drawedges* [ False | True ] If true, draw lines at color
boundaries.
=========== ====================================================
The following will probably be useful only in the context of
indexed colors (that is, when the mappable has norm=NoNorm()),
or other unusual circumstances.
============ ===================================================
Property Description
============ ===================================================
*boundaries* None or a sequence
*values* None or a sequence which must be of length 1 less
than the sequence of *boundaries*. For each region
delimited by adjacent entries in *boundaries*, the
color mapped to the corresponding value in values
will be used.
============ ===================================================
'''
colorbar_doc = '''
Add a colorbar to a plot.
Function signatures for the :mod:`~matplotlib.pyplot` interface; all
but the first are also method signatures for the
:meth:`~matplotlib.figure.Figure.colorbar` method::
colorbar(**kwargs)
colorbar(mappable, **kwargs)
colorbar(mappable, cax=cax, **kwargs)
colorbar(mappable, ax=ax, **kwargs)
arguments:
*mappable*
the :class:`~matplotlib.image.Image`,
:class:`~matplotlib.contour.ContourSet`, etc. to
which the colorbar applies; this argument is mandatory for the
:meth:`~matplotlib.figure.Figure.colorbar` method but optional for the
:func:`~matplotlib.pyplot.colorbar` function, which sets the
default to the current image.
keyword arguments:
*cax*
None | axes object into which the colorbar will be drawn
*ax*
None | parent axes object from which space for a new
colorbar axes will be stolen
Additional keyword arguments are of two kinds:
axes properties:
%s
colorbar properties:
%s
If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend*
kwarg is included automatically.
Note that the *shrink* kwarg provides a simple way to keep a vertical
colorbar, for example, from being taller than the axes of the mappable
to which the colorbar is attached; but it is a manual method requiring
some trial and error. If the colorbar is too tall (or a horizontal
colorbar is too wide) use a smaller value of *shrink*.
For more precise control, you can manually specify the positions of
the axes objects in which the mappable and the colorbar are drawn. In
this case, do not use any of the axes properties kwargs.
returns:
:class:`~matplotlib.colorbar.Colorbar` instance; see also its base class,
:class:`~matplotlib.colorbar.ColorbarBase`. Call the
:meth:`~matplotlib.colorbar.ColorbarBase.set_label` method
to label the colorbar.
''' % (make_axes_kw_doc, colormap_kw_doc)
class ColorbarBase(cm.ScalarMappable):
'''
Draw a colorbar in an existing axes.
This is a base class for the :class:`Colorbar` class, which is the
basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab
function.
It is also useful by itself for showing a colormap. If the *cmap*
kwarg is given but *boundaries* and *values* are left as None,
then the colormap will be displayed on a 0-1 scale. To show the
under- and over-value colors, specify the *norm* as::
colors.Normalize(clip=False)
To show the colors versus index instead of on the 0-1 scale,
use::
norm=colors.NoNorm.
Useful attributes:
:attr:`ax`
the Axes instance in which the colorbar is drawn
:attr:`lines`
a LineCollection if lines were drawn, otherwise None
:attr:`dividers`
a LineCollection if *drawedges* is True, otherwise None
Useful public methods are :meth:`set_label` and :meth:`add_lines`.
'''
_slice_dict = {'neither': slice(0,1000000),
'both': slice(1,-1),
'min': slice(1,1000000),
'max': slice(0,-1)}
    def __init__(self, ax, cmap=None,
                 norm=None,
                 alpha=1.0,
                 values=None,
                 boundaries=None,
                 orientation='vertical',
                 extend='neither',
                 spacing='uniform',  # uniform or proportional
                 ticks=None,
                 format=None,
                 drawedges=False,
                 filled=True,
                 ):
        '''Store configuration, pick a locator/formatter, then render via
        draw_all(). *cmap*/*norm* default to the current colormap and a
        plain Normalize.
        '''
        self.ax = ax
        if cmap is None: cmap = cm.get_cmap()
        if norm is None: norm = colors.Normalize()
        self.alpha = alpha
        cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm)
        self.values = values
        self.boundaries = boundaries
        self.extend = extend
        # Slice selecting the interior (non-arrow) boundaries; see
        # _slice_dict on the class.
        self._inside = self._slice_dict[extend]
        self.spacing = spacing
        self.orientation = orientation
        self.drawedges = drawedges
        self.filled = filled
        self.solids = None
        self.lines = None
        self.dividers = None
        self.set_label('')
        # An explicit tick sequence becomes a FixedLocator; otherwise the
        # choice is deferred to _ticker().
        if cbook.iterable(ticks):
            self.locator = ticker.FixedLocator(ticks, nbins=len(ticks))
        else:
            self.locator = ticks    # Handle default in _ticker()
        if format is None:
            if isinstance(self.norm, colors.LogNorm):
                self.formatter = ticker.LogFormatter()
            else:
                self.formatter = ticker.ScalarFormatter()
        elif cbook.is_string_like(format):
            self.formatter = ticker.FormatStrFormatter(format)
        else:
            self.formatter = format  # Assume it is a Formatter
        # The rest is in a method so we can recalculate when clim changes.
        self.draw_all()
    def draw_all(self):
        '''
        Calculate any free parameters based on the current cmap and norm,
        and do all the drawing.
        '''
        self._process_values()
        self._find_range()
        X, Y = self._mesh()
        # Single column of values, as _add_solids/pcolor expects
        C = self._values[:,np.newaxis]
        self._config_axes(X, Y)
        if self.filled:
            self._add_solids(X, Y, C)
        self._set_label()
    def _config_axes(self, X, Y):
        '''
        Make an axes patch and outline.

        Also sets the data limits from the outline polygon and installs the
        ticks/ticklabels produced by _ticker() on the long axis.
        '''
        ax = self.ax
        ax.set_frame_on(False)
        ax.set_navigate(False)
        xy = self._outline(X, Y)
        ax.update_datalim(xy)
        ax.set_xlim(*ax.dataLim.intervalx)
        ax.set_ylim(*ax.dataLim.intervaly)
        # Outline drawn as an unclipped Line2D in the axes edge color
        self.outline = lines.Line2D(xy[:, 0], xy[:, 1], color=mpl.rcParams['axes.edgecolor'],
                                    linewidth=mpl.rcParams['axes.linewidth'])
        ax.add_artist(self.outline)
        self.outline.set_clip_box(None)
        self.outline.set_clip_path(None)
        # Background patch behind the solids (zorder=-1)
        c = mpl.rcParams['axes.facecolor']
        self.patch = patches.Polygon(xy, edgecolor=c,
                                     facecolor=c,
                                     linewidth=0.01,
                                     zorder=-1)
        ax.add_artist(self.patch)
        ticks, ticklabels, offset_string = self._ticker()
        if self.orientation == 'vertical':
            ax.set_xticks([])
            ax.yaxis.set_label_position('right')
            ax.yaxis.set_ticks_position('right')
            ax.set_yticks(ticks)
            ax.set_yticklabels(ticklabels)
            ax.yaxis.get_major_formatter().set_offset_string(offset_string)
        else:
            ax.set_yticks([])
            ax.xaxis.set_label_position('bottom')
            ax.set_xticks(ticks)
            ax.set_xticklabels(ticklabels)
            ax.xaxis.get_major_formatter().set_offset_string(offset_string)
def _set_label(self):
if self.orientation == 'vertical':
self.ax.set_ylabel(self._label, **self._labelkw)
else:
self.ax.set_xlabel(self._label, **self._labelkw)
    def set_label(self, label, **kw):
        '''
        Label the long axis of the colorbar

        Extra keyword arguments are forwarded to the axis label call.
        '''
        self._label = label
        self._labelkw = kw
        self._set_label()
    def _outline(self, X, Y):
        '''
        Return *x*, *y* arrays of colorbar bounding polygon,
        taking orientation into account.
        '''
        N = X.shape[0]
        # Corner indices walking around the flattened, transposed mesh
        # (both columns of the 2-column X/Y arrays).
        ii = [0, 1, N-2, N-1, 2*N-1, 2*N-2, N+1, N, 0]
        x = np.take(np.ravel(np.transpose(X)), ii)
        y = np.take(np.ravel(np.transpose(Y)), ii)
        # Column vectors so hstack yields an (npoints, 2) polygon
        x = x.reshape((len(x), 1))
        y = y.reshape((len(y), 1))
        if self.orientation == 'horizontal':
            return np.hstack((y, x))
        return np.hstack((x, y))
    def _edges(self, X, Y):
        '''
        Return the separator line segments; helper for _add_solids.
        '''
        N = X.shape[0]
        # Using the non-array form of these line segments is much
        # simpler than making them into arrays.
        # NOTE(review): zip() returns an iterator on Python 3; this file is
        # Python-2 era code where it returns a list of point pairs.
        if self.orientation == 'vertical':
            return [zip(X[i], Y[i]) for i in range(1, N-1)]
        else:
            return [zip(Y[i], X[i]) for i in range(1, N-1)]
    def _add_solids(self, X, Y, C):
        '''
        Draw the colors using :meth:`~matplotlib.axes.Axes.pcolor`;
        optionally add separators.
        '''
        ## Change to pcolorfast after fixing bugs in some backends...
        if self.orientation == 'vertical':
            args = (X, Y, C)
        else:
            # Horizontal colorbar: swap/transpose so pcolor draws sideways
            args = (np.transpose(Y), np.transpose(X), np.transpose(C))
        kw = {'cmap':self.cmap, 'norm':self.norm,
              'shading':'flat', 'alpha':self.alpha}
        # Save, set, and restore hold state to keep pcolor from
        # clearing the axes.  Ordinarily this will not be needed,
        # since the axes object should already have hold set.
        _hold = self.ax.ishold()
        self.ax.hold(True)
        col = self.ax.pcolor(*args, **kw)
        self.ax.hold(_hold)
        #self.add_observer(col) # We should observe, not be observed...
        self.solids = col
        if self.drawedges:
            # Thin separator lines between color blocks
            self.dividers = collections.LineCollection(self._edges(X,Y),
                                colors=(mpl.rcParams['axes.edgecolor'],),
                                linewidths=(0.5*mpl.rcParams['axes.linewidth'],)
                                )
            self.ax.add_collection(self.dividers)
def add_lines(self, levels, colors, linewidths):
'''
Draw lines on the colorbar.
'''
N = len(levels)
dummy, y = self._locate(levels)
if len(y) <> N:
raise ValueError("levels are outside colorbar range")
x = np.array([0.0, 1.0])
X, Y = np.meshgrid(x,y)
if self.orientation == 'vertical':
xy = [zip(X[i], Y[i]) for i in range(N)]
else:
xy = [zip(Y[i], X[i]) for i in range(N)]
col = collections.LineCollection(xy, linewidths=linewidths)
self.lines = col
col.set_color(colors)
self.ax.add_collection(col)
    def _ticker(self):
        '''
        Return two sequences: ticks (colorbar data locations)
        and ticklabels (strings).

        Also returns the formatter's offset string as a third element.
        '''
        locator = self.locator
        formatter = self.formatter
        if locator is None:
            # No locator given: choose one matching how the colorbar was
            # constructed (index, boundary, log, or generic).
            if self.boundaries is None:
                if isinstance(self.norm, colors.NoNorm):
                    nv = len(self._values)
                    base = 1 + int(nv/10)
                    locator = ticker.IndexLocator(base=base, offset=0)
                elif isinstance(self.norm, colors.BoundaryNorm):
                    b = self.norm.boundaries
                    locator = ticker.FixedLocator(b, nbins=10)
                elif isinstance(self.norm, colors.LogNorm):
                    locator = ticker.LogLocator()
                else:
                    locator = ticker.MaxNLocator()
            else:
                b = self._boundaries[self._inside]
                locator = ticker.FixedLocator(b, nbins=10)
        if isinstance(self.norm, colors.NoNorm):
            intv = self._values[0], self._values[-1]
        else:
            intv = self.vmin, self.vmax
        # Locators/formatters normally live on an Axis; give them a dummy
        # axis carrying the colorbar's data interval instead.
        locator.create_dummy_axis()
        formatter.create_dummy_axis()
        locator.set_view_interval(*intv)
        locator.set_data_interval(*intv)
        formatter.set_view_interval(*intv)
        formatter.set_data_interval(*intv)
        b = np.array(locator())
        # Keep only in-range locations, converted to colorbar coordinates
        b, ticks = self._locate(b)
        formatter.set_locs(b)
        ticklabels = [formatter(t, i) for i, t in enumerate(b)]
        offset_string = formatter.get_offset()
        return ticks, ticklabels, offset_string
    def _process_values(self, b=None):
        '''
        Set the :attr:`_boundaries` and :attr:`_values` attributes
        based on the input boundaries and values.  Input boundaries
        can be *self.boundaries* or the argument *b*.
        '''
        if b is None:
            b = self.boundaries
        if b is not None:
            # Boundaries supplied: values default to the midpoints.
            self._boundaries = np.asarray(b, dtype=float)
            if self.values is None:
                self._values = 0.5*(self._boundaries[:-1]
                                        + self._boundaries[1:])
                if isinstance(self.norm, colors.NoNorm):
                    # NoNorm indexes directly into the colormap -> ints
                    self._values = (self._values + 0.00001).astype(np.int16)
                return
            self._values = np.array(self.values)
            return
        if self.values is not None:
            # Values supplied without boundaries: synthesize boundaries
            # around them, extrapolating the two ends.
            self._values = np.array(self.values)
            if self.boundaries is None:
                b = np.zeros(len(self.values)+1, 'd')
                # NOTE(review): a midpoint formula would use '+' here; this
                # uses '-'. Kept byte-identical — confirm against upstream.
                b[1:-1] = 0.5*(self._values[:-1] - self._values[1:])
                b[0] = 2.0*b[1] - b[2]
                b[-1] = 2.0*b[-2] - b[-3]
                self._boundaries = b
                return
            self._boundaries = np.array(self.boundaries)
            return
        # Neither boundaries nor values are specified;
        # make reasonable ones based on cmap and norm.
        if isinstance(self.norm, colors.NoNorm):
            # One slot per colormap entry, centered on the integer indices
            b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
            v = np.zeros((len(b)-1,), dtype=np.int16)
            v[self._inside] = np.arange(self.cmap.N, dtype=np.int16)
            if self.extend in ('both', 'min'):
                v[0] = -1
            if self.extend in ('both', 'max'):
                v[-1] = self.cmap.N
            self._boundaries = b
            self._values = v
            return
        elif isinstance(self.norm, colors.BoundaryNorm):
            # Pad the norm's boundaries by one unit on each extended end
            b = list(self.norm.boundaries)
            if self.extend in ('both', 'min'):
                b = [b[0]-1] + b
            if self.extend in ('both', 'max'):
                b = b + [b[-1] + 1]
            b = np.array(b)
            v = np.zeros((len(b)-1,), dtype=float)
            bi = self.norm.boundaries
            v[self._inside] = 0.5*(bi[:-1] + bi[1:])
            if self.extend in ('both', 'min'):
                v[0] = b[0] - 1
            if self.extend in ('both', 'max'):
                v[-1] = b[-1] + 1
            self._boundaries = b
            self._values = v
            return
        else:
            # Continuous norm: invert uniform [0, 1] samples, pad extended
            # ends, then recurse once with the synthesized boundaries.
            if not self.norm.scaled():
                self.norm.vmin = 0
                self.norm.vmax = 1
            b = self.norm.inverse(self._uniform_y(self.cmap.N+1))
            if self.extend in ('both', 'min'):
                b[0] = b[0] - 1
            if self.extend in ('both', 'max'):
                b[-1] = b[-1] + 1
            self._process_values(b)
def _find_range(self):
'''
Set :attr:`vmin` and :attr:`vmax` attributes to the first and
last boundary excluding extended end boundaries.
'''
b = self._boundaries[self._inside]
self.vmin = b[0]
self.vmax = b[-1]
def _central_N(self):
'''number of boundaries **before** extension of ends'''
nb = len(self._boundaries)
if self.extend == 'both':
nb -= 2
elif self.extend in ('min', 'max'):
nb -= 1
return nb
def _extended_N(self):
'''
Based on the colormap and extend variable, return the
number of boundaries.
'''
N = self.cmap.N + 1
if self.extend == 'both':
N += 2
elif self.extend in ('min', 'max'):
N += 1
return N
    def _uniform_y(self, N):
        '''
        Return colorbar data coordinates for *N* uniformly
        spaced boundaries, plus ends if required.
        '''
        if self.extend == 'neither':
            y = np.linspace(0, 1, N)
        else:
            # Extended ends sit slightly outside [0, 1] at +/- 0.05
            if self.extend == 'both':
                y = np.zeros(N + 2, 'd')
                y[0] = -0.05
                y[-1] = 1.05
            elif self.extend == 'min':
                y = np.zeros(N + 1, 'd')
                y[0] = -0.05
            else:
                y = np.zeros(N + 1, 'd')
                y[-1] = 1.05
            # Interior slots get the uniform spacing
            y[self._inside] = np.linspace(0, 1, N)
        return y
    def _proportional_y(self):
        '''
        Return colorbar data coordinates for the boundaries of
        a proportional colorbar.
        '''
        if isinstance(self.norm, colors.BoundaryNorm):
            # NOTE(review): 'b' is computed but unused in this branch.
            b = self._boundaries[self._inside]
            # Scale boundaries linearly onto [0, 1]
            y = (self._boundaries - self._boundaries[0])
            y = y / (self._boundaries[-1] - self._boundaries[0])
        else:
            y = self.norm(self._boundaries.copy())
        # Extended ends poke slightly outside [0, 1]
        if self.extend in ('both', 'min'):
            y[0] = -0.05
        if self.extend in ('both', 'max'):
            y[-1] = 1.05
        # Renormalize the interior so it spans exactly [0, 1]
        yi = y[self._inside]
        norm = colors.Normalize(yi[0], yi[-1])
        y[self._inside] = norm(yi)
        return y
    def _mesh(self):
        '''
        Return X,Y, the coordinate arrays for the colorbar pcolormesh.
        These are suitable for a vertical colorbar; swapping and
        transposition for a horizontal colorbar are done outside
        this function.
        '''
        x = np.array([0.0, 1.0])
        if self.spacing == 'uniform':
            y = self._uniform_y(self._central_N())
        else:
            y = self._proportional_y()
        # Cached for _locate()'s interpolation
        self._y = y
        X, Y = np.meshgrid(x,y)
        # Pinch extended ends into triangles by moving the outer row's
        # x-coordinates to the center.
        if self.extend in ('min', 'both'):
            X[0,:] = 0.5
        if self.extend in ('max', 'both'):
            X[-1,:] = 0.5
        return X, Y
    def _locate(self, x):
        '''
        Given a possible set of color data values, return the ones
        within range, together with their corresponding colorbar
        data coordinates.
        '''
        if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)):
            b = self._boundaries
            xn = x
            xout = x
        else:
            # Do calculations using normalized coordinates so
            # as to make the interpolation more accurate.
            b = self.norm(self._boundaries, clip=False).filled()
            # We do our own clipping so that we can allow a tiny
            # bit of slop in the end point ticks to allow for
            # floating point errors.
            xn = self.norm(x, clip=False).filled()
            in_cond = (xn > -0.001) & (xn < 1.001)
            xn = np.compress(in_cond, xn)
            xout = np.compress(in_cond, x)
        # The rest is linear interpolation with clipping.
        y = self._y
        N = len(b)
        # Interval index for each point; i0 is the interval's left edge
        ii = np.minimum(np.searchsorted(b, xn), N-1)
        i0 = np.maximum(ii - 1, 0)
        #db = b[ii] - b[i0]
        db = np.take(b, ii) - np.take(b, i0)
        # Guard against zero-width intervals (division by zero) at the ends
        db = np.where(i0==ii, 1.0, db)
        #dy = y[ii] - y[i0]
        dy = np.take(y, ii) - np.take(y, i0)
        z = np.take(y, i0) + (xn-np.take(b,i0))*dy/db
        return xout, z
    def set_alpha(self, alpha):
        '''
        Set the transparency used for the solid colors; takes effect the
        next time draw_all() runs.
        '''
        self.alpha = alpha
class Colorbar(ColorbarBase):
    '''
    Colorbar attached to a specific mappable (image, contour set, ...).

    Pulls cmap/norm/alpha from the mappable; for a line (non-filled)
    ContourSet it also mirrors the contour lines on the colorbar.
    '''
    def __init__(self, ax, mappable, **kw):
        mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax
                                  # are set when colorbar is called,
                                  # even if mappable.draw has not yet
                                  # been called.  This will not change
                                  # vmin, vmax if they are already set.
        self.mappable = mappable
        kw['cmap'] = mappable.cmap
        kw['norm'] = mappable.norm
        kw['alpha'] = mappable.get_alpha()
        if isinstance(mappable, contour.ContourSet):
            # Contour sets supply their own boundaries/values/extend
            CS = mappable
            kw['boundaries'] = CS._levels
            kw['values'] = CS.cvalues
            kw['extend'] = CS.extend
            #kw['ticks'] = CS._levels
            kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10))
            kw['filled'] = CS.filled
            ColorbarBase.__init__(self, ax, **kw)
            if not CS.filled:
                self.add_lines(CS)
        else:
            ColorbarBase.__init__(self, ax, **kw)

    def add_lines(self, CS):
        '''
        Add the lines from a non-filled
        :class:`~matplotlib.contour.ContourSet` to the colorbar.

        Raises ValueError unless *CS* is a line (non-filled) ContourSet.
        '''
        if not isinstance(CS, contour.ContourSet) or CS.filled:
            raise ValueError('add_lines is only for a ContourSet of lines')
        # First color/linewidth of each contour level
        tcolors = [c[0] for c in CS.tcolors]
        tlinewidths = [t[0] for t in CS.tlinewidths]
        # The following was an attempt to get the colorbar lines
        # to follow subsequent changes in the contour lines,
        # but more work is needed: specifically, a careful
        # look at event sequences, and at how
        # to make one object track another automatically.
        #tcolors = [col.get_colors()[0] for col in CS.collections]
        #tlinewidths = [col.get_linewidth()[0] for lw in CS.collections]
        #print 'tlinewidths:', tlinewidths
        ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths)

    def update_bruteforce(self, mappable):
        '''
        Manually change any contour line colors.  This is called
        when the image or contour plot to which this colorbar belongs
        is changed.
        '''
        # We are using an ugly brute-force method: clearing and
        # redrawing the whole thing.  The problem is that if any
        # properties have been changed by methods other than the
        # colorbar methods, those changes will be lost.
        self.ax.cla()
        self.draw_all()
        #if self.vmin != self.norm.vmin or self.vmax != self.norm.vmax:
        #    self.ax.cla()
        #    self.draw_all()
        if isinstance(self.mappable, contour.ContourSet):
            CS = self.mappable
            if not CS.filled:
                self.add_lines(CS)
            #if self.lines is not None:
            #    tcolors = [c[0] for c in CS.tcolors]
            #    self.lines.set_color(tcolors)
        #Fixme? Recalculate boundaries, ticks if vmin, vmax have changed.
        #Fixme: Some refactoring may be needed; we should not
        # be recalculating everything if there was a simple alpha
        # change.
def make_axes(parent, **kw):
    # Shrink the parent axes to make room and create a sibling axes sized
    # for a colorbar; returns (cax, remaining_kw).  The public docstring
    # is attached afterwards via make_axes.__doc__.
    orientation = kw.setdefault('orientation', 'vertical')
    fraction = kw.pop('fraction', 0.15)
    shrink = kw.pop('shrink', 1.0)
    aspect = kw.pop('aspect', 20)
    #pb = transforms.PBox(parent.get_position())
    pb = parent.get_position(original=True).frozen()
    if orientation == 'vertical':
        pad = kw.pop('pad', 0.05)
        x1 = 1.0-fraction
        # Split parent horizontally: [parent | pad | colorbar]
        pb1, pbx, pbcb = pb.splitx(x1-pad, x1)
        pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb)
        anchor = (0.0, 0.5)
        panchor = (1.0, 0.5)
    else:
        pad = kw.pop('pad', 0.15)
        # Split parent vertically: [colorbar | pad | parent]
        pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad)
        pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb)
        # Invert aspect for the sideways orientation
        aspect = 1.0/aspect
        anchor = (0.5, 1.0)
        panchor = (0.5, 0.0)
    parent.set_position(pb1)
    parent.set_anchor(panchor)
    fig = parent.get_figure()
    cax = fig.add_axes(pbcb)
    cax.set_aspect(aspect, anchor=anchor, adjustable='box')
    return cax, kw
make_axes.__doc__ ='''
Resize and reposition a parent axes, and return a child
axes suitable for a colorbar::
cax, kw = make_axes(parent, **kw)
Keyword arguments may include the following (with defaults):
*orientation*
'vertical' or 'horizontal'
%s
All but the first of these are stripped from the input kw set.
Returns (cax, kw), the child axes and the reduced kw dictionary.
''' % make_axes_kw_doc
| gpl-3.0 |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/scipy/stats/tests/test_morestats.py | 17 | 50896 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (TestCase, run_module_suite, assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_raises, assert_, assert_allclose, assert_equal, dec, assert_warns)
from scipy import stats
from common_tests import check_named_results
# Matplotlib is not a scipy dependency but is optionally used in probplot,
# so record whether it can be imported instead of failing at import time.
try:
    import matplotlib.pyplot as plt
    have_matplotlib = True
except Exception:
    # Was a bare ``except:``, which also swallowed KeyboardInterrupt and
    # SystemExit.  ``Exception`` still covers ImportError plus the other
    # runtime errors a matplotlib import can raise (e.g. backend setup),
    # while letting process-control exceptions propagate.
    have_matplotlib = False
# Ten shared fixture samples (10 observations each) used by the
# variance-homogeneity tests below (TestBartlett, TestLevene, TestFligner).
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs(TestCase):
    """Regression tests for stats.bayes_mvs (Bayesian mean/var/std)."""

    def test_basic(self):
        # Expected values in this test simply taken from the function. For
        # some checks regarding correctness of implementation, see review in
        # gh-674
        data = [6, 9, 12, 7, 8, 8, 13]
        mean, var, std = stats.bayes_mvs(data)
        assert_almost_equal(mean.statistic, 9.0)
        assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
                        rtol=1e-14)
        assert_almost_equal(var.statistic, 10.0)
        assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
                        rtol=1e-09)
        assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
        assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
                        rtol=1e-14)

    def test_empty_input(self):
        # Empty data must raise rather than return degenerate estimates.
        assert_raises(ValueError, stats.bayes_mvs, [])

    def test_result_attributes(self):
        # Each of the three returned estimates is a named result exposing
        # 'statistic' and 'minmax' fields.
        x = np.arange(15)
        attributes = ('statistic', 'minmax')
        res = stats.bayes_mvs(x)
        for i in res:
            check_named_results(i, attributes)
class TestMvsdist(TestCase):
    """Regression tests for stats.mvsdist (frozen mean/var/std dists)."""

    def test_basic(self):
        # Same data as TestBayes_mvs; here the three frozen distributions
        # are checked via their mean() and 90% interval().
        data = [6, 9, 12, 7, 8, 8, 13]
        mean, var, std = stats.mvsdist(data)
        assert_almost_equal(mean.mean(), 9.0)
        assert_allclose(mean.interval(0.9), (7.1036502226125329,
                                             10.896349777387467), rtol=1e-14)
        assert_almost_equal(var.mean(), 10.0)
        assert_allclose(var.interval(0.9), (3.1767242068607087,
                                            24.45910381334018), rtol=1e-09)
        assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
        assert_allclose(std.interval(0.9), (1.7823367265645145,
                                            4.9456146050146312), rtol=1e-14)

    def test_empty_input(self):
        assert_raises(ValueError, stats.mvsdist, [])

    def test_bad_arg(self):
        # Raise ValueError if fewer than two data points are given.
        data = [1]
        assert_raises(ValueError, stats.mvsdist, data)

    def test_warns(self):
        # regression test for gh-5270
        # make sure there are no spurious divide-by-zero warnings
        with warnings.catch_warnings():
            warnings.simplefilter('error', RuntimeWarning)
            [x.mean() for x in stats.mvsdist([1, 2, 3])]
            [x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
class TestShapiro(TestCase):
def test_basic(self):
x1 = [0.11,7.87,4.61,10.14,7.95,3.14,0.46,
4.43,0.21,4.75,0.71,1.52,3.24,
0.93,0.42,4.97,9.53,4.55,0.47,6.66]
w,pw = stats.shapiro(x1)
assert_almost_equal(w,0.90047299861907959,6)
assert_almost_equal(pw,0.042089745402336121,6)
x2 = [1.36,1.14,2.92,2.55,1.46,1.06,5.27,-1.11,
3.48,1.10,0.88,-0.51,1.46,0.52,6.20,1.69,
0.08,3.67,2.81,3.49]
w,pw = stats.shapiro(x2)
assert_almost_equal(w,0.9590270,6)
assert_almost_equal(pw,0.52460,3)
# Verified against R
np.random.seed(12345678)
x3 = stats.norm.rvs(loc=5, scale=3, size=100)
w, pw = stats.shapiro(x3)
assert_almost_equal(w, 0.9772805571556091, decimal=6)
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
# Extracted from original paper
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
W_expected = 0.83467
p_expected = 0.000914
w, pw = stats.shapiro(x4)
assert_almost_equal(w, W_expected, decimal=4)
assert_almost_equal(pw, p_expected, decimal=5)
def test_2d(self):
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
def test_empty_input(self):
assert_raises(ValueError, stats.shapiro, [])
assert_raises(ValueError, stats.shapiro, [[], [], []])
def test_not_enough_values(self):
assert_raises(ValueError, stats.shapiro, [1, 2])
assert_raises(ValueError, stats.shapiro, [[], [2]])
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
w, pw = stats.shapiro(x)
assert_equal(w, np.nan)
assert_almost_equal(pw, 1.0)
class TestAnderson(TestCase):
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A,crit,sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A,crit,sig = stats.anderson(x1,'expon')
assert_array_less(A, crit[-2:])
olderr = np.seterr(all='ignore')
try:
A,crit,sig = stats.anderson(x2,'expon')
finally:
np.seterr(**olderr)
assert_(A > crit[-1])
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAndersonKSamp(TestCase):
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4),
midrank=False)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0021, 4)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0020, 4)
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAnsari(TestCase):
def test_small(self):
x = [1,2,3,3,4]
y = [3,2,6,1,6,1,4,1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
W, pval = stats.ansari(x,y)
assert_almost_equal(W,23.5,11)
assert_almost_equal(pval,0.13499256881897437,11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99))
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W,185.5,11)
assert_almost_equal(pval,0.18145819972867083,11)
def test_exact(self):
W,pval = stats.ansari([1,2,3,4],[15,5,20,8,10,12])
assert_almost_equal(W,10.0,11)
assert_almost_equal(pval,0.533333333333333333,7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBartlett(TestCase):
    """Tests for stats.bartlett (equal-variance test) on the g1..g10 fixtures."""

    def test_data(self):
        args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
        T, pval = stats.bartlett(*args)
        assert_almost_equal(T,20.78587342806484,7)
        assert_almost_equal(pval,0.0136358632781,7)

    def test_bad_arg(self):
        # Too few args raises ValueError.
        assert_raises(ValueError, stats.bartlett, [1])

    def test_result_attributes(self):
        # Result is a named tuple with 'statistic' and 'pvalue' fields.
        args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
        res = stats.bartlett(*args)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes)

    def test_empty_arg(self):
        # An empty sample among the groups yields (nan, nan), not an error.
        args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
        assert_equal((np.nan, np.nan), stats.bartlett(*args))
class TestLevene(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W,1.7059176930008939,7)
assert_almost_equal(pval,0.0990829755522,7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed', proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1,1,21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1,1,21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1,1,21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBinomP(TestCase):
    """Tests for stats.binom_test (exact binomial test p-values)."""

    def test_data(self):
        pval = stats.binom_test(100,250)
        assert_almost_equal(pval,0.0018833009350757682,11)
        pval = stats.binom_test(201,405)
        assert_almost_equal(pval,0.92085205962670713,11)
        # x may also be a two-element [successes, failures] pair.
        pval = stats.binom_test([682,243],p=3.0/4)
        assert_almost_equal(pval,0.38249155957481695,11)

    def test_bad_len_x(self):
        # Length of x must be 1 or 2.
        assert_raises(ValueError, stats.binom_test, [1,2,3])

    def test_bad_n(self):
        # len(x) is 1, but n is invalid.
        # Missing n
        assert_raises(ValueError, stats.binom_test, [100])
        # n less than x[0]
        assert_raises(ValueError, stats.binom_test, [100], n=50)

    def test_bad_p(self):
        # p must be a probability in [0, 1].
        assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)

    def test_alternatives(self):
        # One-sided and two-sided alternatives against p = 1/6.
        res = stats.binom_test(51, 235, p=1./6, alternative='less')
        assert_almost_equal(res, 0.982022657605858)
        res = stats.binom_test(51, 235, p=1./6, alternative='greater')
        assert_almost_equal(res, 0.02654424571169085)
        res = stats.binom_test(51, 235, p=1./6, alternative='two-sided')
        assert_almost_equal(res, 0.0437479701823997)
class TestFligner(TestCase):
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1,x1**2),
(3.2282229927203536, 0.072379187848207877), 11)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')
Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed', proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed', proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1,1,21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1,1,21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood(TestCase):
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1**2),
(-1.3830857299399906, 0.16663858066771478), 11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
#Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
# Test if the results of mood test in 2-D case are consistent with the
# R result for the same inputs. Numbers from R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check axis handling is self consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is less than 3
assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(TestCase):
def test_basic(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
np.random.seed(123456)
x = stats.norm.rvs(size=100)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(), fit=False)
def test_dist_keyword(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist(object):
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
np.random.seed(7654321)
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
def test_wilcoxon_bad_arg():
    """stats.wilcoxon raises ValueError for mismatched sample lengths
    and for an unknown zero_method."""
    bad_calls = [
        ([1], [1, 2]),               # samples of different lengths
        ([1, 2], [1, 2], "dummy"),   # unrecognised zero_method
    ]
    for args in bad_calls:
        assert_raises(ValueError, stats.wilcoxon, *args)
class TestKstat(TestCase):
    """Tests for stats.kstat (unbiased k-statistic estimators, n = 1..4)."""

    def test_moments_normal_distribution(self):
        np.random.seed(32149)
        data = np.random.randn(12345)
        moments = []
        for n in [1, 2, 3, 4]:
            moments.append(stats.kstat(data, n))
        # Regression values for this fixed seed.
        expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
        assert_allclose(moments, expected, rtol=1e-4)
        # test equivalence with `stats.moment`
        m1 = stats.moment(data, moment=1)
        m2 = stats.moment(data, moment=2)
        m3 = stats.moment(data, moment=3)
        assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)

    def test_empty_input(self):
        assert_raises(ValueError, stats.kstat, [])

    def test_nan_input(self):
        # A NaN anywhere in the data propagates to the result.
        data = np.arange(10.)
        data[6] = np.nan
        assert_equal(stats.kstat(data), np.nan)

    def test_kstat_bad_arg(self):
        # Raise ValueError if n > 4 or n < 1.
        data = np.arange(10)
        for n in [0, 4.001]:
            assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar(TestCase):
    """Tests for stats.kstatvar (variance of the k-statistic)."""

    def test_empty_input(self):
        # No data: kstatvar must raise instead of returning garbage.
        assert_raises(ValueError, stats.kstatvar, [])

    def test_nan_input(self):
        # A NaN anywhere in the data propagates to the result.
        data = np.arange(10.)
        data[6] = np.nan
        # Bug fix: this previously called stats.kstat (copy-paste from
        # TestKstat), so kstatvar's own NaN handling was never exercised.
        assert_equal(stats.kstatvar(data), np.nan)

    def test_bad_arg(self):
        # Raise ValueError if n is not 1 or 2.
        data = [1]
        n = 10
        assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182, 0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10, dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot return for one empty array,
# ppcc contains all zeros and svals is the same as for normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax(TestCase):
def test_ppcc_max_bad_arg(self):
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
def test_ppcc_max_basic(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5)
def test_dist(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# Test that we can specify distributions both by name and as objects.
max1 = stats.ppcc_max(x, dist='tukeylambda')
max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
assert_almost_equal(max1, -0.71215366521264145, decimal=5)
assert_almost_equal(max2, -0.71215366521264145, decimal=5)
# Test that 'tukeylambda' is the default dist
max3 = stats.ppcc_max(x)
assert_almost_equal(max3, -0.71215366521264145, decimal=5)
def test_brack(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
-0.71215366521264145, decimal=5)
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
-0.71215366521264145, decimal=5)
class TestBoxcox_llf(TestCase):
def test_basic(self):
np.random.seed(54321)
x = stats.norm.rvs(size=10000, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
class TestBoxcox(TestCase):
def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x)
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
class TestBoxcoxNormmax(TestCase):
def setUp(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(TestCase):
    """Tests for stats.boxcox_normplot (lmbda profile for normality)."""

    def setUp(self):
        np.random.seed(7654321)
        self.x = stats.loggamma.rvs(5, size=500) + 5

    def test_basic(self):
        N = 5
        lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
        # Reference correlation coefficients for the N sampled lmbdas.
        ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
                         0.95843297]
        assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
        assert_allclose(ppcc, ppcc_expected)

    # NOTE(review): `dec`, `have_matplotlib` and `plt` are presumably set up
    # near the top of this test module -- confirm against the full file.
    @dec.skipif(not have_matplotlib)
    def test_plot_kwarg(self):
        # Check with the matplotlib.pyplot module
        fig = plt.figure()
        fig.add_subplot(111)
        stats.boxcox_normplot(self.x, -20, 20, plot=plt)
        plt.close()

        # Check that a Matplotlib Axes object is accepted
        fig.add_subplot(111)
        ax = fig.add_subplot(111)
        stats.boxcox_normplot(self.x, -20, 20, plot=ax)
        plt.close()

    def test_invalid_inputs(self):
        # `lb` has to be larger than `la`
        assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
        # `x` can not contain negative values
        assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)

    def test_empty(self):
        # Empty input yields an empty result, not an error.
        assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestCircFuncs(TestCase):
def test_circfuncs(self):
x = np.array([355,5,2,359,10,350])
M = stats.circmean(x, high=360)
Mval = 0.167690146
assert_allclose(M, Mval, rtol=1e-7)
V = stats.circvar(x, high=360)
Vval = 42.51955609
assert_allclose(V, Vval, rtol=1e-7)
S = stats.circstd(x, high=360)
Sval = 6.520702116
assert_allclose(S, Sval, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20,21,22,18,19,20.5,19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
def test_circmean_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355,5,2,359,10,350],
[351,7,4,352,9,349],
[357,9,8,358,4,356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:,i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
def test_circfuncs_array_like(self):
x = [355,5,2,359,10,350]
assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
def test_empty(self):
assert_(np.isnan(stats.circmean([])))
assert_(np.isnan(stats.circstd([])))
assert_(np.isnan(stats.circvar([])))
def test_accuracy_wilcoxon():
    """Check wilcoxon statistic/p-value against reference values for each
    zero_method, and the `correction` flag against R.

    NOTE(review): the hard-coded p-values assume the normal-approximation
    code path (R's exact=FALSE); confirm this matches the default of the
    installed SciPy version.
    """
    freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
    nums = range(-4, 5)
    # Expand (value, frequency) pairs into a flat sample.
    x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
    y = np.zeros(x.size)

    T, p = stats.wilcoxon(x, y, "pratt")
    assert_allclose(T, 423)
    assert_allclose(p, 0.00197547303533107)

    T, p = stats.wilcoxon(x, y, "zsplit")
    assert_allclose(T, 441)
    assert_allclose(p, 0.0032145343172473055)

    T, p = stats.wilcoxon(x, y, "wilcox")
    assert_allclose(T, 327)
    assert_allclose(p, 0.00641346115861)

    # Test the 'correction' option, using values computed in R with:
    # > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
    x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
    y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
    T, p = stats.wilcoxon(x, y, correction=False)
    assert_equal(T, 34)
    assert_allclose(p, 0.6948866, rtol=1e-6)
    T, p = stats.wilcoxon(x, y, correction=True)
    assert_equal(T, 34)
    assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes():
    # The result object must expose 'statistic' and 'pvalue' by name.
    x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
    y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
    res = stats.wilcoxon(x, y, correction=False)
    attributes = ('statistic', 'pvalue')
    # check_named_results is presumably a shared helper defined elsewhere
    # in this test module -- confirm against the full file.
    check_named_results(res, attributes)
def test_wilcoxon_tie():
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10)
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True)
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
class TestMedianTest(TestCase):
    """Tests for stats.median_test: argument validation, the `ties`
    options, and consistency with chi2_contingency."""

    def test_bad_n_samples(self):
        # At least two samples are required.
        assert_raises(ValueError, stats.median_test, [1, 2, 3])

    def test_empty_sample(self):
        # Every sample must contain at least one value.
        assert_raises(ValueError, stats.median_test, [], [1, 2, 3])

    def test_empty_when_ties_ignored(self):
        # The grand median is 1 and every value of the first sample equals
        # it.  ties="ignore" drops those values, leaving the first sample
        # effectively empty, which must raise ValueError.
        assert_raises(ValueError, stats.median_test,
                      [1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")

    def test_empty_contingency_row(self):
        # Grand median 1 with the default ties="below" counts every value
        # as below the median, producing an all-zero contingency row.
        assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
        # ties="above" fails symmetrically.
        assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
                      ties="above")

    def test_bad_ties(self):
        # Unknown `ties` policy is rejected.
        assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
                      ties="foo")

    def test_bad_keyword(self):
        # Unknown keyword arguments raise TypeError.
        assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
                      foo="foo")

    def test_simple(self):
        sample = [1, 2, 3]
        stat, p, med, tbl = stats.median_test(sample, sample)

        # The median is floating point, but this equality test should be safe.
        assert_equal(med, 2.0)

        assert_array_equal(tbl, [[1, 1], [2, 2]])

        # Expected counts equal the observed contingency table, so the
        # statistic is 0 and the p-value is 1.
        assert_equal(stat, 0)
        assert_equal(p, 1)

    def test_ties_options(self):
        # Check the contingency table under each `ties` policy; the grand
        # median of the pooled data is 5.
        a = [1, 2, 3, 4]
        b = [5, 6]
        c = [7, 8, 9]

        # Default policy is "below".
        stat, p, m, tbl = stats.median_test(a, b, c)
        assert_equal(m, 5)
        assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])

        for policy, expected in [("ignore", [[0, 1, 3], [4, 0, 0]]),
                                 ("above", [[0, 2, 3], [4, 0, 0]])]:
            stat, p, m, tbl = stats.median_test(a, b, c, ties=policy)
            assert_equal(m, 5)
            assert_equal(tbl, expected)

    def test_basic(self):
        # median_test delegates to chi2_contingency for the statistic and
        # p-value; make sure keyword arguments are forwarded faithfully.
        x = [1, 2, 3, 4, 5]
        y = [2, 4, 6, 8]

        for kwargs in [{}, {"lambda_": 0}, {"correction": False}]:
            stat, p, m, tbl = stats.median_test(x, y, **kwargs)
            assert_equal(m, 4)
            assert_equal(tbl, [[1, 2], [4, 2]])
            exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, **kwargs)
            assert_allclose(stat, exp_stat)
            assert_allclose(p, exp_p)
if __name__ == "__main__":
    # run_module_suite is presumably numpy.testing's nose-based runner,
    # imported at the top of this module -- confirm.
    run_module_suite()
| mit |
cainiaocome/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)

from time import time
from scipy import sparse
from scipy import linalg

from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso


###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")

# Same data twice: dense ndarray and a sparse COO copy.
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)

alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)

t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))

t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))

# The coefficients should agree regardless of input representation.
print("Distance between coefficients : %s"
      % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))

###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")

# Threshold the data so it is genuinely sparse, then convert to CSC.
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()

print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))

alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)

t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))

t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))

print("Distance between coefficients : %s"
      % linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
costypetrisor/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
    # Check binomial deviance loss.
    # Check against alternative definitions in ESLII.
    bd = BinomialDeviance(2)

    # pred has the same BD for y in {0, 1}
    assert_equal(bd(np.array([0.0]), np.array([0.0])),
                 bd(np.array([1.0]), np.array([0.0])))

    # Very confident, correct predictions drive the deviance to ~0.
    assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
                           np.array([100.0, 100.0, 100.0])),
                        0.0)
    assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
                           np.array([100.0, -100.0, -100.0])), 0)

    # check if same results as alternative definition of deviance (from ESLII)
    alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
                                                   (2.0 * y - 1) * pred))
    test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
                 (np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
                 (np.array([0.0, 0.0, 0.0]),
                  np.array([-100.0, -100.0, -100.0])),
                 (np.array([1.0, 1.0, 1.0]),
                  np.array([-100.0, -100.0, -100.0]))]

    for datum in test_data:
        assert_almost_equal(bd(*datum), alt_dev(*datum))

    # check the gradient against the alternative formula derived from the
    # deviance above
    alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
    for datum in test_data:
        assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
    # Check log odds estimator.
    est = LogOddsEstimator()
    # This fit call raises ValueError (presumably because y is degenerate
    # with a single sample/class) -- confirm against the implementation.
    assert_raises(ValueError, est.fit, None, np.array([1]))

    est.fit(None, np.array([1.0, 0.0]))
    # Balanced classes -> prior log-odds of 0, so all predictions are 0.
    assert_equal(est.prior, 0.0)
    assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
                       np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
    rng = check_random_state(13)
    y = rng.rand(100)
    pred = rng.rand(100)

    # least squares: unit sample weights must reproduce the unweighted loss
    loss = LeastSquaresError(1)
    loss_wo_sw = loss(y, pred)
    loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
    assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
    # Smoke test for init estimators with sample weights: with unit
    # weights the weighted fit must match the unweighted fit.
    rng = check_random_state(13)
    X = rng.rand(100, 2)
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)
    clf_y = rng.randint(0, 2, size=100)

    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k = 1
            y = reg_y
        else:
            k = 2
            y = clf_y
        if Loss.is_multi_class:
            # skip multiclass
            continue

        loss = Loss(k)
        init_est = loss.init_estimator()
        init_est.fit(X, y)
        out = init_est.predict(X)
        assert_equal(out.shape, (y.shape[0], 1))

        sw_init_est = loss.init_estimator()
        sw_init_est.fit(X, y, sample_weight=sample_weight)
        # Bug fix: predict with the sample-weighted estimator.  Previously
        # this line called init_est.predict(X) again, so the weighted fit
        # was never actually exercised by the assertions below.
        sw_out = sw_init_est.predict(X)
        assert_equal(sw_out.shape, (y.shape[0], 1))

        # check if predictions match
        assert_array_equal(out, sw_out)
def test_weighted_percentile():
    # The huge outlier has zero weight, so the weighted median is the
    # middle value 1.
    # Fix: np.float (a deprecated alias of the builtin float) was removed
    # in NumPy 1.20; np.float64 is the dtype it resolved to.
    y = np.empty(102, dtype=np.float64)
    y[:50] = 0
    y[-51:] = 2
    y[-1] = 100000
    y[50] = 1
    sw = np.ones(102, dtype=np.float64)
    sw[-1] = 0.0
    score = _weighted_percentile(y, sw, 50)
    assert score == 1
def test_weighted_percentile_equal():
    # All values identical: the weighted percentile is that value (0).
    # Fix: np.float was removed in NumPy 1.20; use np.float64 instead.
    y = np.empty(102, dtype=np.float64)
    y.fill(0.0)
    sw = np.ones(102, dtype=np.float64)
    sw[-1] = 0.0
    score = _weighted_percentile(y, sw, 50)
    assert score == 0
def test_weighted_percentile_zero_weight():
    # All-zero weights: the implementation still returns the common value.
    # Fix: np.float was removed in NumPy 1.20; use np.float64 instead.
    y = np.empty(102, dtype=np.float64)
    y.fill(1.0)
    sw = np.ones(102, dtype=np.float64)
    sw.fill(0.0)
    score = _weighted_percentile(y, sw, 50)
    assert score == 1.0
def test_sample_weight_deviance():
    # Test if deviance supports sample weights.
    rng = check_random_state(13)
    # X is unused below; kept for symmetry with the other smoke tests.
    X = rng.rand(100, 2)
    sample_weight = np.ones(100)
    reg_y = rng.rand(100)
    clf_y = rng.randint(0, 2, size=100)
    mclf_y = rng.randint(0, 3, size=100)

    for Loss in LOSS_FUNCTIONS.values():
        if Loss is None:
            continue
        if issubclass(Loss, RegressionLossFunction):
            k = 1
            y = reg_y
            p = reg_y
        else:
            k = 2
            y = clf_y
            p = clf_y

        if Loss.is_multi_class:
            k = 3
            y = mclf_y
            # one-hot encoding
            p = np.zeros((y.shape[0], k), dtype=np.float64)
            for i in range(k):
                p[:, i] = y == i

        loss = Loss(k)
        # Unit sample weights must reproduce the unweighted deviance.
        deviance_w_w = loss(y, p, sample_weight)
        deviance_wo_w = loss(y, p)
        assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/testing/decorators.py | 3 | 14097 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import functools
import gc
import os
import sys
import shutil
import warnings
import unittest
import nose
import numpy as np
import matplotlib as mpl
import matplotlib.style
import matplotlib.units
import matplotlib.testing
from matplotlib import cbook
from matplotlib import ticker
from matplotlib import pyplot as plt
from matplotlib import ft2font
from matplotlib.testing.noseclasses import KnownFailureTest, \
KnownFailureDidNotFailTest, ImageComparisonFailure
from matplotlib.testing.compare import comparable_formats, compare_images, \
make_test_filename
def knownfailureif(fail_condition, msg=None, known_exception_class=None):
    """
    Assume a test will fail if *fail_condition* is True.  *fail_condition*
    may also be False or the string 'indeterminate'.

    *msg* is the error message displayed for the test.

    If *known_exception_class* is not None, the failure is only known
    if the exception is an instance of this class. (Default = None)
    """
    # based on numpy.testing.dec.knownfailureif
    if msg is None:
        msg = 'Test known to fail'

    def known_fail_decorator(f):
        # Local import to avoid a hard nose dependency and only incur the
        # import time overhead at actual test-time.
        import nose

        def failer(*args, **kwargs):
            try:
                # Always run the test (to generate images).
                result = f(*args, **kwargs)
            except Exception as err:
                if fail_condition:
                    if known_exception_class is not None:
                        if not isinstance(err, known_exception_class):
                            # This is not the expected exception
                            raise
                    # (Keep the next ultra-long comment so in shows in console.)
                    raise KnownFailureTest(msg)  # An error here when running nose means that you don't have the matplotlib.testing.noseclasses:KnownFailure plugin in use.
                else:
                    raise
            if fail_condition and fail_condition != 'indeterminate':
                # The test was expected to fail but succeeded instead.
                raise KnownFailureDidNotFailTest(msg)
            return result

        return nose.tools.make_decorator(f)(failer)

    return known_fail_decorator
def _do_cleanup(original_units_registry, original_settings):
    # Restore global matplotlib state after a test: close all figures,
    # restore rcParams and the units registry to their snapshots, and
    # clear any warning filters the test installed.
    plt.close('all')
    gc.collect()

    mpl.rcParams.clear()
    mpl.rcParams.update(original_settings)
    matplotlib.units.registry.clear()
    matplotlib.units.registry.update(original_units_registry)
    warnings.resetwarnings()  # reset any warning filters set in tests
class CleanupTest(object):
    """Nose test base class that snapshots and restores global mpl state."""

    @classmethod
    def setup_class(cls):
        # Snapshot global state so teardown_class can restore it.
        cls.original_units_registry = matplotlib.units.registry.copy()
        cls.original_settings = mpl.rcParams.copy()
        matplotlib.testing.setup()

    @classmethod
    def teardown_class(cls):
        _do_cleanup(cls.original_units_registry,
                    cls.original_settings)

    def test(self):
        # _func is supplied on subclasses built dynamically by
        # image_comparison() via type().
        self._func()
class CleanupTestCase(unittest.TestCase):
    '''A wrapper for unittest.TestCase that includes cleanup operations'''

    @classmethod
    def setUpClass(cls):
        import matplotlib.units
        # Snapshot global state so tearDownClass can restore it.
        cls.original_units_registry = matplotlib.units.registry.copy()
        cls.original_settings = mpl.rcParams.copy()

    @classmethod
    def tearDownClass(cls):
        _do_cleanup(cls.original_units_registry,
                    cls.original_settings)
def cleanup(func):
    """Decorator: snapshot global mpl state, run *func*, always restore."""
    @functools.wraps(func)
    def wrapped_function(*args, **kwargs):
        original_units_registry = matplotlib.units.registry.copy()
        original_settings = mpl.rcParams.copy()
        try:
            func(*args, **kwargs)
        finally:
            # Restore even if the wrapped test raised.
            _do_cleanup(original_units_registry,
                        original_settings)
    return wrapped_function
def check_freetype_version(ver):
    """Return True if the linked FreeType version satisfies *ver*.

    *ver* may be None (always acceptable), a single version string
    (exact match), or a (min, max) pair of strings (inclusive range).
    """
    if ver is None:
        return True

    from distutils import version

    if isinstance(ver, six.string_types):
        ver = (ver, ver)
    lo, hi = (version.StrictVersion(v) for v in ver)
    found = version.StrictVersion(ft2font.__freetype_version__)
    return lo <= found <= hi
class ImageComparisonTest(CleanupTest):
    """Base class for the test classes that image_comparison() generates
    dynamically via type(); subclasses carry _func, _baseline_images,
    _extensions, _tol, _freetype_version, _remove_text, _savefig_kwarg
    and _style as class attributes."""

    @classmethod
    def setup_class(cls):
        cls._initial_settings = mpl.rcParams.copy()
        try:
            matplotlib.style.use(cls._style)
        except:
            # Restore original settings before raising errors during the update.
            mpl.rcParams.clear()
            mpl.rcParams.update(cls._initial_settings)
            raise
        # Because the setup of a CleanupTest might involve
        # modifying a few rcparams, this setup should come
        # last prior to running the image test.
        CleanupTest.setup_class()
        cls.original_settings = cls._initial_settings
        # Run the wrapped test function once up front; it creates the
        # figures that test() below saves and compares.
        cls._func()

    @classmethod
    def teardown_class(cls):
        CleanupTest.teardown_class()

    @staticmethod
    def remove_text(figure):
        # Strip titles and tick labels so the comparison is insensitive
        # to font rendering differences.
        figure.suptitle("")
        for ax in figure.get_axes():
            ax.set_title("")
            ax.xaxis.set_major_formatter(ticker.NullFormatter())
            ax.xaxis.set_minor_formatter(ticker.NullFormatter())
            ax.yaxis.set_major_formatter(ticker.NullFormatter())
            ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            try:
                ax.zaxis.set_major_formatter(ticker.NullFormatter())
                ax.zaxis.set_minor_formatter(ticker.NullFormatter())
            except AttributeError:
                # Not a 3-D axes; there is no zaxis.
                pass

    def test(self):
        # Nose test generator: yields one comparison per (figure, extension).
        baseline_dir, result_dir = _image_directories(self._func)

        for fignum, baseline in zip(plt.get_fignums(), self._baseline_images):
            for extension in self._extensions:
                will_fail = not extension in comparable_formats()
                if will_fail:
                    fail_msg = 'Cannot compare %s files on this system' % extension
                else:
                    fail_msg = 'No failure expected'

                orig_expected_fname = os.path.join(baseline_dir, baseline) + '.' + extension
                # eps baselines may be stored as pdf instead.
                if extension == 'eps' and not os.path.exists(orig_expected_fname):
                    orig_expected_fname = os.path.join(baseline_dir, baseline) + '.pdf'
                expected_fname = make_test_filename(os.path.join(
                    result_dir, os.path.basename(orig_expected_fname)), 'expected')
                actual_fname = os.path.join(result_dir, baseline) + '.' + extension
                if os.path.exists(orig_expected_fname):
                    shutil.copyfile(orig_expected_fname, expected_fname)
                else:
                    will_fail = True
                    fail_msg = 'Do not have baseline image %s' % expected_fname

                @knownfailureif(
                    will_fail, fail_msg,
                    known_exception_class=ImageComparisonFailure)
                def do_test():
                    figure = plt.figure(fignum)

                    if self._remove_text:
                        self.remove_text(figure)

                    figure.savefig(actual_fname, **self._savefig_kwarg)

                    err = compare_images(expected_fname, actual_fname,
                                         self._tol, in_decorator=True)

                    try:
                        if not os.path.exists(expected_fname):
                            raise ImageComparisonFailure(
                                'image does not exist: %s' % expected_fname)

                        if err:
                            raise ImageComparisonFailure(
                                'images not close: %(actual)s vs. %(expected)s '
                                '(RMS %(rms).3f)' % err)
                    except ImageComparisonFailure:
                        # A freetype mismatch downgrades a failure to "known".
                        if not check_freetype_version(self._freetype_version):
                            raise KnownFailureTest(
                                "Mismatched version of freetype. Test requires '%s', you have '%s'" %
                                (self._freetype_version, ft2font.__freetype_version__))
                        raise

                yield (do_test,)
def image_comparison(baseline_images=None, extensions=None, tol=13,
                     freetype_version=None, remove_text=False,
                     savefig_kwarg=None, style='classic'):
    """
    call signature::

      image_comparison(baseline_images=['my_figure'], extensions=None)

    Compare images generated by the test with those specified in
    *baseline_images*, which must correspond else an
    ImageComparisonFailure exception will be raised.

    Keyword arguments:

      *baseline_images*: list
        A list of strings specifying the names of the images generated
        by calls to :meth:`matplotlib.figure.savefig`.

      *extensions*: [ None | list ]
        If *None*, default to all supported extensions.
        Otherwise, a list of extensions to test. For example ['png', 'pdf'].

      *tol*: (default 13)
        The RMS threshold above which the test is considered failed.

      *freetype_version*: str or tuple
        The expected freetype version or range of versions for this
        test to pass.

      *remove_text*: bool
        Remove the title and tick text from the figure before
        comparison.  This does not remove other, more deliberate,
        text, such as legends and annotations.

      *savefig_kwarg*: dict
        Optional arguments that are passed to the savefig method.

      *style*: string
        Optional name for the base style to apply to the image
        test.  The test itself can also apply additional styles
        if desired.  Defaults to the 'classic' style.
    """
    if baseline_images is None:
        raise ValueError('baseline_images must be specified')

    if extensions is None:
        # default extensions to test
        extensions = ['png', 'pdf', 'svg']

    if savefig_kwarg is None:
        # default no kwargs to savefig
        savefig_kwarg = dict()

    def compare_images_decorator(func):
        # We want to run the setup function (the actual test function
        # that generates the figure objects) only once for each type
        # of output file.  The only way to achieve this with nose
        # appears to be to create a test class with "setup_class" and
        # "teardown_class" methods.  Creating a class instance doesn't
        # work, so we use type() to actually create a class and fill
        # it with the appropriate methods.
        name = func.__name__
        # For nose 1.0, we need to rename the test function to
        # something without the word "test", or it will be run as
        # well, outside of the context of our image comparison test
        # generator.
        func = staticmethod(func)
        func.__get__(1).__name__ = str('_private')
        new_class = type(
            name,
            (ImageComparisonTest,),
            {'_func': func,
             '_baseline_images': baseline_images,
             '_extensions': extensions,
             '_tol': tol,
             '_freetype_version': freetype_version,
             '_remove_text': remove_text,
             '_savefig_kwarg': savefig_kwarg,
             '_style': style})

        return new_class
    return compare_images_decorator
def _image_directories(func):
    """
    Compute the baseline and result image directories for testing *func*.
    Create the result directory if it doesn't exist.
    """
    module_name = func.__module__
    if module_name == '__main__':
        # FIXME: this won't work for nested packages in matplotlib.tests
        warnings.warn('test module run as script. guessing baseline image locations')
        script_name = sys.argv[0]
        basedir = os.path.abspath(os.path.dirname(script_name))
        subdir = os.path.splitext(os.path.split(script_name)[1])[0]
    else:
        mods = module_name.split('.')
        if len(mods) >= 3:
            mods.pop(0)
            # mods[0] will be the name of the package being tested (in
            # most cases "matplotlib") However if this is a
            # namespace package pip installed and run via the nose
            # multiprocess plugin or as a specific test this may be
            # missing. See https://github.com/matplotlib/matplotlib/issues/3314
        assert mods.pop(0) == 'tests'
        subdir = os.path.join(*mods)

        import imp

        def find_dotted_module(module_name, path=None):
            """A version of imp which can handle dots in the module name"""
            res = None
            for sub_mod in module_name.split('.'):
                try:
                    res = file, path, _ = imp.find_module(sub_mod, path)
                    path = [path]
                    if file is not None:
                        file.close()
                except ImportError:
                    # assume namespace package
                    path = sys.modules[sub_mod].__path__
                    res = None, path, None
            return res

        # res[1] is the filesystem path of the located (sub)module.
        mod_file = find_dotted_module(func.__module__)[1]
        basedir = os.path.dirname(mod_file)

    baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
    result_dir = os.path.abspath(os.path.join('result_images', subdir))

    if not os.path.exists(result_dir):
        cbook.mkdirs(result_dir)

    return baseline_dir, result_dir
def switch_backend(backend):
    """Decorator factory: run the wrapped test under *backend*, then
    switch back to the previously active backend afterwards."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            try:
                saved_backend = mpl.get_backend()
                matplotlib.testing.setup()
                plt.switch_backend(backend)
                result = func(*args, **kwargs)
            finally:
                # Always restore the original backend.
                plt.switch_backend(saved_backend)
            return result

        return nose.tools.make_decorator(func)(wrapper)
    return decorator
| apache-2.0 |
sampathweb/cs109_twitterapp | app/twitterword_old.py | 1 | 3270 | #-------------------------------------------------------------------------------
# Name: twitter recommender
# Purpose: for cs109 call
#
# Author: bconnaughton
#
# Created: 08/12/2013
# Copyright: (c) bconnaughton 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
from collections import defaultdict
import json
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib import rcParams
import matplotlib.cm as cm
import matplotlib as mpl
#Specific for what is used below
#import oauth2 as oauth
import urlparse
import requests
import csv
from pattern import web
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cross_validation import train_test_split
from sklearn.naive_bayes import MultinomialNB
from app.helpers import get_words_df
def main():
    # Placeholder entry point; this module is used via recommend() instead.
    pass


if __name__ == '__main__':
    main()
def make_xy(df, vectorizer=None):
    """Build the bag-of-words design matrix X and label vector Y.

    Parameters
    ----------
    df : DataFrame with a 'Tweet' text column and a 'was_retweeted' column.
    vectorizer : optional CountVectorizer-like object with fit_transform;
        a default CountVectorizer is created (and fitted) when None.
        NOTE: callers that later transform new text should pass their own
        vectorizer so they keep access to the fitted vocabulary.

    Returns
    -------
    X : sparse CSC count matrix; Y : int array of 0/1 labels.
    """
    if vectorizer is None:
        vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(df.Tweet)
    X = X.tocsc()  # some versions of sklearn return COO format
    # Fix: np.int (a deprecated alias of the builtin int) was removed in
    # NumPy 1.20; astype(int) is the equivalent.
    Y = (df.was_retweeted == True).values.astype(int)
    return X, Y
def log_likelihood(clf, x, y):
    """Total log-likelihood of binary labels *y* under classifier *clf*.

    Sums the log-probability of class 0 over the samples labelled 0 and
    the log-probability of class 1 over the remaining samples.
    """
    log_probs = clf.predict_log_proba(x)
    negatives = y == 0
    return log_probs[negatives, 0].sum() + log_probs[~negatives, 1].sum()
from sklearn.cross_validation import KFold
def cv_score(clf, x, y, score_func):
    """
    Uses 5-fold cross validation to estimate a score of a classifier

    Inputs
    ------
    clf : Classifier object
    x : Input feature vector
    y : Input class labels
    score_func : Function like log_likelihood, that takes (clf, x, y) as input,
                 and returns a score

    Returns
    -------
    The average score obtained by randomly splitting (x, y) into training and
    test sets, fitting on the training set, and evaluating score_func on the
    test set

    Examples
    cv_score(clf, x, y, log_likelihood)
    """
    result = 0
    nfold = 5
    # KFold(n, k) is the pre-0.18 sklearn API (sklearn.cross_validation).
    for train, test in KFold(y.size, nfold):  # split data into train/test groups, 5 times
        clf.fit(x[train], y[train])  # fit
        result += score_func(clf, x[test], y[test])  # evaluate score function on held-out data
    return result / nfold  # average
def recommend(twitterword):
    """Score *twitterword* for retweet likelihood with a naive Bayes model.

    Trains a MultinomialNB bag-of-words model on the tweet corpus from
    get_words_df() and returns clf.predict_proba for the given text, i.e.
    an array of [[P(class 0), P(class 1)]].
    """
    newpd = get_words_df()
    # newpd = pd.read_csv('twitter_bigdf_useravg.csv')
    newpd['Tweet'] = newpd['Tweet'].apply(str)
    newpd['was_retweeted'] = newpd['average_retweet_threshold']
    # Bug fix: the vectorizer must be created here and shared with
    # make_xy().  Previously `vectorizer` was an undefined global, so
    # every call raised NameError before returning.
    vectorizer = CountVectorizer()
    X, Y = make_xy(newpd, vectorizer)
    xtrain, xtest, ytrain, ytest = train_test_split(X, Y)
    clf = MultinomialNB().fit(xtrain, ytrain)
    # Removed dead code: unused best_alpha/best_min_df constants and the
    # good/bad word log-probability rankings, which were computed but
    # never used by the return value.
    return clf.predict_proba(vectorizer.transform([twitterword]))
| mit |
1iyiwei/pyml | code/ch03/share.py | 2 | 1904 | import numpy as np
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import warnings
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02, xlabel='', ylabel='', title=''):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
alpha=0.8, c=cmap(idx),
marker=markers[idx], label=cl)
# highlight test samples
if test_idx:
# plot all samples
if not versiontuple(np.__version__) >= versiontuple('1.9.0'):
X_test, y_test = X[list(test_idx), :], y[list(test_idx)]
warnings.warn('Please update to NumPy 1.9.0 or newer')
else:
X_test, y_test = X[test_idx, :], y[test_idx]
plt.scatter(X_test[:, 0],
X_test[:, 1],
c='',
alpha=1.0,
linewidths=1,
marker='o',
s=55, label='test set')
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
| mit |
cbertinato/pandas | pandas/io/json/json.py | 1 | 33725 | from io import StringIO
from itertools import islice
import os
import numpy as np
import pandas._libs.json as json
from pandas._libs.tslibs import iNaT
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.common import ensure_str, is_period_dtype
from pandas import DataFrame, MultiIndex, Series, isna, to_datetime
from pandas.core.reshape.concat import concat
from pandas.io.common import (
BaseIterator, _get_handle, _infer_compression, _stringify_path,
get_filepath_or_buffer)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import _validate_integer
from .normalize import _convert_to_line_delimits
from .table_schema import build_table_schema, parse_table_schema
loads = json.loads
dumps = json.dumps
TABLE_SCHEMA_VERSION = '0.20.0'
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
            double_precision=10, force_ascii=True, date_unit='ms',
            default_handler=None, lines=False, compression='infer',
            index=True):
    """Serialize a Series or DataFrame *obj* to JSON.

    Writes to *path_or_buf* when it is a path string (honoring
    *compression*) or a file-like object; returns the JSON string instead
    when *path_or_buf* is None.
    """
    # validate option combinations before doing any work
    if not index and orient not in ['split', 'table']:
        raise ValueError("'index=False' is only valid when 'orient' is "
                         "'split' or 'table'")
    path_or_buf = _stringify_path(path_or_buf)
    if lines and orient != 'records':
        raise ValueError("'lines' keyword only valid when 'orient' is records")

    # the table orient serializes a Series through its one-column frame
    if orient == 'table' and isinstance(obj, Series):
        obj = obj.to_frame(name=obj.name or 'values')

    # select the writer class matching the object/orient combination
    if orient == 'table' and isinstance(obj, DataFrame):
        writer_cls = JSONTableWriter
    elif isinstance(obj, Series):
        writer_cls = SeriesWriter
    elif isinstance(obj, DataFrame):
        writer_cls = FrameWriter
    else:
        raise NotImplementedError("'obj' should be a Series or a DataFrame")

    serialized = writer_cls(
        obj, orient=orient, date_format=date_format,
        double_precision=double_precision, ensure_ascii=force_ascii,
        date_unit=date_unit, default_handler=default_handler,
        index=index).write()

    if lines:
        serialized = _convert_to_line_delimits(serialized)

    if isinstance(path_or_buf, str):
        # a filesystem path: open (possibly compressed), write, always close
        fh, handles = _get_handle(path_or_buf, 'w', compression=compression)
        try:
            fh.write(serialized)
        finally:
            fh.close()
    elif path_or_buf is None:
        return serialized
    else:
        # an already-open file-like object supplied by the caller
        path_or_buf.write(serialized)
class Writer:
    """Base JSON writer: stores serialization options and drives dumps().

    Subclasses provide ``_default_orient`` and implement ``_format_axes``
    to validate their axes; ``write`` produces the JSON string.
    """

    def __init__(self, obj, orient, date_format, double_precision,
                 ensure_ascii, date_unit, index, default_handler=None):
        self.obj = obj
        # fall back to the subclass default when no orient was given
        self.orient = self._default_orient if orient is None else orient
        self.date_format = date_format
        self.double_precision = double_precision
        self.ensure_ascii = ensure_ascii
        self.date_unit = date_unit
        self.default_handler = default_handler
        self.index = index
        self.is_copy = None
        self._format_axes()

    def _format_axes(self):
        # subclasses must validate/format their axes here
        raise AbstractMethodError(self)

    def write(self):
        """Serialize ``self.obj`` using the options captured at init."""
        return self._write(self.obj, self.orient, self.double_precision,
                           self.ensure_ascii, self.date_unit,
                           self.date_format == 'iso', self.default_handler)

    def _write(self, obj, orient, double_precision, ensure_ascii,
               date_unit, iso_dates, default_handler):
        # thin wrapper around the C-level JSON encoder
        return dumps(obj, orient=orient,
                     double_precision=double_precision,
                     ensure_ascii=ensure_ascii,
                     date_unit=date_unit,
                     iso_dates=iso_dates,
                     default_handler=default_handler)
class SeriesWriter(Writer):
    """JSON writer for Series objects (default orient: 'index')."""

    _default_orient = 'index'

    def _format_axes(self):
        # orient='index' keys the JSON object by index label, so duplicate
        # labels cannot round-trip
        if self.orient == 'index' and not self.obj.index.is_unique:
            raise ValueError("Series index must be unique for orient="
                             "'{orient}'".format(orient=self.orient))

    def _write(self, obj, orient, double_precision, ensure_ascii,
               date_unit, iso_dates, default_handler):
        if orient == 'split' and not self.index:
            # index=False: emit only the name and the raw values
            obj = {"name": obj.name, "data": obj.values}
        return super()._write(obj, orient, double_precision, ensure_ascii,
                              date_unit, iso_dates, default_handler)
class FrameWriter(Writer):
    """JSON writer for DataFrame objects (default orient: 'columns')."""

    _default_orient = 'columns'

    def _format_axes(self):
        """
        Validate that each axis is unique where the chosen orient needs it.
        """
        obj, orient = self.obj, self.orient
        # index labels become JSON keys for these orients
        if orient in ('index', 'columns') and not obj.index.is_unique:
            raise ValueError("DataFrame index must be unique for orient="
                             "'{orient}'.".format(orient=orient))
        # column labels become JSON keys for these orients
        if (orient in ('index', 'columns', 'records')
                and not obj.columns.is_unique):
            raise ValueError("DataFrame columns must be unique for orient="
                             "'{orient}'.".format(orient=orient))

    def _write(self, obj, orient, double_precision, ensure_ascii,
               date_unit, iso_dates, default_handler):
        if orient == 'split' and not self.index:
            # index=False: build the split dict and strip the index entry
            obj = obj.to_dict(orient='split')
            del obj["index"]
        return super()._write(obj, orient, double_precision, ensure_ascii,
                              date_unit, iso_dates, default_handler)
class JSONTableWriter(FrameWriter):
    # Emits the Table Schema layout: '{"schema": ..., "data": [records]}'
    _default_orient = 'records'

    def __init__(self, obj, orient, date_format, double_precision,
                 ensure_ascii, date_unit, index, default_handler=None):
        """
        Adds a `schema` attribute with the Table Schema, resets
        the index (can't do in caller, because the schema inference needs
        to know what the index is, forces orient to records, and forces
        date_format to 'iso'.
        """
        super().__init__(obj, orient, date_format, double_precision,
                         ensure_ascii, date_unit, index,
                         default_handler=default_handler)
        if date_format != 'iso':
            msg = ("Trying to write with `orient='table'` and "
                   "`date_format='{fmt}'`. Table Schema requires dates "
                   "to be formatted with `date_format='iso'`"
                   .format(fmt=date_format))
            raise ValueError(msg)
        # infer the schema BEFORE the index is reset further below
        self.schema = build_table_schema(obj, index=self.index)
        # NotImplemented on a column MultiIndex
        if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
            raise NotImplementedError(
                "orient='table' is not supported for MultiIndex")
        # TODO: Do this timedelta properly in objToJSON.c See GH #15137
        if ((obj.ndim == 1) and (obj.name in set(obj.index.names)) or
                len(obj.columns & obj.index.names)):
            msg = "Overlapping names between the index and columns"
            raise ValueError(msg)
        # work on a copy: the timedelta/period conversions below mutate
        obj = obj.copy()
        # timedeltas are not natively JSON-serializable; emit ISO strings
        timedeltas = obj.select_dtypes(include=['timedelta']).columns
        if len(timedeltas):
            obj[timedeltas] = obj[timedeltas].applymap(
                lambda x: x.isoformat())
        # Convert PeriodIndex to datetimes before serialzing
        if is_period_dtype(obj.index):
            obj.index = obj.index.to_timestamp()
        # exclude index from obj if index=False
        if not self.index:
            self.obj = obj.reset_index(drop=True)
        else:
            self.obj = obj.reset_index(drop=False)
        # orient='table' always writes records of ISO-formatted values,
        # overriding whatever the caller passed
        self.date_format = 'iso'
        self.orient = 'records'
        self.index = index

    def _write(self, obj, orient, double_precision, ensure_ascii,
               date_unit, iso_dates, default_handler):
        # wrap the records payload with the schema computed in __init__
        data = super()._write(obj, orient, double_precision, ensure_ascii,
                              date_unit, iso_dates, default_handler)
        serialized = '{{"schema": {schema}, "data": {data}}}'.format(
            schema=dumps(self.schema), data=data)
        return serialized
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None,
              convert_axes=None, convert_dates=True, keep_default_dates=True,
              numpy=False, precise_float=False, date_unit=None, encoding=None,
              lines=False, chunksize=None, compression='infer'):
    """
    Convert a JSON string to pandas object.

    Parameters
    ----------
    path_or_buf : a valid JSON string or file-like, default: None
        The string could be a URL. Valid URL schemes include http, ftp, s3,
        gcs, and file. For file URLs, a host is expected. For instance, a local
        file could be ``file://localhost/path/to/table.json``
    orient : string,
        Indication of expected JSON string format.
        Compatible JSON strings can be produced by ``to_json()`` with a
        corresponding orient value.
        The set of possible orients is:

        - ``'split'`` : dict like
          ``{index -> [index], columns -> [columns], data -> [values]}``
        - ``'records'`` : list like
          ``[{column -> value}, ... , {column -> value}]``
        - ``'index'`` : dict like ``{index -> {column -> value}}``
        - ``'columns'`` : dict like ``{column -> {index -> value}}``
        - ``'values'`` : just the values array

        The allowed and default values depend on the value
        of the `typ` parameter.

        * when ``typ == 'series'``,

          - allowed orients are ``{'split','records','index'}``
          - default is ``'index'``
          - The Series index must be unique for orient ``'index'``.

        * when ``typ == 'frame'``,

          - allowed orients are ``{'split','records','index',
            'columns','values', 'table'}``
          - default is ``'columns'``
          - The DataFrame index must be unique for orients ``'index'`` and
            ``'columns'``.
          - The DataFrame columns must be unique for orients ``'index'``,
            ``'columns'``, and ``'records'``.

        .. versionadded:: 0.23.0
           'table' as an allowed value for the ``orient`` argument

    typ : type of object to recover (series or frame), default 'frame'
    dtype : boolean or dict, default None
        If True, infer dtypes; if a dict of column to dtype, then use those;
        if False, then don't infer dtypes at all, applies only to the data.

        For all ``orient`` values except ``'table'``, default is True.

        .. versionchanged:: 0.25.0
           Not applicable for ``orient='table'``.

    convert_axes : boolean, default None
        Try to convert the axes to the proper dtypes.

        For all ``orient`` values except ``'table'``, default is True.

        .. versionchanged:: 0.25.0
           Not applicable for ``orient='table'``.

    convert_dates : boolean, default True
        List of columns to parse for dates; If True, then try to parse
        datelike columns default is True; a column label is datelike if

        * it ends with ``'_at'``,
        * it ends with ``'_time'``,
        * it begins with ``'timestamp'``,
        * it is ``'modified'``, or
        * it is ``'date'``

    keep_default_dates : boolean, default True
        If parsing dates, then parse the default datelike columns
    numpy : boolean, default False
        Direct decoding to numpy arrays. Supports numeric data only, but
        non-numeric column and index labels are supported. Note also that the
        JSON ordering MUST be the same for each term if numpy=True.
    precise_float : boolean, default False
        Set to enable usage of higher precision (strtod) function when
        decoding string to double values. Default (False) is to use fast but
        less precise builtin functionality
    date_unit : string, default None
        The timestamp unit to detect if converting dates. The default behaviour
        is to try and detect the correct precision, but if this is not desired
        then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
        milliseconds, microseconds or nanoseconds respectively.
    encoding : str, default is 'utf-8'
        The encoding to use to decode py3 bytes.

        .. versionadded:: 0.19.0

    lines : boolean, default False
        Read the file as a json object per line.

        .. versionadded:: 0.19.0

    chunksize : integer, default None
        Return JsonReader object for iteration.
        See the `line-delimited json docs
        <http://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_
        for more information on ``chunksize``.
        This can only be passed if `lines=True`.
        If this is None, the file will be read into memory all at once.

        .. versionadded:: 0.21.0

    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
        For on-the-fly decompression of on-disk data. If 'infer', then use
        gzip, bz2, zip or xz if path_or_buf is a string ending in
        '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression
        otherwise. If using 'zip', the ZIP file must contain only one data
        file to be read in. Set to None for no decompression.

        .. versionadded:: 0.21.0

    Returns
    -------
    result : Series or DataFrame, depending on the value of `typ`.

    See Also
    --------
    DataFrame.to_json

    Notes
    -----
    Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
    :class:`Index` name of `index` gets written with :func:`to_json`, the
    subsequent read operation will incorrectly set the :class:`Index` name to
    ``None``. This is because `index` is also used by :func:`DataFrame.to_json`
    to denote a missing :class:`Index` name, and the subsequent
    :func:`read_json` operation cannot distinguish between the two. The same
    limitation is encountered with a :class:`MultiIndex` and any names
    beginning with ``'level_'``.

    Examples
    --------
    >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
    ...                   index=['row 1', 'row 2'],
    ...                   columns=['col 1', 'col 2'])

    Encoding/decoding a Dataframe using ``'split'`` formatted JSON:

    >>> df.to_json(orient='split')
    '{"columns":["col 1","col 2"],
      "index":["row 1","row 2"],
      "data":[["a","b"],["c","d"]]}'

    >>> pd.read_json(_, orient='split')
          col 1 col 2
    row 1     a     b
    row 2     c     d

    Encoding/decoding a Dataframe using ``'index'`` formatted JSON:

    >>> df.to_json(orient='index')
    '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
    >>> pd.read_json(_, orient='index')
          col 1 col 2
    row 1     a     b
    row 2     c     d

    Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
    Note that index labels are not preserved with this encoding.

    >>> df.to_json(orient='records')
    '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
    >>> pd.read_json(_, orient='records')
      col 1 col 2
    0     a     b
    1     c     d

    Encoding with Table Schema

    >>> df.to_json(orient='table')
    '{"schema": {"fields": [{"name": "index", "type": "string"},
                            {"name": "col 1", "type": "string"},
                            {"name": "col 2", "type": "string"}],
                 "primaryKey": "index",
                 "pandas_version": "0.20.0"},
      "data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
               {"index": "row 2", "col 1": "c", "col 2": "d"}]}'
    """
    # dtype / convert_axes make no sense for the self-describing 'table'
    # orient, whose schema already carries this information
    if orient == 'table' and dtype:
        raise ValueError("cannot pass both dtype and orient='table'")
    if orient == 'table' and convert_axes:
        raise ValueError("cannot pass both convert_axes and orient='table'")

    # both options default to True for every orient except 'table'
    if dtype is None and orient != 'table':
        dtype = True
    if convert_axes is None and orient != 'table':
        convert_axes = True

    compression = _infer_compression(path_or_buf, compression)
    filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
        path_or_buf, encoding=encoding, compression=compression,
    )

    json_reader = JsonReader(
        filepath_or_buffer, orient=orient, typ=typ, dtype=dtype,
        convert_axes=convert_axes, convert_dates=convert_dates,
        keep_default_dates=keep_default_dates, numpy=numpy,
        precise_float=precise_float, date_unit=date_unit, encoding=encoding,
        lines=lines, chunksize=chunksize, compression=compression,
    )

    if chunksize:
        # chunked reading: hand the iterator back to the caller
        return json_reader

    result = json_reader.read()
    if should_close:
        try:
            filepath_or_buffer.close()
        except Exception:
            # BUGFIX: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt. Keep the best-effort close
            # (a failed close must not mask a successful read) but only
            # suppress ordinary exceptions.
            pass
    return result
class JsonReader(BaseIterator):
    """
    JsonReader provides an interface for reading in a JSON file.

    If initialized with ``lines=True`` and ``chunksize``, can be iterated over
    ``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the
    whole document.
    """
    def __init__(self, filepath_or_buffer, orient, typ, dtype, convert_axes,
                 convert_dates, keep_default_dates, numpy, precise_float,
                 date_unit, encoding, lines, chunksize, compression):
        self.path_or_buf = filepath_or_buffer
        self.orient = orient
        self.typ = typ
        self.dtype = dtype
        self.convert_axes = convert_axes
        self.convert_dates = convert_dates
        self.keep_default_dates = keep_default_dates
        self.numpy = numpy
        self.precise_float = precise_float
        self.date_unit = date_unit
        self.encoding = encoding
        self.compression = compression
        self.lines = lines
        self.chunksize = chunksize
        # rows yielded so far; gives chunked output a running RangeIndex
        self.nrows_seen = 0
        # True only when we opened the underlying stream ourselves
        self.should_close = False
        if self.chunksize is not None:
            self.chunksize = _validate_integer("chunksize", self.chunksize, 1)
            if not self.lines:
                raise ValueError("chunksize can only be passed if lines=True")
        data = self._get_data_from_filepath(filepath_or_buffer)
        self.data = self._preprocess_data(data)

    def _preprocess_data(self, data):
        """
        At this point, the data either has a `read` attribute (e.g. a file
        object or a StringIO) or is a string that is a JSON document.

        If self.chunksize, we prepare the data for the `__next__` method.
        Otherwise, we read it into memory for the `read` method.
        """
        # eager path: slurp the whole stream into a string
        if hasattr(data, 'read') and not self.chunksize:
            data = data.read()
        # chunked path: wrap a plain string so it can be iterated by line
        if not hasattr(data, 'read') and self.chunksize:
            data = StringIO(data)
        return data

    def _get_data_from_filepath(self, filepath_or_buffer):
        """
        The function read_json accepts three input types:
            1. filepath (string-like)
            2. file-like object (e.g. open file object, StringIO)
            3. JSON string

        This method turns (1) into (2) to simplify the rest of the processing.
        It returns input types (2) and (3) unchanged.
        """
        data = filepath_or_buffer
        exists = False
        if isinstance(data, str):
            try:
                exists = os.path.exists(filepath_or_buffer)
            # gh-5874: if the filepath is too long will raise here
            except (TypeError, ValueError):
                pass
        if exists or self.compression is not None:
            data, _ = _get_handle(filepath_or_buffer, 'r',
                                  encoding=self.encoding,
                                  compression=self.compression)
            # we opened this handle, so close() is responsible for it
            self.should_close = True
            self.open_stream = data
        return data

    def _combine_lines(self, lines):
        """
        Combines a list of JSON objects into one JSON object.
        """
        # strip whitespace and drop blank lines before joining into an array
        lines = filter(None, map(lambda x: x.strip(), lines))
        return '[' + ','.join(lines) + ']'

    def read(self):
        """
        Read the whole JSON input into a pandas object.
        """
        if self.lines and self.chunksize:
            # iterate self (__next__) and concatenate the resulting chunks
            obj = concat(self)
        elif self.lines:
            data = ensure_str(self.data)
            obj = self._get_object_parser(
                self._combine_lines(data.split('\n'))
            )
        else:
            obj = self._get_object_parser(self.data)
        self.close()
        return obj

    def _get_object_parser(self, json):
        """
        Parses a json document into a pandas object.
        """
        typ = self.typ
        dtype = self.dtype
        kwargs = {
            "orient": self.orient, "dtype": self.dtype,
            "convert_axes": self.convert_axes,
            "convert_dates": self.convert_dates,
            "keep_default_dates": self.keep_default_dates, "numpy": self.numpy,
            "precise_float": self.precise_float, "date_unit": self.date_unit
        }
        obj = None
        if typ == 'frame':
            obj = FrameParser(json, **kwargs).parse()
        # fall back to a Series when frame parsing produced nothing
        if typ == 'series' or obj is None:
            if not isinstance(dtype, bool):
                kwargs['dtype'] = dtype
            obj = SeriesParser(json, **kwargs).parse()
        return obj

    def close(self):
        """
        If we opened a stream earlier, in _get_data_from_filepath, we should
        close it.

        If an open stream or file was passed, we leave it open.
        """
        if self.should_close:
            try:
                self.open_stream.close()
            except (IOError, AttributeError):
                pass

    def __next__(self):
        # pull the next `chunksize` lines off the line iterator
        lines = list(islice(self.data, self.chunksize))
        if lines:
            lines_json = self._combine_lines(lines)
            obj = self._get_object_parser(lines_json)
            # Make sure that the returned objects have the right index.
            obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))
            self.nrows_seen += len(obj)
            return obj
        # stream exhausted: release the handle (if ours) and stop
        self.close()
        raise StopIteration
class Parser:
    """Base class turning a decoded JSON payload into a pandas object,
    with optional dtype coercion and date inference."""

    # timestamp units supported for date conversion
    _STAMP_UNITS = ('s', 'ms', 'us', 'ns')
    # smallest value (one year past the epoch) per unit that is still
    # treated as a plausible timestamp; smaller numbers are left alone
    _MIN_STAMPS = {
        's': 31536000,
        'ms': 31536000000,
        'us': 31536000000000,
        'ns': 31536000000000000}

    def __init__(self, json, orient, dtype=None, convert_axes=True,
                 convert_dates=True, keep_default_dates=False, numpy=False,
                 precise_float=False, date_unit=None):
        self.json = json
        if orient is None:
            orient = self._default_orient
        self.orient = orient
        self.dtype = dtype
        # the direct-to-numpy fast path does not handle orient='split'
        if orient == "split":
            numpy = False
        if date_unit is not None:
            date_unit = date_unit.lower()
            if date_unit not in self._STAMP_UNITS:
                raise ValueError('date_unit must be one of {units}'
                                 .format(units=self._STAMP_UNITS))
            self.min_stamp = self._MIN_STAMPS[date_unit]
        else:
            # no unit forced: use the loosest (seconds) plausibility bound
            self.min_stamp = self._MIN_STAMPS['s']
        self.numpy = numpy
        self.precise_float = precise_float
        self.convert_axes = convert_axes
        self.convert_dates = convert_dates
        self.date_unit = date_unit
        self.keep_default_dates = keep_default_dates
        # populated by _parse_numpy/_parse_no_numpy in subclasses
        self.obj = None

    def check_keys_split(self, decoded):
        """
        Checks that dict has only the appropriate keys for orient='split'.
        """
        bad_keys = set(decoded.keys()).difference(set(self._split_keys))
        if bad_keys:
            bad_keys = ", ".join(bad_keys)
            raise ValueError("JSON data had unexpected key(s): {bad_keys}"
                             .format(bad_keys=pprint_thing(bad_keys)))

    def parse(self):
        """Decode the JSON, convert axes/types, and return the result
        (or None when nothing could be parsed)."""

        # try numpy
        numpy = self.numpy
        if numpy:
            self._parse_numpy()
        else:
            self._parse_no_numpy()

        if self.obj is None:
            return None
        if self.convert_axes:
            self._convert_axes()
        self._try_convert_types()
        return self.obj

    def _convert_axes(self):
        """
        Try to convert axes.
        """
        for axis in self.obj._AXIS_NUMBERS.keys():
            new_axis, result = self._try_convert_data(
                axis, self.obj._get_axis(axis), use_dtypes=False,
                convert_dates=True)
            if result:
                setattr(self.obj, axis, new_axis)

    def _try_convert_types(self):
        # implemented by SeriesParser / FrameParser
        raise AbstractMethodError(self)

    def _try_convert_data(self, name, data, use_dtypes=True,
                          convert_dates=True):
        """
        Try to parse a ndarray like into a column by inferring dtype.

        Returns (possibly-converted data, True if a conversion happened).
        """

        # don't try to coerce, unless a force conversion
        if use_dtypes:
            if not self.dtype:
                return data, False
            elif self.dtype is True:
                pass
            else:
                # dtype to force
                dtype = (self.dtype.get(name)
                         if isinstance(self.dtype, dict) else self.dtype)
                if dtype is not None:
                    try:
                        dtype = np.dtype(dtype)
                        return data.astype(dtype), True
                    except (TypeError, ValueError):
                        return data, False

        if convert_dates:
            new_data, result = self._try_convert_to_date(data)
            if result:
                return new_data, True

        result = False

        if data.dtype == 'object':

            # try float
            try:
                data = data.astype('float64')
                result = True
            except (TypeError, ValueError):
                pass

        if data.dtype.kind == 'f':

            if data.dtype != 'float64':

                # coerce floats to 64
                try:
                    data = data.astype('float64')
                    result = True
                except (TypeError, ValueError):
                    pass

        # don't coerce 0-len data
        if len(data) and (data.dtype == 'float' or data.dtype == 'object'):

            # coerce ints if we can (only when the values are unchanged)
            try:
                new_data = data.astype('int64')
                if (new_data == data).all():
                    data = new_data
                    result = True
            except (TypeError, ValueError):
                pass

        # coerce ints to 64
        if data.dtype == 'int':

            # coerce floats to 64
            try:
                data = data.astype('int64')
                result = True
            except (TypeError, ValueError):
                pass

        return data, result

    def _try_convert_to_date(self, data):
        """
        Try to parse a ndarray like into a date column.

        Try to coerce object in epoch/iso formats and integer/float in epoch
        formats. Return a boolean if parsing was successful.
        """

        # no conversion on empty
        if not len(data):
            return data, False

        new_data = data
        if new_data.dtype == 'object':
            try:
                new_data = data.astype('int64')
            except (TypeError, ValueError, OverflowError):
                pass

        # ignore numbers that are out of range
        if issubclass(new_data.dtype.type, np.number):
            in_range = (isna(new_data.values) | (new_data > self.min_stamp) |
                        (new_data.values == iNaT))
            if not in_range.all():
                return data, False

        # try the forced unit, or each supported unit in turn
        date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
        for date_unit in date_units:
            try:
                new_data = to_datetime(new_data, errors='raise',
                                       unit=date_unit)
            except ValueError:
                # wrong unit for these values: try the next one
                continue
            except Exception:
                break
            return new_data, True
        return data, False

    def _try_convert_dates(self):
        # implemented by FrameParser
        raise AbstractMethodError(self)
class SeriesParser(Parser):
    """Parser producing a Series from decoded JSON."""

    # orient used when the caller passes orient=None
    _default_orient = 'index'
    # keys permitted in an orient='split' payload
    _split_keys = ('name', 'index', 'data')

    def _parse_no_numpy(self):
        """Decode via the pure-Python path into ``self.obj``."""
        json = self.json
        orient = self.orient
        if orient == "split":
            # stringify keys so they can be passed as Series(**kwargs)
            decoded = {str(k): v for k, v in loads(
                json, precise_float=self.precise_float).items()}
            self.check_keys_split(decoded)
            self.obj = Series(dtype=None, **decoded)
        else:
            self.obj = Series(
                loads(json, precise_float=self.precise_float), dtype=None)

    def _parse_numpy(self):
        """Decode via the direct-to-numpy path into ``self.obj``."""
        json = self.json
        orient = self.orient
        if orient == "split":
            decoded = loads(json, dtype=None, numpy=True,
                            precise_float=self.precise_float)
            decoded = {str(k): v for k, v in decoded.items()}
            self.check_keys_split(decoded)
            self.obj = Series(**decoded)
        elif orient == "columns" or orient == "index":
            # labelled=True presumably yields the Series constructor args
            # positionally (values, then labels) -- verify against the
            # pandas._libs.json loads documentation
            self.obj = Series(*loads(json, dtype=None, numpy=True,
                                     labelled=True,
                                     precise_float=self.precise_float))
        else:
            self.obj = Series(loads(json, dtype=None, numpy=True,
                                    precise_float=self.precise_float))

    def _try_convert_types(self):
        # run dtype/date inference over the whole Series
        if self.obj is None:
            return
        obj, result = self._try_convert_data(
            'data', self.obj, convert_dates=self.convert_dates)
        if result:
            self.obj = obj
class FrameParser(Parser):
    """Parser producing a DataFrame from decoded JSON."""

    # orient used when the caller passes orient=None
    _default_orient = 'columns'
    # keys permitted in an orient='split' payload
    _split_keys = ('columns', 'index', 'data')

    def _parse_numpy(self):
        """Decode via the direct-to-numpy path into ``self.obj``."""
        json = self.json
        orient = self.orient

        if orient == "columns":
            args = loads(json, dtype=None, numpy=True, labelled=True,
                         precise_float=self.precise_float)
            if len(args):
                # transpose the values and swap the two label arrays so the
                # column-oriented payload fits DataFrame(data, index, columns)
                args = (args[0].T, args[2], args[1])
            self.obj = DataFrame(*args)
        elif orient == "split":
            decoded = loads(json, dtype=None, numpy=True,
                            precise_float=self.precise_float)
            # stringify keys so they can be passed as DataFrame(**kwargs)
            decoded = {str(k): v for k, v in decoded.items()}
            self.check_keys_split(decoded)
            self.obj = DataFrame(**decoded)
        elif orient == "values":
            self.obj = DataFrame(loads(json, dtype=None, numpy=True,
                                       precise_float=self.precise_float))
        else:
            self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
                                        labelled=True,
                                        precise_float=self.precise_float))

    def _parse_no_numpy(self):
        """Decode via the pure-Python path into ``self.obj``."""
        json = self.json
        orient = self.orient

        if orient == "columns":
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None)
        elif orient == "split":
            decoded = {str(k): v for k, v in loads(
                json, precise_float=self.precise_float).items()}
            self.check_keys_split(decoded)
            self.obj = DataFrame(dtype=None, **decoded)
        elif orient == "index":
            # index-oriented JSON decodes with axes swapped; transpose back
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None).T
        elif orient == 'table':
            self.obj = parse_table_schema(json,
                                          precise_float=self.precise_float)
        else:
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None)

    def _process_converter(self, f, filt=None):
        """
        Take a conversion function and possibly recreate the frame.

        ``f(col, values) -> (new_values, converted?)`` is applied to every
        column accepted by ``filt``; the frame is rebuilt only if at least
        one column actually changed.
        """
        if filt is None:
            filt = lambda col, c: True

        needs_new_obj = False
        new_obj = dict()
        # key the working dict by position (i), not label, so duplicate
        # column labels do not collapse into one entry
        for i, (col, c) in enumerate(self.obj.iteritems()):
            if filt(col, c):
                new_data, result = f(col, c)
                if result:
                    c = new_data
                    needs_new_obj = True
            new_obj[i] = c

        if needs_new_obj:

            # possibly handle dup columns
            new_obj = DataFrame(new_obj, index=self.obj.index)
            new_obj.columns = self.obj.columns
            self.obj = new_obj

    def _try_convert_types(self):
        if self.obj is None:
            return
        # dates first, then generic dtype inference on the remaining columns
        if self.convert_dates:
            self._try_convert_dates()

        self._process_converter(
            lambda col, c: self._try_convert_data(col, c, convert_dates=False))

    def _try_convert_dates(self):
        if self.obj is None:
            return

        # our columns to parse
        convert_dates = self.convert_dates
        if convert_dates is True:
            convert_dates = []
        convert_dates = set(convert_dates)

        def is_ok(col):
            """
            Return if this col is ok to try for a date parse.
            """
            if not isinstance(col, str):
                return False

            col_lower = col.lower()
            # the default set of datelike column-name patterns
            if (col_lower.endswith('_at') or
                    col_lower.endswith('_time') or
                    col_lower == 'modified' or
                    col_lower == 'date' or
                    col_lower == 'datetime' or
                    col_lower.startswith('timestamp')):
                return True
            return False

        self._process_converter(
            lambda col, c: self._try_convert_to_date(c),
            lambda col, c: ((self.keep_default_dates and is_ok(col)) or
                            col in convert_dates))
| bsd-3-clause |
qPCR4vir/orange3 | Orange/tests/test_tree.py | 1 | 3871 | # Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
import numpy as np
import sklearn.tree as skl_tree
from sklearn.tree._tree import TREE_LEAF
from Orange.data import Table
from Orange.classification import TreeLearner
from Orange.regression import TreeRegressionLearner
class TestTreeLearner(unittest.TestCase):
    """Smoke tests: an unpruned Orange tree should fit its training data
    exactly."""

    def test_classification(self):
        data = Table('iris')
        model = TreeLearner()(data)
        predictions = model(data)
        self.assertTrue(np.all(data.Y.flatten() == predictions))

    def test_regression(self):
        data = Table('housing')
        model = TreeRegressionLearner()(data)
        predictions = model(data)
        self.assertTrue(np.all(data.Y.flatten() == predictions))
class TestDecisionTreeClassifier(unittest.TestCase):
    """Exercise scikit-learn's DecisionTreeClassifier on the iris data."""

    @classmethod
    def setUpClass(cls):
        cls.iris = Table('iris')

    def test_full_tree(self):
        data = self.iris
        classifier = skl_tree.DecisionTreeClassifier().fit(data.X, data.Y)
        predicted = classifier.predict(data.X)
        # an unrestricted tree memorizes the training set
        self.assertTrue(np.all(data.Y.flatten() == predicted))

    def test_min_samples_split(self):
        data = self.iris
        limit = 5
        classifier = skl_tree.DecisionTreeClassifier(
            min_samples_split=limit).fit(data.X, data.Y)
        tree = classifier.tree_
        # every internal (non-leaf) node must hold at least `limit` samples
        for node in range(tree.node_count):
            if tree.children_left[node] != TREE_LEAF:
                self.assertGreaterEqual(tree.n_node_samples[node], limit)

    def test_min_samples_leaf(self):
        data = self.iris
        limit = 5
        classifier = skl_tree.DecisionTreeClassifier(
            min_samples_leaf=limit).fit(data.X, data.Y)
        tree = classifier.tree_
        # every leaf must hold at least `limit` samples
        for node in range(tree.node_count):
            if tree.children_left[node] == TREE_LEAF:
                self.assertGreaterEqual(tree.n_node_samples[node], limit)

    def test_max_leaf_nodes(self):
        data = self.iris
        limit = 5
        classifier = skl_tree.DecisionTreeClassifier(
            max_leaf_nodes=limit).fit(data.X, data.Y)
        # a binary tree with at most `limit` leaves has <= 2*limit - 1 nodes
        self.assertLessEqual(classifier.tree_.node_count, limit * 2 - 1)

    def test_criterion(self):
        # fitting with the entropy criterion must not raise
        data = self.iris
        skl_tree.DecisionTreeClassifier(criterion="entropy").fit(
            data.X, data.Y)

    def test_splitter(self):
        # fitting with the random splitter must not raise
        data = self.iris
        skl_tree.DecisionTreeClassifier(splitter="random").fit(
            data.X, data.Y)

    def test_weights(self):
        data = self.iris
        plain = skl_tree.DecisionTreeClassifier(max_depth=2).fit(
            data.X, data.Y)
        weighted = skl_tree.DecisionTreeClassifier(max_depth=2).fit(
            data.X, data.Y, sample_weight=np.arange(len(data)))
        # weighting the samples should change the chosen split features
        self.assertFalse(
            len(plain.tree_.feature) == len(weighted.tree_.feature) and
            np.all(plain.tree_.feature == weighted.tree_.feature))

    def test_impurity(self):
        data = self.iris
        tree = skl_tree.DecisionTreeClassifier().fit(data.X, data.Y).tree_
        for node in range(tree.node_count):
            if tree.children_left[node] == TREE_LEAF:
                # a fully-grown tree ends in pure leaves
                self.assertEqual(tree.impurity[node], 0)
            else:
                left = tree.children_left[node]
                right = tree.children_right[node]
                # at least one child is no less pure than its parent
                self.assertLessEqual(
                    min(tree.impurity[left], tree.impurity[right]),
                    tree.impurity[node])

    def test_navigate_tree(self):
        data = self.iris
        classifier = skl_tree.DecisionTreeClassifier(max_depth=1).fit(
            data.X, data.Y)
        tree = classifier.tree_
        sample = data.X[0]
        # follow the single root split by hand and compare with predict()
        if sample[tree.feature[0]] <= tree.threshold[0]:
            value = tree.value[tree.children_left[0]][0]
        else:
            value = tree.value[tree.children_right[0]][0]
        self.assertEqual(np.argmax(value), classifier.predict(data.X[0]))
| bsd-2-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.